Dataset columns:
repo_name: string (length 6-130)
hexsha: list
file_path: list
code: list
apis: list
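Each record below pairs one repository with parallel lists of file hashes, paths, source strings, and extracted APIs. For orientation only, a minimal sketch of iterating over such a dump, assuming it is serialized as JSON Lines with one object per repository (the file name data.jsonl and that layout are assumptions, not stated by the dump):

import json

# Minimal sketch (assumption: the dump is JSON Lines, one object per repo,
# with fields named after the columns above; "data.jsonl" is a placeholder).
def iter_records(path="data.jsonl"):
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.strip():
                yield json.loads(line)

# Example: print each repo with its file paths and per-file API counts.
# for r in iter_records():
#     print(r["repo_name"], r["file_path"], [len(a) for a in r["apis"]])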
AranKomat/Diff-DALLE
[ "9418e98e97b599c5c65f16ee168fedf76a29095f" ]
[ "diff_dalle/unet.py" ]
[ "from abc import abstractmethod\n\nimport math\n\nimport numpy as np\nimport torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .fp16_util import convert_module_to_f16, convert_module_to_f32, convert_module_to_f16_2\nfrom .nn import (\n checkpoint,\n conv_nd,\n linear,\n avg_pool_nd,\n zero_module,\n normalization,\n timestep_embedding,\n PositionalEncoding,\n LayerNorm32,\n FFN,\n)\n\n\nclass AttentionPool2d(nn.Module):\n \"\"\"\n Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py\n \"\"\"\n\n def __init__(\n self,\n spacial_dim: int,\n embed_dim: int,\n num_heads_channels: int,\n output_dim: int = None,\n ):\n super().__init__()\n self.positional_embedding = nn.Parameter(\n th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5\n )\n self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)\n self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)\n self.num_heads = embed_dim // num_heads_channels\n self.attention = QKVAttention(self.num_heads)\n\n def forward(self, x):\n b, c, *_spatial = x.shape\n x = x.reshape(b, c, -1) # NC(HW)\n x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) \n x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)\n x = self.qkv_proj(x)\n x = self.attention(x)\n x = self.c_proj(x)\n return x[:, :, 0]\n\n\nclass TimestepBlock(nn.Module):\n \"\"\"\n Any module where forward() takes timestep embeddings as a second argument.\n \"\"\"\n\n @abstractmethod\n def forward(self, x, emb):\n \"\"\"\n Apply the module to `x` given `emb` timestep embeddings.\n \"\"\"\n\nclass ConditionalBlock(nn.Module):\n \"\"\"\n Any module where forward() takes y as a second argument.\n \"\"\"\n\n @abstractmethod\n def forward(self, x, y):\n \"\"\"\n Apply the module to `x` given `y`.\n \"\"\"\n \n\nclass TimestepEmbedSequential(nn.Sequential, TimestepBlock):\n \"\"\"\n A sequential module that passes timestep embeddings to the children that\n support it as an extra input.\n \"\"\"\n\n def forward(self, x, emb, y=None):\n for layer in self:\n if isinstance(layer, TimestepBlock):\n x = layer(x, emb)\n elif isinstance(layer, ConditionalBlock):\n x = layer(x, y=y)\n else:\n x = layer(x)\n return x\n\n\nclass Upsample(nn.Module):\n \"\"\"\n An upsampling layer with an optional convolution.\n\n :param channels: channels in the inputs and outputs.\n :param use_conv: a bool determining if a convolution is applied.\n :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then\n upsampling occurs in the inner-two dimensions.\n \"\"\"\n\n def __init__(self, channels, use_conv, dims=2, out_channels=None):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.dims = dims\n if use_conv:\n self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)\n\n def forward(self, x):\n assert x.shape[1] == self.channels\n if self.dims == 3:\n x = F.interpolate(\n x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode=\"nearest\"\n )\n else:\n x = F.interpolate(x, scale_factor=2, mode=\"nearest\")\n if self.use_conv:\n x = self.conv(x)\n return x\n\n\nclass Downsample(nn.Module):\n \"\"\"\n A downsampling layer with an optional convolution.\n\n :param channels: channels in the inputs and outputs.\n :param use_conv: a bool determining if a convolution is applied.\n :param dims: determines if the signal is 1D, 2D, or 3D. 
If 3D, then\n downsampling occurs in the inner-two dimensions.\n \"\"\"\n\n def __init__(self, channels, use_conv, dims=2, out_channels=None):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.dims = dims\n stride = 2 if dims != 3 else (1, 2, 2)\n if use_conv:\n self.op = conv_nd(\n dims, self.channels, self.out_channels, 3, stride=stride, padding=1\n )\n else:\n assert self.channels == self.out_channels\n self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)\n\n def forward(self, x):\n assert x.shape[1] == self.channels\n return self.op(x)\n\n\nclass ResBlock(TimestepBlock):\n \"\"\"\n A residual block that can optionally change the number of channels.\n\n :param channels: the number of input channels.\n :param emb_channels: the number of timestep embedding channels.\n :param dropout: the rate of dropout.\n :param out_channels: if specified, the number of out channels.\n :param use_conv: if True and out_channels is specified, use a spatial\n convolution instead of a smaller 1x1 convolution to change the\n channels in the skip connection.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param use_checkpoint: if True, use gradient checkpointing on this module.\n :param up: if True, use this block for upsampling.\n :param down: if True, use this block for downsampling.\n \"\"\"\n\n def __init__(\n self,\n channels,\n emb_channels,\n dropout,\n out_channels=None,\n use_conv=False,\n use_scale_shift_norm=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n super().__init__()\n self.channels = channels\n self.emb_channels = emb_channels\n self.dropout = dropout\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.use_checkpoint = use_checkpoint\n self.use_scale_shift_norm = use_scale_shift_norm\n\n self.in_layers = nn.Sequential(\n normalization(channels),\n nn.SiLU(),\n conv_nd(dims, channels, self.out_channels, 3, padding=1),\n )\n\n self.updown = up or down\n\n if up:\n self.h_upd = Upsample(channels, False, dims)\n self.x_upd = Upsample(channels, False, dims)\n elif down:\n self.h_upd = Downsample(channels, False, dims)\n self.x_upd = Downsample(channels, False, dims)\n else:\n self.h_upd = self.x_upd = nn.Identity()\n\n self.emb_layers = nn.Sequential(\n nn.SiLU(),\n linear(\n emb_channels,\n 2 * self.out_channels if use_scale_shift_norm else self.out_channels,\n ),\n )\n self.out_layers = nn.Sequential(\n normalization(self.out_channels),\n nn.SiLU(),\n nn.Dropout(p=dropout),\n zero_module(\n conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)\n ),\n )\n\n if self.out_channels == channels:\n self.skip_connection = nn.Identity()\n elif use_conv:\n self.skip_connection = conv_nd(\n dims, channels, self.out_channels, 3, padding=1\n )\n else:\n self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)\n\n def forward(self, x, emb):\n \"\"\"\n Apply the block to a Tensor, conditioned on a timestep embedding.\n\n :param x: an [N x C x ...] Tensor of features.\n :param emb: an [N x emb_channels] Tensor of timestep embeddings.\n :return: an [N x C x ...] 
Tensor of outputs.\n \"\"\"\n return checkpoint(\n self._forward, (x, emb), self.parameters(), self.use_checkpoint\n )\n\n def _forward(self, x, emb):\n if self.updown:\n in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]\n h = in_rest(x)\n h = self.h_upd(h)\n x = self.x_upd(x)\n h = in_conv(h)\n else:\n h = self.in_layers(x)\n emb_out = self.emb_layers(emb).type(h.dtype)\n while len(emb_out.shape) < len(h.shape):\n emb_out = emb_out[..., None]\n if self.use_scale_shift_norm:\n out_norm, out_rest = self.out_layers[0], self.out_layers[1:]\n scale, shift = th.chunk(emb_out, 2, dim=1)\n h = out_norm(h) * (1 + scale) + shift\n h = out_rest(h)\n else:\n h = h + emb_out\n h = self.out_layers(h)\n return self.skip_connection(x) + h\n\n\nclass SubAttentionBlock(nn.Module):\n \"\"\"\n An attention block that allows spatial positions to attend to each other.\n\n Originally ported from here, but adapted to the N-d case.\n https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.\n \"\"\"\n\n def __init__(\n self,\n channels,\n num_heads=1,\n num_head_channels=-1,\n use_checkpoint=False,\n use_new_attention_order=False,\n cross=False,\n enc_attn_dim=None,\n norm_type='group',\n dropout=0.,\n ):\n super().__init__()\n self.channels = channels\n if num_head_channels == -1:\n self.num_heads = num_heads\n else:\n assert (\n channels % num_head_channels == 0\n ), f\"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}\"\n self.num_heads = channels // num_head_channels\n self.use_checkpoint = use_checkpoint\n self.norm = normalization(channels) if norm_type == 'group' else LayerNorm32(channels)\n qkv_dim = channels * 3 if not cross else channels\n self.qkv = conv_nd(1, channels, qkv_dim, 1)\n # split heads before split qkv\n self.attention = QKVAttentionLegacy(self.num_heads, dropout=dropout)\n\n self.proj_out = conv_nd(1, channels, channels, 1)\n if norm_type == 'group':\n self.proj_out = zero_module(self.proj_out)\n self.dropout = nn.Dropout(p=dropout)\n \n if cross and channels != enc_attn_dim:\n self.adjust_kv_dim = conv_nd(1, enc_attn_dim, channels, 1)\n\n def forward(self, x, y=None):\n return checkpoint(self._forward, (x, y), self.parameters(), self.use_checkpoint)\n\n def _forward(self, x, y):\n b, c, *spatial = x.shape\n x = x.reshape(b, c, -1)\n qkv = self.qkv(self.norm(x))\n if hasattr(self, 'adjust_kv_dim'):\n y = self.adjust_kv_dim(y)\n \n h = self.attention(qkv, y=y)\n h = self.dropout(self.proj_out(h))\n return (x + h).reshape(b, c, *spatial)\n\n \nclass AttentionBlock(ConditionalBlock):\n \"\"\"\n An attention block that allows spatial positions to attend to each other.\n\n Originally ported from here, but adapted to the N-d case.\n https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n self.attn1 = SubAttentionBlock(*args, **{**kwargs, **{'cross': False}}) \n if kwargs['cross']:\n self.attn2 = SubAttentionBlock(*args, **kwargs)\n\n def forward(self, x, y=None):\n x = self.attn1(x)\n if hasattr(self, 'attn2'):\n x = self.attn2(x, y) \n return x\n\n \ndef count_flops_attn(model, _x, y):\n \"\"\"\n A counter for the `thop` package to count the operations in an\n attention operation.\n Meant to be used like:\n macs, params = thop.profile(\n model,\n inputs=(inputs, timestamps),\n custom_ops={QKVAttention: QKVAttention.count_flops},\n )\n \"\"\"\n b, c, 
*spatial = y[0].shape\n num_spatial = int(np.prod(spatial))\n # We perform two matmuls with the same number of ops.\n # The first computes the weight matrix, the second computes\n # the combination of the value vectors.\n matmul_ops = 2 * b * (num_spatial ** 2) * c\n model.total_ops += th.DoubleTensor([matmul_ops])\n\n\nclass QKVAttentionLegacy(nn.Module):\n \"\"\"\n A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping\n \"\"\"\n\n def __init__(self, n_heads, dropout=0.):\n super().__init__()\n self.n_heads = n_heads\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, qkv, y):\n \"\"\"\n Apply QKV attention.\n\n :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.\n :return: an [N x (H * C) x T] tensor after attention.\n \"\"\"\n bs, width, length = qkv.shape\n if y is None:\n assert width % (3 * self.n_heads) == 0\n ch = width // (3 * self.n_heads)\n q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)\n else:\n assert width % (self.n_heads) == 0\n ch = width // (self.n_heads)\n q = qkv.reshape(bs * self.n_heads, ch, length)\n k = v = y.reshape(bs * self.n_heads, ch, -1)\n scale = 1 / math.sqrt(math.sqrt(ch))\n weight = th.einsum(\n \"bct,bcs->bts\", q * scale, k * scale\n ) # More stable with f16 than dividing afterwards\n weight = self.dropout(th.softmax(weight.float(), dim=-1).type(weight.dtype))\n a = th.einsum(\"bts,bcs->bct\", weight, v)\n return a.reshape(bs, -1, length)\n\n @staticmethod\n def count_flops(model, _x, y):\n return count_flops_attn(model, _x, y)\n\n\nclass QKVAttention(nn.Module):\n \"\"\"\n A module which performs QKV attention and splits in a different order.\n \"\"\"\n\n def __init__(self, n_heads):\n super().__init__()\n self.n_heads = n_heads\n\n def forward(self, qkv):\n \"\"\"\n Apply QKV attention.\n\n :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.\n :return: an [N x (H * C) x T] tensor after attention.\n \"\"\"\n bs, width, length = qkv.shape\n assert width % (3 * self.n_heads) == 0\n ch = width // (3 * self.n_heads)\n q, k, v = qkv.chunk(3, dim=1)\n scale = 1 / math.sqrt(math.sqrt(ch))\n weight = th.einsum(\n \"bct,bcs->bts\",\n (q * scale).view(bs * self.n_heads, ch, length),\n (k * scale).view(bs * self.n_heads, ch, length),\n ) # More stable with f16 than dividing afterwards\n weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)\n a = th.einsum(\"bts,bcs->bct\", weight, v.reshape(bs * self.n_heads, ch, length))\n return a.reshape(bs, -1, length)\n\n @staticmethod\n def count_flops(model, _x, y):\n return count_flops_attn(model, _x, y)\n \n \nclass TransformerEncoder(nn.Module):\n def __init__(self, enc_attn_dim, vocab_size, use_checkpoint, clip=False, dropout=0.):\n super().__init__()\n d_model = enc_attn_dim\n self.use_checkpoint = use_checkpoint\n self.encoder = nn.ModuleList([])\n for _ in range(d_model//64):\n self.encoder += [AttentionBlock(d_model, num_heads=d_model//64, norm_type='layernorm', cross=False, use_checkpoint=use_checkpoint, dropout=dropout)]\n self.encoder += [FFN(d_model, dropout=dropout)]\n \n self.pos_enc = PositionalEncoding(d_model, dropout=dropout)\n self.emb = nn.Embedding(vocab_size, d_model)\n \n if clip:\n self.clip_proj = conv_nd(1, enc_attn_dim, enc_attn_dim, 1)\n\n def forward(self, text):\n x = self.pos_enc(self.emb(text)).transpose(1, 2)\n for idx, layer in enumerate(self.encoder):\n x = checkpoint(layer.forward, (x,), layer.parameters(), self.use_checkpoint)\n\n if not hasattr(self, 'clip_proj'):\n return x\n 
else:\n return self.clip_proj(x[th.arange(x.shape[0]), :, text.argmax(dim=-1)].unsqueeze(-1)).squeeze(-1)\n \n\nclass UNetModel(nn.Module):\n \"\"\"\n The full UNet model with attention and timestep embedding.\n\n :param in_channels: channels in the input Tensor.\n :param model_channels: base channel count for the model.\n :param out_channels: channels in the output Tensor.\n :param num_res_blocks: number of residual blocks per downsample.\n :param attention_resolutions: a collection of downsample rates at which\n attention will take place. May be a set, list, or tuple.\n For example, if this contains 4, then at 4x downsampling, attention\n will be used.\n :param dropout: the dropout probability.\n :param channel_mult: channel multiplier for each level of the UNet.\n :param conv_resample: if True, use learned convolutions for upsampling and\n downsampling.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param text_length: if specified (as an int), then this model will be\n conditional.\n :param use_checkpoint: use gradient checkpointing to reduce memory usage.\n :param num_heads: the number of attention heads in each attention layer.\n :param num_heads_channels: if specified, ignore num_heads and instead use\n a fixed channel width per attention head.\n :param num_heads_upsample: works with num_heads to set a different number\n of heads for upsampling. Deprecated.\n :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.\n :param resblock_updown: use residual blocks for up/downsampling.\n :param use_new_attention_order: use a different attention pattern for potentially\n increased efficiency.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n use_checkpoint=False,\n use_fp16=True,\n num_heads=1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=True,\n resblock_updown=True,\n use_new_attention_order=False,\n enc_attn_dim=None,\n vocab_size=None,\n cross=True,\n text_level=False,\n dropout_text=0,\n cond_text=False,\n ):\n super().__init__()\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n self.image_size = image_size\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n self.num_res_blocks = num_res_blocks\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n \n self.cond_text = cond_text\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n if self.cond_text:\n self.text_encoder = TransformerEncoder(enc_attn_dim, vocab_size, use_checkpoint, dropout=dropout_text)\n else:\n cross = False\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for _ in range(num_res_blocks):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n 
out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=num_head_channels,\n use_new_attention_order=use_new_attention_order,\n enc_attn_dim=enc_attn_dim,\n cross=cross,\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=num_head_channels,\n use_new_attention_order=use_new_attention_order,\n enc_attn_dim=enc_attn_dim,\n cross=cross,\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n\n self.output_blocks = nn.ModuleList([])\n for level, mult in list(enumerate(channel_mult))[::-1]:\n for i in range(num_res_blocks + 1):\n ich = input_block_chans.pop()\n layers = [\n ResBlock(\n ch + ich,\n time_embed_dim,\n dropout,\n out_channels=model_channels * mult,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = model_channels * mult\n if ds in attention_resolutions:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads_upsample,\n num_head_channels=num_head_channels,\n use_new_attention_order=use_new_attention_order,\n enc_attn_dim=enc_attn_dim,\n cross=cross,\n )\n )\n if level and i == num_res_blocks:\n out_ch = ch\n layers.append(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n up=True,\n )\n if resblock_updown\n else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)\n )\n ds //= 2\n self.output_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),\n )\n \n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n self.output_blocks.apply(convert_module_to_f16)\n if hasattr(self, 'text_encoder'):\n self.text_encoder.apply(convert_module_to_f16_2)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n self.output_blocks.apply(convert_module_to_f32)\n if hasattr(self, 'text_encoder'):\n self.text_encoder.apply(convert_module_to_f32)\n \n def forward(self, 
x, timesteps, y=None):\n \"\"\"\n Apply the model to an input batch.\n\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param y: an [N, L] Tensor of texts, if conditional.\n :return: an [N x C x ...] Tensor of outputs.\n \"\"\"\n\n hs = []\n emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))\n \n if hasattr(self, 'text_encoder'):\n y = self.text_encoder(y) \n else:\n y = None\n \n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb, y=y)\n hs.append(h)\n h = self.middle_block(h, emb, y=y)\n for module in self.output_blocks:\n h = th.cat([h, hs.pop()], dim=1)\n h = module(h, emb, y=y)\n h = h.type(x.dtype)\n return self.out(h)\n\n\nclass SuperResModel(UNetModel):\n \"\"\"\n A UNetModel that performs super-resolution.\n\n Expects an extra kwarg `low_res` to condition on a low-resolution image.\n \"\"\"\n\n def __init__(self, image_size, in_channels, *args, **kwargs):\n super().__init__(image_size, in_channels * 2, *args, **kwargs)\n\n def forward(self, x, timesteps, **kwargs):\n _, _, new_height, new_width = x.shape\n upsampled = F.interpolate(kwargs.pop(\"low_res\"), (new_height, new_width), mode=\"bilinear\")\n x = th.cat([x, upsampled], dim=1)\n return super().forward(x, timesteps, **kwargs)\n\n \nclass Classifier(nn.Module):\n \"\"\"\n The half UNet model with attention and timestep embedding + text encoder as CLIP.\n\n \"\"\"\n \n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n use_checkpoint=False,\n use_fp16=True,\n num_heads=1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=True,\n resblock_updown=True,\n use_new_attention_order=False,\n enc_attn_dim=None,\n vocab_size=None,\n cross=False,\n dropout_text=0,\n ):\n super().__init__()\n self.image_encoder = ImageEncoder(\n image_size,\n in_channels,\n model_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=dropout,\n channel_mult=channel_mult,\n conv_resample=conv_resample,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_fp16=use_fp16,\n num_heads=num_heads,\n num_head_channels=num_head_channels,\n num_heads_upsample=num_heads_upsample,\n use_scale_shift_norm=use_scale_shift_norm,\n resblock_updown=resblock_updown,\n use_new_attention_order=use_new_attention_order,\n enc_attn_dim=enc_attn_dim,\n cross=cross,\n )\n self.text_encoder = TransformerEncoder(enc_attn_dim, vocab_size, use_checkpoint, clip=True, dropout=dropout_text)\n self.logit_scale = LogitScale()\n\n def convert_to_fp16(self):\n self.text_encoder.apply(convert_module_to_f16_2)\n self.image_encoder.apply(convert_module_to_f16)\n \n def convert_to_fp32(self):\n self.text_encoder.apply(convert_module_to_f32)\n self.image_encoder.apply(convert_module_to_f32) \n \n def clip_loss(x, timesteps, y):\n image_features = self.image_encoder(x, timesteps)\n text_features = self.text_encoder(y)\n logit_scale = self.logit_scale(image_features.dtype)\n return clip_loss(image_features, text_features, logit_scale)\n\n \nclass LogitScale(nn.Module):\n def __init__(self):\n super().__init__()\n self.logit_scale = nn.Parameter(th.ones([]) * np.log(1 / 0.07))\n self.max_log_temp = np.log(100)\n \n def forward(self, dtype):\n logit_scale = self.max_log_temp - F.softplus(self.max_log_temp - self.logit_scale) \n return logit_scale.exp().type(dtype)\n \n \nclass TextEncoder(nn.Module):\n def __init__(\n self,\n 
enc_attn_dim,\n vocab_size,\n use_checkpoint,\n dropout_text,\n ):\n super().__init__()\n self.text_encoder = TransformerEncoder(enc_attn_dim, vocab_size, use_checkpoint, clip=True, dropout=dropout_text)\n \n def forward(self, y):\n text_features = self.text_encoder(y) \n return F.normalize(text_features, dim=-1)\n \n\nclass ImageEncoder(nn.Module):\n \"\"\"\n The half UNet model with attention and timestep embedding.\n \n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n use_checkpoint=False,\n use_fp16=True,\n num_heads=1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=True,\n resblock_updown=True,\n use_new_attention_order=False,\n enc_attn_dim=None,\n cross=False,\n ):\n super().__init__()\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.num_res_blocks = num_res_blocks\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n \n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for _ in range(num_res_blocks):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=num_head_channels,\n use_new_attention_order=use_new_attention_order,\n cross=cross,\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=num_head_channels,\n use_new_attention_order=use_new_attention_order,\n cross=cross,\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n assert 
num_head_channels != -1\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n AttentionPool2d(\n (image_size // ds), ch, num_head_channels, enc_attn_dim\n ),\n )\n \n def forward(self, x, timesteps):\n \"\"\"\n Apply the model to an input batch.\n\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :return: an [N x K] Tensor of outputs.\n \"\"\"\n emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))\n \n results = []\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb)\n h = self.middle_block(h, emb).type(self.dtype)\n image_features = self.out(h)\n # normalized features\n image_features = F.normalize(image_features, dim=-1)\n return image_features" ]
[ [ "torch.nn.functional.normalize", "torch.nn.Dropout", "torch.cat", "torch.nn.Identity", "torch.nn.functional.softplus", "torch.einsum", "torch.nn.ModuleList", "numpy.log", "torch.nn.SiLU", "torch.nn.functional.interpolate", "torch.arange", "torch.ones", "torch.DoubleTensor", "torch.randn", "numpy.prod", "torch.nn.Embedding", "torch.chunk" ] ]
azhou42/tensorflow-models-private
[ "e9cf8d1b84a3842c85b1ad9621fb2f3b1c5523a9" ]
[ "research/pcl_rl/env_spec.py" ]
[ "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Utilities for environment interface with agent / tensorflow.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom six.moves import xrange\n\n\nclass spaces(object):\n discrete = 0\n box = 1\n\n\ndef get_space(space):\n if hasattr(space, 'n'):\n return space.n, spaces.discrete, None\n elif hasattr(space, 'shape'):\n return np.prod(space.shape), spaces.box, (space.low, space.high)\n\n\ndef get_spaces(spaces):\n if hasattr(spaces, 'spaces'):\n return zip(*[get_space(space) for space in spaces.spaces])\n else:\n return [(ret,) for ret in get_space(spaces)]\n\n\nclass EnvSpec(object):\n\n def __init__(self, env, try_combining_actions=True,\n discretize_actions=None):\n self.discretize_actions = discretize_actions\n\n # figure out observation space\n self.obs_space = env.observation_space\n self.obs_dims, self.obs_types, self.obs_info = get_spaces(self.obs_space)\n\n # figure out action space\n self.act_space = env.action_space\n self.act_dims, self.act_types, self.act_info = get_spaces(self.act_space)\n\n if self.discretize_actions:\n self._act_dims = self.act_dims[:]\n self._act_types = self.act_types[:]\n self.act_dims = []\n self.act_types = []\n for i, (dim, typ) in enumerate(zip(self._act_dims, self._act_types)):\n if typ == spaces.discrete:\n self.act_dims.append(dim)\n self.act_types.append(spaces.discrete)\n elif typ == spaces.box:\n for _ in xrange(dim):\n self.act_dims.append(self.discretize_actions)\n self.act_types.append(spaces.discrete)\n else:\n self._act_dims = None\n self._act_types = None\n\n if (try_combining_actions and\n all(typ == spaces.discrete for typ in self.act_types)):\n self.combine_actions = True\n self.orig_act_dims = self.act_dims[:]\n self.orig_act_types = self.act_types[:]\n total_act_dim = 1\n for dim in self.act_dims:\n total_act_dim *= dim\n self.act_dims = [total_act_dim]\n self.act_types = [spaces.discrete]\n else:\n self.combine_actions = False\n\n self.obs_dims_and_types = list(zip(self.obs_dims, self.obs_types))\n self.act_dims_and_types = list(zip(self.act_dims, self.act_types))\n self.total_obs_dim = sum(self.obs_dims)\n self.total_sampling_act_dim = sum(self.sampling_dim(dim, typ)\n for dim, typ in self.act_dims_and_types)\n self.total_sampled_act_dim = sum(self.act_dims)\n\n def sampling_dim(self, dim, typ):\n if typ == spaces.discrete:\n return dim\n elif typ == spaces.box:\n return 2 * dim # Gaussian mean and std\n else:\n assert False\n\n def convert_actions_to_env(self, actions):\n if self.combine_actions:\n new_actions = []\n actions = actions[0]\n for dim in self.orig_act_dims:\n new_actions.append(np.mod(actions, dim))\n actions = (actions / dim).astype('int32')\n actions = new_actions\n\n if self.discretize_actions:\n new_actions = []\n idx = 0\n 
for i, (dim, typ) in enumerate(zip(self._act_dims, self._act_types)):\n if typ == spaces.discrete:\n new_actions.append(actions[idx])\n idx += 1\n elif typ == spaces.box:\n low, high = self.act_info[i]\n cur_action = []\n for j in xrange(dim):\n cur_action.append(\n low[j] + (high[j] - low[j]) * actions[idx] /\n float(self.discretize_actions))\n idx += 1\n new_actions.append(np.hstack(cur_action))\n actions = new_actions\n\n return actions\n\n def convert_env_actions_to_actions(self, actions):\n if not self.combine_actions:\n return actions\n\n new_actions = 0\n base = 1\n for act, dim in zip(actions, self.orig_act_dims):\n new_actions = new_actions + base * act\n base *= dim\n\n return [new_actions]\n\n def convert_obs_to_list(self, obs):\n if len(self.obs_dims) == 1:\n return [obs]\n else:\n return list(obs)\n\n def convert_action_to_gym(self, action):\n if len(action) == 1:\n return action[0]\n else:\n return list(action)\n if ((not self.combine_actions or len(self.orig_act_dims) == 1) and\n (len(self.act_dims) == 1 or\n (self.discretize_actions and len(self._act_dims) == 1))):\n return action[0]\n else:\n return list(action)\n\n def initial_obs(self, batch_size):\n batched = batch_size is not None\n batch_size = batch_size or 1\n\n obs = []\n for dim, typ in self.obs_dims_and_types:\n if typ == spaces.discrete:\n obs.append(np.zeros(batch_size))\n elif typ == spaces.box:\n obs.append(np.zeros([batch_size, dim]))\n\n if batched:\n return obs\n else:\n return zip(*obs)[0]\n\n def initial_act(self, batch_size=None):\n batched = batch_size is not None\n batch_size = batch_size or 1\n\n act = []\n for dim, typ in self.act_dims_and_types:\n if typ == spaces.discrete:\n act.append(-np.ones(batch_size))\n elif typ == spaces.box:\n act.append(-np.ones([batch_size, dim]))\n\n if batched:\n return act\n else:\n return zip(*act)[0]\n\n def is_discrete(self, typ):\n return typ == spaces.discrete\n\n def is_box(self, typ):\n return typ == spaces.box\n" ]
[ [ "numpy.zeros", "numpy.ones", "numpy.prod", "numpy.hstack", "numpy.mod" ] ]
kamakazikamikaze/wotc-bot-twitter
[ "d5f16654529ac3bb7ba936f156d8d20cbc26ef29" ]
[ "bot.py" ]
[ "from argparse import ArgumentParser\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta\nfrom elasticsearch6 import Elasticsearch\nfrom json import dump, load\nfrom math import pi, sin, cos\nfrom matplotlib import pyplot as plt\nfrom matplotlib import dates as mdates\nfrom matplotlib import ticker as mtick\nfrom requests import get\nfrom tweepy import OAuthHandler, API\nimport traceback\n\n\n# Multi-day, use gte\nbattles_query = {\n \"aggs\": {\n \"2\": {\n \"date_histogram\": {\n \"field\": \"date\",\n \"interval\": \"1d\",\n \"min_doc_count\": 0\n },\n \"aggs\": {\n \"3\": {\n \"terms\": {\n \"field\": \"console.keyword\",\n \"size\": 2,\n \"order\": {\n \"1\": \"desc\"\n },\n \"min_doc_count\": 0\n },\n \"aggs\": {\n \"1\": {\n \"sum\": {\n \"field\": \"battles\"\n }\n }\n }\n }\n }\n }\n },\n \"size\": 0,\n \"_source\": {\"excludes\": []},\n \"stored_fields\": [\"*\"],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"date\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\"match_all\": {}},\n {\"match_all\": {}},\n {\n \"range\": {\n \"date\": {\n \"gte\": None,\n \"lte\": None,\n \"format\": \"date\"\n }\n }\n }\n ],\n \"filter\": [],\n \"should\": [],\n \"must_not\": []\n }\n }\n}\n\n# Multi-day, use gte\nplayers_query = {\n \"aggs\": {\n \"2\": {\n \"date_histogram\": {\n \"field\": \"date\",\n \"interval\": \"1d\",\n \"min_doc_count\": 0\n },\n \"aggs\": {\n \"3\": {\n \"terms\": {\n \"field\": \"console.keyword\",\n \"size\": 2,\n \"order\": {\n \"_count\": \"desc\"\n },\n \"min_doc_count\": 0\n }\n }\n }\n }\n },\n \"size\": 0,\n \"_source\": {\"excludes\": []},\n \"stored_fields\": [\"*\"],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"date\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\"match_all\": {}},\n {\"match_all\": {}},\n {\n \"range\": {\n \"date\": {\n \"gte\": None,\n \"lte\": None,\n \"format\": \"date\"\n }\n }\n }\n ],\n \"filter\": [],\n \"should\": [],\n \"must_not\": []\n }\n }\n}\n\nunique_count_query = {\n \"aggs\": {\n \"2\": {\n \"terms\": {\n \"field\": \"console.keyword\",\n \"size\": 2,\n \"order\": {\n \"1\": \"desc\"\n }\n },\n \"aggs\": {\n \"1\": {\n \"cardinality\": {\n \"field\": \"account_id\"\n }\n }\n }\n }\n },\n \"size\": 0,\n \"_source\": {\"excludes\": []},\n \"stored_fields\": [\"*\"],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"date\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\"match_all\": {}},\n {\"match_all\": {}},\n {\n \"range\": {\n \"date\": {\n \"gte\": None,\n \"lte\": None,\n \"format\": \"date\"\n }\n }\n }\n ],\n \"filter\": [],\n \"should\": [],\n \"must_not\": []\n }\n }\n}\n\nnew_players_query = {\n \"aggs\": {\n \"2\": {\n \"date_histogram\": {\n \"field\": \"created_at\",\n \"interval\": \"1d\",\n \"min_doc_count\": 0\n },\n \"aggs\": {\n \"3\": {\n \"terms\": {\n \"field\": \"console.keyword\",\n \"size\": 2,\n \"order\": {\n \"_count\": \"desc\"\n },\n \"min_doc_count\": 0\n }\n }\n }\n }\n },\n \"size\": 0,\n \"_source\": {\"excludes\": []},\n \"stored_fields\": [\"*\"],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"created_at\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\"match_all\": {}},\n {\"match_all\": {}},\n {\n \"range\": {\n \"created_at\": {\n \"gte\": None,\n \"lt\": None,\n \"format\": \"date\"\n }\n }\n }\n ],\n \"filter\": [],\n 
\"should\": [],\n \"must_not\": []\n }\n }\n}\n\npersonal_players_query = {\n 'sort': [],\n '_source': {'excludes': []},\n 'aggs': {\n '2': {\n 'date_histogram': {\n 'field': 'date',\n 'interval': '1d',\n 'min_doc_count': 0\n }\n }\n },\n 'stored_fields': ['_source'],\n 'script_fields': {},\n 'docvalue_fields': [{'field': 'date', 'format': 'date_time'}],\n 'query': {\n 'bool': {\n 'must': [\n {'match_all': {}},\n {\n 'range': {\n 'date': {\n 'gt': None,\n 'lte': None,\n 'format': 'date'\n }\n }\n }\n ],\n 'filter': [],\n 'should': [],\n 'must_not': []\n }\n },\n 'size': 500\n}\n\naccounts_per_battles_range_query = {\n 'aggs': {\n '2': {\n 'range': {\n 'field': 'battles',\n 'ranges': [\n {'from': 1, 'to': 5},\n {'from': 5, 'to': 10},\n {'from': 10, 'to': 20},\n {'from': 20, 'to': 30},\n {'from': 30, 'to': 40},\n {'from': 40, 'to': 50},\n {'from': 50}\n ],\n 'keyed': True\n },\n 'aggs': {\n '3': {\n 'terms': {\n 'field': 'console.keyword',\n 'size': 2,\n 'order': {'_count': 'desc'}\n }\n }\n }\n }\n },\n 'size': 0,\n '_source': {'excludes': []},\n 'stored_fields': ['*'],\n 'script_fields': {},\n 'docvalue_fields': [{'field': 'date', 'format': 'date_time'}],\n 'query': {\n 'bool': {\n 'must': [\n {'match_all': {}},\n {'match_all': {}},\n {'range': {'date': {'gt': None, 'lte': None, 'format': 'date'}}}\n ],\n 'filter': [],\n 'should': [],\n 'must_not': []\n }\n }\n}\n\nfive_battles_a_day_query = {\n 'aggs': {\n '4': {\n 'date_histogram': {\n 'field': 'date',\n 'interval': '1d',\n 'min_doc_count': 0\n },\n 'aggs': {\n '3': {\n 'terms': {\n 'field': 'console.keyword',\n 'size': 2,\n 'order': {'_count': 'desc'}\n },\n 'aggs': {\n '2': {\n 'range': {\n 'field': 'battles',\n 'ranges': [{'from': 5, 'to': None}],\n 'keyed': True\n }\n }\n }\n }\n }\n }\n },\n 'size': 0,\n '_source': {'excludes': []},\n 'stored_fields': ['*'],\n 'script_fields': {},\n 'docvalue_fields': [{'field': 'date', 'format': 'date_time'}],\n 'query': {\n 'bool': {\n 'must': [\n {'match_all': {}},\n {'match_all': {}},\n {\n 'range': {\n 'date': {\n 'gte': None,\n 'lte': None,\n 'format': 'date'\n }\n }\n }\n ],\n 'filter': [],\n 'should': [],\n 'must_not': []\n }\n }\n}\n\nCW_TANKS = 'ASSIGN `build_cw_tanks_list(config)` TO ME'\n\ncw_popular_tanks_query = {\n \"aggs\": {\n \"2\": {\n \"date_histogram\": {\n \"field\": \"date\",\n \"interval\": \"1d\",\n \"min_doc_count\": 0\n },\n \"aggs\": {\n \"4\": {\n \"terms\": {\n \"field\": \"console.keyword\",\n \"size\": 5,\n \"order\": {\n \"1\": \"desc\"\n }\n },\n \"aggs\": {\n \"1\": {\n \"sum\": {\n \"field\": \"battles\"\n }\n },\n \"3\": {\n \"terms\": {\n \"field\": \"tank_id\",\n \"size\": 5,\n \"order\": {\n \"1\": \"desc\"\n }\n },\n \"aggs\": {\n \"1\": {\n \"sum\": {\n \"field\": \"battles\"\n }\n }\n }\n }\n }\n }\n }\n }\n },\n \"size\": 0,\n \"_source\": {\n \"excludes\": []\n },\n \"stored_fields\": [\n \"*\"\n ],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"date\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"query_string\": {\n \"query\": CW_TANKS,\n \"analyze_wildcard\": True,\n \"default_field\": \"*\"\n }\n },\n {\n \"range\": {\n \"date\": {\n \"gte\": None,\n \"lte\": None,\n \"format\": \"date\"\n }\n }\n }\n ],\n \"filter\": [],\n \"should\": [],\n \"must_not\": []\n }\n }\n}\n\nww2_popular_tanks_query = {\n \"aggs\": {\n \"2\": {\n \"date_histogram\": {\n \"field\": \"date\",\n \"interval\": \"30m\",\n \"time_zone\": \"America/Chicago\",\n \"min_doc_count\": 0\n },\n \"aggs\": {\n \"4\": {\n 
\"terms\": {\n \"field\": \"console.keyword\",\n \"size\": 5,\n \"order\": {\n \"1\": \"desc\"\n }\n },\n \"aggs\": {\n \"1\": {\n \"sum\": {\n \"field\": \"battles\"\n }\n },\n \"3\": {\n \"terms\": {\n \"field\": \"tank_id\",\n \"size\": 5,\n \"order\": {\n \"1\": \"desc\"\n }\n },\n \"aggs\": {\n \"1\": {\n \"sum\": {\n \"field\": \"battles\"\n }\n }\n }\n }\n }\n }\n }\n }\n },\n \"size\": 0,\n \"_source\": {\n \"excludes\": []\n },\n \"stored_fields\": [\n \"*\"\n ],\n \"script_fields\": {},\n \"docvalue_fields\": [\n {\n \"field\": \"date\",\n \"format\": \"date_time\"\n }\n ],\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"query_string\": {\n \"query\": 'NOT (' + CW_TANKS + ')',\n \"analyze_wildcard\": True,\n \"default_field\": \"*\"\n }\n },\n {\n \"range\": {\n \"date\": {\n \"gte\": None,\n \"lte\": None,\n \"format\": \"date\"\n }\n }\n }\n ],\n \"filter\": [],\n \"should\": [],\n \"must_not\": []\n }\n }\n}\n\nBATTLES_PNG = '/tmp/battles.png'\nPLAYERS_PNG = '/tmp/players.png'\nNEWPLAYERS_PNG = '/tmp/newplayers.png'\nAVERAGE_PNG = '/tmp/average.png'\nACCOUNTAGE_PNG = '/tmp/accountage.png'\nBATTLERANGE_PNG = '/tmp/battlerange.png'\nFIVEADAY_PNG = '/tmp/fiveaday.png'\nPLAYERSLONG_PNG = '/tmp/playerslong.png'\nBATTLESLONG_PNG = '/tmp/battleslong.png'\nAVERAGELONG_PNG = '/tmp/averagelong.png'\nMODEBREAKDOWN_PNG = '/tmp/modebreakdown.png'\nMODEBREAKDOWNLONG_PNG = '/tmp/modebreakdownlong.png'\nMODEBREAKDOWNPERCENT_PNG = '/tmp/modebreakdownpercent.png'\nMODEBREAKDOWNPERCENTLONG_PNG = '/tmp/modebreakdownpercentlong.png'\n\ndef manage_config(mode, filename='config.json'):\n if mode == 'read':\n with open(filename) as f:\n return load(f)\n elif mode == 'create':\n with open(filename, 'w') as f:\n dump(\n {\n 'days': 14,\n 'long term': 90,\n 'omit errors long term': True,\n 'twitter': {\n 'api key': '',\n 'api secret key': '',\n 'access token': '',\n 'access token secret': '',\n 'message': \"Today's update on the active player count and total battles per platform for #worldoftanksconsole.\"\n },\n 'elasticsearch': {\n 'hosts': ['127.0.0.1']\n },\n 'battle index': 'diff_battles-*',\n 'tank index': 'diff_tanks-*',\n 'unique': [7, 14, 30],\n 'account age': [7, 30, 90, 180, 365, 730, 1095, 1460, 1825],\n 'battle ranges': [\n {\"from\": 1, \"to\": 5},\n {\"from\": 5, \"to\": 10},\n {\"from\": 10, \"to\": 20},\n {\"from\": 20, \"to\": 30},\n {\"from\": 30, \"to\": 40},\n {\"from\": 40, \"to\": 50},\n {\"from\": 50}\n ],\n 'watermark text': '@WOTC_Tracker',\n 'wg api key': 'DEMO'\n }\n )\n\n\ndef query_es_for_graphs(config):\n now = datetime.utcnow()\n then = now - timedelta(days=config['days'])\n es = Elasticsearch(**config['elasticsearch'])\n # Setup queries\n battles_query['query']['bool'][\n 'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')\n battles_query['query']['bool'][\n 'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')\n players_query['query']['bool'][\n 'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')\n players_query['query']['bool'][\n 'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')\n new_players_query['query']['bool'][\n 'must'][-1]['range']['created_at']['gte'] = then.strftime('%Y-%m-%d')\n new_players_query['query']['bool'][\n 'must'][-1]['range']['created_at']['lt'] = now.strftime('%Y-%m-%d')\n # Query Elasticsearch\n battles = es.search(index=config['battle index'], body=battles_query)\n players = es.search(index=config['battle index'], body=players_query)\n newplayers = es.search(index='players', 
body=new_players_query)\n # Filter numbers\n battles_xbox = []\n battles_ps = []\n players_xbox = []\n players_ps = []\n newplayers_xbox = []\n newplayers_ps = []\n averages_xbox = []\n averages_ps = []\n for bucket in battles['aggregations']['2']['buckets']:\n if not bucket['3']['buckets']:\n battles_xbox.append(0)\n battles_ps.append(0)\n continue\n for subbucket in bucket['3']['buckets']:\n if subbucket['key'] == 'xbox':\n battles_xbox.append(subbucket['1']['value'])\n else:\n battles_ps.append(subbucket['1']['value'])\n for bucket in players['aggregations']['2']['buckets']:\n if not bucket['3']['buckets']:\n players_xbox.append(0)\n players_ps.append(0)\n continue\n for subbucket in bucket['3']['buckets']:\n if subbucket['key'] == 'xbox':\n players_xbox.append(subbucket['doc_count'])\n else:\n players_ps.append(subbucket['doc_count'])\n for bucket in newplayers['aggregations']['2']['buckets']:\n if not bucket['3']['buckets']:\n newplayers_xbox.append(0)\n newplayers_ps.append(0)\n for subbucket in bucket['3']['buckets']:\n if subbucket['key'] == 'xbox':\n newplayers_xbox.append(subbucket['doc_count'])\n else:\n newplayers_ps.append(subbucket['doc_count'])\n for b, p in zip(battles_xbox, players_xbox):\n averages_xbox.append(b / p)\n for b, p in zip(battles_ps, players_ps):\n averages_ps.append(b / p)\n dates = [b['key_as_string'].split('T')[0] for b in players[\n 'aggregations']['2']['buckets']]\n newplayers_dates = [b['key_as_string'].split('T')[0] for b in newplayers[\n 'aggregations']['2']['buckets']]\n return dates, battles_xbox, battles_ps, players_xbox, players_ps, newplayers_dates, newplayers_xbox, newplayers_ps, averages_xbox, averages_ps\n\n\ndef query_es_for_unique(config):\n now = datetime.utcnow()\n es = Elasticsearch(**config['elasticsearch'])\n unique = {'Xbox': [], 'Playstation': []}\n unique_count_query['query']['bool'][\n 'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')\n for earliest in config['unique']:\n unique_count_query['query']['bool']['must'][-1]['range']['date'][\n 'gte'] = (now - timedelta(days=earliest)).strftime('%Y-%m-%d')\n results = es.search(index=config['battle index'], body=unique_count_query)\n for bucket in results['aggregations']['2']['buckets']:\n if bucket['key'] == 'xbox':\n unique['Xbox'].append(bucket['1']['value'])\n else:\n unique['Playstation'].append(bucket['1']['value'])\n return unique\n\n\ndef create_activity_graphs(dates, battles_xbox, battles_ps, players_xbox, players_ps, newplayers_dates, newplayers_xbox, newplayers_ps, averages_xbox, averages_ps, watermark_text='@WOTC_Tracker'):\n shifted_dates = [(datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d') for d in dates]\n # Players PNG\n plt.clf()\n fig = plt.figure(figsize=(11, 8), dpi=150)\n fig.suptitle('Active Accounts Per Platform')\n # ax1 = plt.axes()\n ax1 = fig.add_subplot(111)\n ax1.tick_params(axis='x', labelrotation=45)\n ax1.ticklabel_format(useOffset=False, style='plain')\n ax1.set_xticklabels(shifted_dates, ha='right')\n ax1.plot(shifted_dates, players_xbox, color='green', linewidth=2, label='Xbox')\n ax1.plot(shifted_dates, players_ps, color='blue', linewidth=2, label='Playstation')\n ax1.grid()\n ax1.legend()\n ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n fig.savefig(PLAYERS_PNG)\n del fig\n # Battles PNG\n plt.clf()\n fig = plt.figure(figsize=(11, 8), dpi=150)\n fig.suptitle('Total Battles Per Platform')\n # ax = plt.axes()\n ax1 = fig.add_subplot(111)\n 
ax1.tick_params(axis='x', labelrotation=45)\n ax1.ticklabel_format(useOffset=False, style='plain')\n ax1.set_xticklabels(shifted_dates, ha='right')\n ax1.plot(shifted_dates, battles_xbox, color='green', linewidth=2, label='Xbox')\n ax1.plot(shifted_dates, battles_ps, color='blue', linewidth=2, label='Playstation')\n ax1.grid()\n ax1.legend()\n ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n fig.savefig(BATTLES_PNG)\n del fig\n # New Players PNG\n plt.clf()\n fig = plt.figure(figsize=(11, 8), dpi=150)\n fig.suptitle('New Accounts Per Platform')\n # ax = plt.axes()\n ax1 = fig.add_subplot(111)\n ax1.tick_params(axis='x', labelrotation=45)\n ax1.ticklabel_format(useOffset=False, style='plain')\n ax1.set_xticklabels(dates, ha='right')\n ax1.plot(newplayers_dates, newplayers_xbox, color='green', linewidth=2, label='Xbox')\n ax1.plot(newplayers_dates, newplayers_ps, color='blue', linewidth=2, label='Playstation')\n ax1.grid()\n ax1.legend()\n ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n fig.savefig(NEWPLAYERS_PNG)\n del fig\n # Averages PNG\n plt.clf()\n fig = plt.figure(figsize=(11, 8), dpi=150)\n fig.suptitle('Average Battles Played Per Account Per Platform')\n # ax = plt.axes()\n ax1 = fig.add_subplot(111)\n ax1.tick_params(axis='x', labelrotation=45)\n ax1.ticklabel_format(useOffset=False, style='plain')\n ax1.set_xticklabels(shifted_dates, ha='right')\n ax1.plot(shifted_dates, averages_xbox, color='green', linewidth=2, label='Xbox')\n ax1.plot(shifted_dates, averages_ps, color='blue', linewidth=2, label='Playstation')\n ax1.grid()\n ax1.legend()\n ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n fig.savefig(AVERAGE_PNG)\n del fig\n\n\ndef query_es_for_active_accounts(config):\n now = datetime.utcnow()\n then = now - timedelta(days=1)\n es = Elasticsearch(**config['elasticsearch'])\n personal_players_query['query']['bool']['must'][-1]['range']['date']['gt'] = then.strftime('%Y-%m-%d')\n personal_players_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')\n\n # Get all account IDs of active players\n hits = []\n response = es.search(index=config['battle index'], body=personal_players_query, scroll='30s')\n while len(response['hits']['hits']):\n hits.extend(response['hits']['hits'])\n response = es.scroll(scroll_id=response['_scroll_id'], scroll='3s')\n\n flattened = [doc['_source']['account_id'] for doc in hits]\n\n # Query account information to get age details\n player_info_extracted = []\n for i in range(0, len(flattened), 10000):\n active_player_info = es.mget(index='players', doc_type='player', body={'ids': flattened[i:i+10000]}, _source=['account_id', 'console', 'created_at'])\n player_info_extracted.extend([doc['_source'] for doc in active_player_info['docs']])\n\n sorted_player_info = sorted(player_info_extracted, key = lambda d: d['created_at'])\n buckets = {\n \"xbox\": OrderedDict((v, 0) for v in sorted(config['account age'])),\n \"ps\": OrderedDict((v, 0) for v in sorted(config['account age'])),\n \"all\": OrderedDict((v, 0) for v in sorted(config['account age']))\n }\n\n # Sum account ages based on range of age\n buckets['xbox']['other'] = 0\n buckets['ps']['other'] = 0\n buckets['all']['other'] = 0\n for player in sorted_player_info:\n delta = now - datetime.strptime(player['created_at'], '%Y-%m-%dT%H:%M:%S')\n for key in 
buckets['all'].keys():\n if not isinstance(key, int):\n buckets['all'][key] += 1\n buckets[player['console']][key] += 1\n break\n elif delta.total_seconds() <= (key * 24 * 60 * 60):\n buckets['all'][key] += 1\n buckets[player['console']][key] += 1\n break\n return buckets\n\n\ndef calc_label(value):\n if value < 7:\n return '{} day{}'.format(value, '' if value == 1 else 's')\n elif 7 <= value < 30:\n return '{} week{}'.format(value // 7, '' if value // 7 == 1 else 's')\n elif 30 <= value < 365:\n return '{} month{}'.format(value // 30, '' if value // 30 == 1 else 's')\n else:\n return '{} year{}'.format(value // 365, '' if value // 365 == 1 else 's')\n\n\ndef calc_angle(wedge):\n return (wedge.theta2 - wedge.theta1) / 2 + wedge.theta1\n\n\ndef create_account_age_chart(buckets, watermark_text='@WOTC_Tracker'):\n plt.clf()\n fig = plt.figure(figsize=(11, 8), dpi=150)\n then = datetime.utcnow() - timedelta(days=1)\n fig.suptitle(\"Breakdown of active accounts by account age for {}\".format(then.strftime('%Y-%m-%d')))\n ax1 = plt.subplot2grid((11, 1), (0, 0), rowspan=10)\n ax1.axis('equal')\n size = 0.125\n\n outer_labels = []\n prev = 0\n for key in buckets['all'].keys():\n if not isinstance(key, int):\n outer_labels.append('>' + calc_label(prev))\n else:\n outer_labels.append('{} - {}'.format(calc_label(prev), calc_label(key)))\n prev = key\n\n # Outer pie chart\n outer_cmap = plt.get_cmap(\"binary\")\n outer_colors = outer_cmap([i * 10 for i in range(10, len(buckets['all'].keys()) + 11)])\n outer_wedges, outer_text, outer_autotext = ax1.pie(\n buckets['all'].values(),\n explode=[0.1 for __ in outer_labels],\n radius=1,\n colors=outer_colors,\n wedgeprops=dict(width=size, edgecolor='w'),\n autopct='%1.1f%%',\n pctdistance=1.1\n #labels=outer_labels\n )\n\n bbox_props = dict(boxstyle='square,pad=0.3', fc='w', ec='k', lw=0.72)\n kw = dict(arrowprops=dict(arrowstyle='-'), bbox=bbox_props, zorder=0, va='center')\n for i, wedge in enumerate(outer_wedges):\n angle = calc_angle(wedge)\n y = sin(angle * (pi / 180))\n x = cos(angle * (pi / 180))\n align = 'right' if x < 0 else 'left'\n connectionstyle = 'angle,angleA=0,angleB={}'.format(angle)\n kw['arrowprops'].update({'connectionstyle': connectionstyle})\n ax1.annotate(\n outer_labels[i],\n xy=(x, y),\n xytext=(1.35*(-1 if x < 0 else 1), 1.4*y),\n horizontalalignment=align,\n **kw\n )\n\n # Inner pie chart\n inner_cmap = plt.get_cmap(\"tab20c\")\n pie_flat = list(zip(buckets['xbox'].values(), buckets['ps'].values()))\n inner_labels = []\n for pair in pie_flat:\n inner_labels.extend(['xbox', 'ps'])\n inner_colors = inner_cmap([1 if console == 'ps' else 9 for console in inner_labels])\n inner_wedges, inner_text, inner_autotext = ax1.pie(\n [item for sublist in pie_flat for item in sublist],\n explode=[0.1 for __ in inner_labels],\n radius=1.05-size,\n colors=inner_colors,\n wedgeprops=dict(width=size, edgecolor='w'),\n autopct='',\n pctdistance=0.9\n )\n\n # Replace inner text with actual values\n for i, label, wedge, text in zip(range(len(inner_wedges)), inner_labels, inner_wedges, inner_autotext):\n text.set_text(buckets[label]['other' if i // 2 > len(buckets['all'].keys()) - 1 else list(buckets['all'].keys())[i // 2]])\n angle = calc_angle(wedge)\n if 90 < angle < 270:\n angle += 180\n text.set_rotation(angle)\n\n # Patch inner wedges to group together in explosion\n # Influenced by: https://stackoverflow.com/a/20556088/1993468\n groups = [[i, i+1] for i in range(0, len(inner_wedges), 2)]\n radfraction = 0.1\n for group in groups:\n angle = 
((inner_wedges[group[-1]].theta2 + inner_wedges[group[0]].theta1)/2) * (pi / 180)\n for g in group:\n wedge = inner_wedges[g]\n wedge.set_center((radfraction * wedge.r * cos(angle), radfraction * wedge.r * sin(angle)))\n\n # Add subplot in second row, below nested pie chart\n ax2 = plt.subplot2grid((11, 1), (10, 0))\n ax2.axhline(color='black', y=0)\n # Xbox, Playstation\n totals = [sum(buckets['xbox'].values()), sum(buckets['ps'].values()), sum(buckets['all'].values())]\n ypos = -0.18\n bottom = 0\n height = 0.1\n for i in range(len(totals) - 1):\n width = totals[i] / totals[-1]\n ax2.barh(ypos, width, height, left=bottom, color=inner_colors[i])\n xpos = bottom + ax2.patches[i].get_width() / 2\n bottom += width\n ax2.text(xpos, ypos, '{} ({:.1f}%)'.format(totals[i], (totals[i] / totals[-1]) * 100), ha='center', va='center')\n\n ax2.axis('off')\n ax2.set_title('Total Active Players', y=0.325)\n ax2.set_xlim(0, 1)\n\n ax1.legend(inner_wedges[-2:], ['xbox', 'ps'], loc='lower right')\n fig.text(0.5, 0.5, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n fig.savefig(ACCOUNTAGE_PNG)\n del fig\n\n\ndef query_es_for_accounts_by_battles(config):\n now = datetime.utcnow()\n then = now - timedelta(days=1)\n es = Elasticsearch(**config['elasticsearch'])\n accounts_per_battles_range_query['query']['bool']['must'][-1]['range']['date']['gt'] = then.strftime('%Y-%m-%d')\n accounts_per_battles_range_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')\n if 'battle ranges' in config:\n accounts_per_battles_range_query['aggs']['2']['range']['ranges'] = config['battle ranges']\n\n response = es.search(index=config['battle index'], body=accounts_per_battles_range_query)\n buckets = {\n \"xbox\": OrderedDict((v, 0) for v in response['aggregations']['2']['buckets'].keys()),\n \"ps\": OrderedDict((v, 0) for v in response['aggregations']['2']['buckets'].keys()),\n \"all\": OrderedDict((v, 0) for v in response['aggregations']['2']['buckets'].keys()),\n }\n for key, value in response['aggregations']['2']['buckets'].items():\n buckets['all'][key] = value['doc_count']\n for bucket in value['3']['buckets']:\n buckets[bucket['key']][key] = bucket['doc_count']\n return buckets\n\n\ndef create_accounts_by_battles_chart(buckets, watermark_text='@WOTC_Tracker'):\n plt.clf()\n fig = plt.figure(figsize=(11, 8), dpi=150)\n then = datetime.utcnow() - timedelta(days=1)\n fig.suptitle(\"Breakdown of accounts by number of battles played for {}\".format(then.strftime('%Y-%m-%d')))\n # ax1 = plt.subplot2grid((11, 1), (0, 0), rowspan=10)\n ax1 = plt.axes()\n ax1.axis('equal')\n size = 0.125\n\n outer_labels = []\n prev = 0\n for key in buckets['all'].keys():\n parts = key.split('-')\n outer_labels.append('{}-{} battles'.format(int(float(parts[0])) if parts[0] != '*' else parts[0], int(float(parts[1])) - 1 if parts[1] != '*' else parts[1]))\n\n # Outer pie chart\n outer_cmap = plt.get_cmap(\"binary\")\n outer_colors = outer_cmap([i * 10 for i in range(10, len(buckets['all'].keys()) + 11)])\n outer_wedges, outer_text, outer_autotext = ax1.pie(\n buckets['all'].values(),\n explode=[0.1 for __ in outer_labels],\n radius=1,\n colors=outer_colors,\n wedgeprops=dict(width=size, edgecolor='w'),\n autopct='%1.1f%%',\n pctdistance=1.1\n #labels=outer_labels\n )\n\n bbox_props = dict(boxstyle='square,pad=0.3', fc='w', ec='k', lw=0.72)\n kw = dict(arrowprops=dict(arrowstyle='-'), bbox=bbox_props, zorder=0, va='center')\n for i, wedge in 
enumerate(outer_wedges):\n angle = calc_angle(wedge)\n y = sin(angle * (pi / 180))\n x = cos(angle * (pi / 180))\n align = 'right' if x < 0 else 'left'\n connectionstyle = 'angle,angleA=0,angleB={}'.format(angle)\n kw['arrowprops'].update({'connectionstyle': connectionstyle})\n ax1.annotate(\n outer_labels[i],\n xy=(x, y),\n xytext=(1.35*(-1 if x < 0 else 1), 1.4*y),\n horizontalalignment=align,\n **kw\n )\n\n # Inner pie chart\n inner_cmap = plt.get_cmap(\"tab20c\")\n pie_flat = list(zip(buckets['xbox'].values(), buckets['ps'].values()))\n inner_labels = []\n for pair in pie_flat:\n inner_labels.extend(['xbox', 'ps'])\n inner_colors = inner_cmap([1 if console == 'ps' else 9 for console in inner_labels])\n inner_wedges, inner_text, inner_autotext = ax1.pie(\n [item for sublist in pie_flat for item in sublist],\n explode=[0.1 for __ in inner_labels],\n radius=1.05-size,\n colors=inner_colors,\n wedgeprops=dict(width=size, edgecolor='w'),\n autopct='',\n pctdistance=0.9\n )\n\n # Replace inner text with actual values\n for i, label, wedge, text in zip(range(len(inner_wedges)), inner_labels, inner_wedges, inner_autotext):\n text.set_text(buckets[label]['other' if i // 2 > len(buckets['all'].keys()) - 1 else list(buckets['all'].keys())[i // 2]])\n angle = calc_angle(wedge)\n if 90 < angle < 270:\n angle += 180\n text.set_rotation(angle)\n\n # Patch inner wedges to group together in explosion\n # Influenced by: https://stackoverflow.com/a/20556088/1993468\n groups = [[i, i+1] for i in range(0, len(inner_wedges), 2)]\n radfraction = 0.1\n for group in groups:\n angle = ((inner_wedges[group[-1]].theta2 + inner_wedges[group[0]].theta1)/2) * (pi / 180)\n for g in group:\n wedge = inner_wedges[g]\n wedge.set_center((radfraction * wedge.r * cos(angle), radfraction * wedge.r * sin(angle)))\n\n ax1.legend(inner_wedges[-2:], ['xbox', 'ps'], loc='lower right')\n fig.text(0.5, 0.5, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n fig.savefig(BATTLERANGE_PNG)\n del fig\n\n\ndef query_five_battles_a_day_minimum(config):\n now = datetime.utcnow()\n then = now - timedelta(days=config['days'])\n es = Elasticsearch(**config['elasticsearch'])\n five_battles_a_day_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')\n five_battles_a_day_query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')\n response = es.search(index=config['battle index'], body=five_battles_a_day_query)\n\n buckets = {\n \"xbox\": OrderedDict(),\n \"ps\": OrderedDict(),\n \"all\": OrderedDict()\n }\n\n for bucket in response['aggregations']['4']['buckets']:\n key = bucket['key_as_string'].split('T')[0]\n buckets['xbox'][key] = 0\n buckets['ps'][key] = 0\n buckets['all'][key] = 0\n for subbucket in bucket['3']['buckets']:\n buckets[subbucket['key']][key] = subbucket['2']['buckets']['5.0-*']['doc_count']\n buckets['all'][key] = buckets['xbox'][key] + buckets['ps'][key]\n\n return buckets\n\n\n# Requested by Khorne Dog in the forums\ndef create_five_battles_minimum_chart(buckets, watermark_text='@WOTC_Tracker'):\n plt.clf()\n fig = plt.figure(figsize=(11, 8), dpi=150)\n fig.suptitle(\"Number of accounts having played at least 5 battles\")\n ax1 = fig.add_subplot(111)\n\n width = 0.25\n keys = [datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1) for d in buckets['all'].keys()]\n xkeys = [d - timedelta(hours=3) for d in keys]\n pkeys = [d + timedelta(hours=3) for d in keys]\n xbox_bars = ax1.bar(xkeys, buckets['xbox'].values(), 
width=width, color='g')\n ps_bars = ax1.bar(pkeys, buckets['ps'].values(), width=width, color='b')\n ax1.table(\n cellText=[\n list(buckets['xbox'].values()),\n list(buckets['ps'].values()),\n list(buckets['all'].values())],\n rowLabels=['xbox', 'ps', 'all'],\n colLabels=[d.strftime('%Y-%m-%d') for d in keys],\n loc='bottom')\n ax1.set_ylabel('Accounts')\n ax1.set_xticks([])\n ax1.legend((xbox_bars[0], ps_bars[0]), ('xbox', 'ps'))\n ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n fig.savefig(FIVEADAY_PNG)\n\n\ndef query_long_term_data(config, filter_server_failures=True):\n now = datetime.utcnow()\n then = now - timedelta(days=config.get('long term', 90) + 1)\n es = Elasticsearch(**config['elasticsearch'])\n # Setup queries\n battles_query['query']['bool'][\n 'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')\n battles_query['query']['bool'][\n 'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')\n players_query['query']['bool'][\n 'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')\n players_query['query']['bool'][\n 'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')\n\n players = es.search(index=config['battle index'], body=players_query)\n battles = es.search(index=config['battle index'], body=battles_query)\n\n players_buckets = {\n \"xbox\": OrderedDict(),\n \"ps\": OrderedDict(),\n \"all\": OrderedDict()\n }\n\n battles_buckets = {\n \"xbox\": OrderedDict(),\n \"ps\": OrderedDict(),\n \"all\": OrderedDict()\n }\n\n average_battles_per_day_buckets = {\n \"xbox\": OrderedDict(),\n \"ps\": OrderedDict(),\n \"all\": OrderedDict()\n }\n\n for bucket in players['aggregations']['2']['buckets']:\n key = bucket['key_as_string'].split('T')[0]\n players_buckets['xbox'][key] = 0\n players_buckets['ps'][key] = 0\n players_buckets['all'][key] = 0\n if not bucket['3']['buckets']:\n continue\n for subbucket in bucket['3']['buckets']:\n players_buckets[subbucket['key']][key] = subbucket['doc_count']\n players_buckets['all'][key] = players_buckets['xbox'][key] + players_buckets['ps'][key]\n\n for bucket in battles['aggregations']['2']['buckets']:\n key = bucket['key_as_string'].split('T')[0]\n battles_buckets['xbox'][key] = 0\n battles_buckets['ps'][key] = 0\n battles_buckets['all'][key] = 0\n if not bucket['3']['buckets']:\n continue\n for subbucket in bucket['3']['buckets']:\n battles_buckets[subbucket['key']][key] = subbucket['1']['value']\n battles_buckets['all'][key] = battles_buckets['xbox'][key] + battles_buckets['ps'][key]\n\n if filter_server_failures:\n skip_next = False\n for key, value in players_buckets['ps'].items():\n # 20,000 is way below normal. Sometimes the server dies partway through. 
This day should be skipped\n if value < 20000:\n players_buckets['xbox'][key] = None\n players_buckets['ps'][key] = None\n players_buckets['all'][key] = None\n battles_buckets['xbox'][key] = None\n battles_buckets['ps'][key] = None\n battles_buckets['all'][key] = None\n skip_next = True\n elif skip_next:\n players_buckets['xbox'][key] = None\n players_buckets['ps'][key] = None\n players_buckets['all'][key] = None\n battles_buckets['xbox'][key] = None\n battles_buckets['ps'][key] = None\n battles_buckets['all'][key] = None\n skip_next = False\n\n for key in players_buckets['all'].keys():\n if players_buckets['xbox'][key] is None:\n average_battles_per_day_buckets['all'][key] = None\n average_battles_per_day_buckets['xbox'][key] = None\n average_battles_per_day_buckets['ps'][key] = None\n else:\n average_battles_per_day_buckets['xbox'][key] = battles_buckets['xbox'][key] / players_buckets['xbox'][key]\n average_battles_per_day_buckets['ps'][key] = battles_buckets['ps'][key] / players_buckets['ps'][key]\n average_battles_per_day_buckets['all'][key] = (battles_buckets['xbox'][key] + battles_buckets['ps'][key]) / (players_buckets['xbox'][key] + players_buckets['ps'][key])\n\n delkey = list(players_buckets['all'].keys())[0]\n # delkey = list(battles_buckets['all'].keys())[0]\n del players_buckets['all'][key]\n del players_buckets['xbox'][key]\n del players_buckets['ps'][key]\n del battles_buckets['all'][key]\n del battles_buckets['xbox'][key]\n del battles_buckets['ps'][key]\n del average_battles_per_day_buckets['xbox'][key]\n del average_battles_per_day_buckets['ps'][key]\n del average_battles_per_day_buckets['all'][key]\n\n return players_buckets, battles_buckets, average_battles_per_day_buckets\n\n\ndef create_long_term_charts(players_buckets, battles_buckets, average_battles_per_day_buckets, watermark_text='@WOTC_Tracker'):\n dates = [datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1) for d in players_buckets['all'].keys()]\n # Players PNG\n plt.clf()\n fig = plt.figure(figsize=(24, 8), dpi=150)\n fig.suptitle('Active Accounts Per Platform (long view)')\n ax1 = fig.add_subplot(111)\n ax1.ticklabel_format(useOffset=False, style='plain')\n ax1.plot(dates, players_buckets['xbox'].values(), color='green', linewidth=2, label='Xbox')\n ax1.plot(dates, players_buckets['ps'].values(), color='blue', linewidth=2, label='Playstation')\n ax1.set_xticks(dates)\n ax1.grid()\n ax1.legend()\n ax1.text(0.5, -0.15, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))\n fig.tight_layout()\n fig.autofmt_xdate()\n # ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')\n fig.savefig(PLAYERSLONG_PNG)\n del fig\n # Battles PNG\n plt.clf()\n fig = plt.figure(figsize=(24, 8), dpi=150)\n fig.suptitle('Total Battles Per Platform (long view)')\n ax1 = fig.add_subplot(111)\n ax1.ticklabel_format(useOffset=False, style='plain')\n ax1.plot(dates, battles_buckets['xbox'].values(), color='green', linewidth=2, label='Xbox')\n ax1.plot(dates, battles_buckets['ps'].values(), color='blue', linewidth=2, label='Playstation')\n ax1.set_xticks(dates)\n ax1.grid()\n ax1.legend()\n ax1.text(0.5, -0.15, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))\n fig.tight_layout()\n fig.autofmt_xdate()\n # ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')\n fig.savefig(BATTLESLONG_PNG)\n del fig\n # Average PNG\n 
plt.clf()\n fig = plt.figure(figsize=(24, 8), dpi=150)\n fig.suptitle('Average Battles Played Per Account Per Platform (long view)')\n ax1 = fig.add_subplot(111)\n ax1.ticklabel_format(useOffset=False, style='plain')\n ax1.plot(dates, average_battles_per_day_buckets['xbox'].values(), color='green', linewidth=2, label='Xbox')\n ax1.plot(dates, average_battles_per_day_buckets['ps'].values(), color='blue', linewidth=2, label='Playstation')\n ax1.set_xticks(dates)\n ax1.grid()\n ax1.legend()\n ax1.text(0.5, -0.15, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))\n fig.tight_layout()\n fig.autofmt_xdate()\n # ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')\n fig.savefig(AVERAGELONG_PNG)\n del fig\n\n\ndef upload_long_term_charts(config):\n auth = OAuthHandler(\n config['twitter']['api key'],\n config['twitter']['api secret key'])\n auth.set_access_token(\n config['twitter']['access token'],\n config['twitter']['access token secret'])\n api = API(auth)\n playerslong = api.media_upload(PLAYERSLONG_PNG)\n battleslong = api.media_upload(BATTLESLONG_PNG)\n averagelong = api.media_upload(AVERAGELONG_PNG)\n api.update_status(\n status='Long-term view of active accounts, with downtime and multi-day catchup errors omitted',\n media_ids=[playerslong.media_id, battleslong.media_id, averagelong.media_id]\n )\n\n\ndef upload_long_term_mode_charts(config):\n auth = OAuthHandler(\n config['twitter']['api key'],\n config['twitter']['api secret key'])\n auth.set_access_token(\n config['twitter']['access token'],\n config['twitter']['access token secret'])\n api = API(auth)\n modelong = api.media_upload(MODEBREAKDOWNLONG_PNG)\n percentlong = api.media_upload(MODEBREAKDOWNPERCENTLONG_PNG)\n api.update_status(\n status='Long-term view of battles per mode',\n media_ids=[modelong.media_id, percentlong.media_id]\n )\n\n\ndef upload_activity_graphs_to_twitter(config):\n auth = OAuthHandler(\n config['twitter']['api key'],\n config['twitter']['api secret key'])\n auth.set_access_token(\n config['twitter']['access token'],\n config['twitter']['access token secret'])\n api = API(auth)\n battles = api.media_upload(BATTLES_PNG)\n players = api.media_upload(PLAYERS_PNG)\n newplayers = api.media_upload(NEWPLAYERS_PNG)\n averages = api.media_upload(AVERAGE_PNG)\n api.update_status(\n status=config['twitter']['message'],\n media_ids=[players.media_id, battles.media_id, newplayers.media_id, averages.media_id]\n )\n\n\ndef upload_account_age_graph_to_twitter(config):\n auth = OAuthHandler(\n config['twitter']['api key'],\n config['twitter']['api secret key'])\n auth.set_access_token(\n config['twitter']['access token'],\n config['twitter']['access token secret'])\n api = API(auth)\n accountage = api.media_upload(ACCOUNTAGE_PNG)\n api.update_status(\n status='Breakdown of active accounts by age per platform on #worldoftanksconsole',\n media_ids=[accountage.media_id]\n )\n\n\ndef upload_accounts_by_battles_chart_to_twitter(config):\n auth = OAuthHandler(\n config['twitter']['api key'],\n config['twitter']['api secret key'])\n auth.set_access_token(\n config['twitter']['access token'],\n config['twitter']['access token secret'])\n api = API(auth)\n battlerange = api.media_upload(BATTLERANGE_PNG)\n api.update_status(\n status='Breakdown of accounts by number of battles played on #worldoftanksconsole',\n media_ids=[battlerange.media_id]\n )\n\n\ndef upload_five_battles_minimum_chart_to_twitter(config):\n auth = 
OAuthHandler(\n config['twitter']['api key'],\n config['twitter']['api secret key'])\n auth.set_access_token(\n config['twitter']['access token'],\n config['twitter']['access token secret'])\n api = API(auth)\n fiveaday = api.media_upload(FIVEADAY_PNG)\n api.update_status(\n status='Filtering accounts per day with 5 battles minimum on #worldoftanksconsole',\n media_ids=[fiveaday.media_id]\n )\n\n\ndef share_unique_with_twitter(config, unique):\n auth = OAuthHandler(\n config['twitter']['api key'],\n config['twitter']['api secret key'])\n auth.set_access_token(\n config['twitter']['access token'],\n config['twitter']['access token secret'])\n api = API(auth)\n status = 'Unique Active Accounts For {} Over Time\\n{}'\n formatting = '{} days: {}'\n for key, values in unique.items():\n api.update_status(\n status=status.format(\n key,\n '\\n'.join(map(lambda l: formatting.format(\n config['unique'][values.index(l)], l), values))\n )\n )\n\n\ndef build_cw_tanks_list(config):\n api = 'https://api-console.worldoftanks.com/wotx/encyclopedia/vehicles/'\n params = {\n 'application_id': config['wg api key'],\n 'fields': 'era,tank_id'\n }\n data = get(api, params=params).json()['data']\n return ' OR '.join(\n list(\n map(\n lambda t: 'tank_id:{}'.format(t['tank_id']),\n filter(lambda t: t['era'] != '', data.values())\n )\n )\n )\n\n\ndef query_es_for_top_tanks(config, era):\n now = datetime.utcnow()\n then = now - timedelta(days=1)\n es = Elasticsearch(**config['elasticsearch'])\n if era == 'ww2':\n query = ww2_popular_tanks_query\n elif era == 'cw':\n query = cw_popular_tanks_query\n # Setup query\n query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')\n query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')\n # Query Elasticsearch\n response = es.search(index=config['tank index'], body=query)\n buckets = {\n 'xbox': OrderedDict(),\n 'ps': OrderedDict()\n }\n for bucket in response['aggregations']['2']['buckets']:\n for subbucket in bucket['4']['buckets']:\n key = subbucket['key']\n for tank in subbucket['3']['buckets']:\n buckets[key][tank['key']] = int(tank['1']['value'])\n return buckets\n\n\ndef query_for_tank_info(tanks):\n url = 'https://wotconsole.ru/api/tankopedia/en/{}.json'\n new_tanks = {\n 'xbox': OrderedDict(),\n 'ps': OrderedDict()\n }\n for plat, t in tanks.items():\n for tank, battles in t.items():\n response = get(url.format(tank))\n new_tanks[plat][response.json()['info']['user_string']] = battles\n new_tanks['playstation'] = new_tanks['ps']\n del new_tanks['ps']\n return new_tanks\n\n\ndef share_top_tanks(config, era, top, day):\n auth = OAuthHandler(\n config['twitter']['api key'],\n config['twitter']['api secret key'])\n auth.set_access_token(\n config['twitter']['access token'],\n config['twitter']['access token secret'])\n api = API(auth)\n for platform, tanks in top.items():\n status = \"Most used {} tanks on {} for {}\\n{}\"\n formatting = '{}: {} battles'\n api.update_status(\n status=status.format(\n era,\n platform.capitalize(),\n day,\n '\\n'.join([formatting.format(tank, battles) for tank, battles in tanks.items()])\n )\n )\n\n\ndef query_es_for_mode_battles_difference(config, long_term=False):\n now = datetime.utcnow()\n then = now - timedelta(days=config['days'] if not long_term else config['long term'])\n es = Elasticsearch(**config['elasticsearch'])\n # Setup query\n battles_query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')\n 
battles_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')\n cw_popular_tanks_query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')\n cw_popular_tanks_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')\n # Query Elasticsearch\n total_battles_response = es.search(index=config['battle index'], body=battles_query)\n cw_battles_response = es.search(index=config['tank index'], body=cw_popular_tanks_query)\n dates = [b['key_as_string'].split('T')[0] for b in total_battles_response[\n 'aggregations']['2']['buckets']]\n # Filter numbers\n ww2_battles_xbox = OrderedDict()\n ww2_battles_ps = OrderedDict()\n cw_battles_xbox = OrderedDict()\n cw_battles_ps = OrderedDict()\n percent_cw_xbox = OrderedDict()\n percent_cw_ps = OrderedDict()\n for d in dates:\n ww2_battles_xbox[d] = 0\n ww2_battles_ps[d] = 0\n cw_battles_xbox[d] = 0\n cw_battles_ps[d] = 0\n percent_cw_xbox[d] = None\n percent_cw_ps[d] = None\n for bucket in total_battles_response['aggregations']['2']['buckets']:\n if not bucket['3']['buckets']:\n continue\n for subbucket in bucket['3']['buckets']:\n if subbucket['key'] == 'xbox':\n ww2_battles_xbox[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']\n else:\n ww2_battles_ps[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']\n for bucket in cw_battles_response['aggregations']['2']['buckets']:\n if not bucket['4']['buckets']:\n continue\n for subbucket in bucket['4']['buckets']:\n if subbucket['key'] == 'xbox':\n cw_battles_xbox[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']\n else:\n cw_battles_ps[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']\n for i in range(len(dates)):\n percent_cw_xbox[dates[i]] = cw_battles_xbox[dates[i]] / ww2_battles_xbox[dates[i]]\n percent_cw_ps[dates[i]] = cw_battles_ps[dates[i]] / ww2_battles_ps[dates[i]]\n ww2_battles_xbox[dates[i]] = ww2_battles_xbox[dates[i]] - cw_battles_xbox[dates[i]]\n ww2_battles_ps[dates[i]] = ww2_battles_ps[dates[i]] - cw_battles_ps[dates[i]]\n return dates, list(ww2_battles_xbox.values()), list(ww2_battles_ps.values()), list(cw_battles_xbox.values()), list(cw_battles_ps.values()), list(percent_cw_xbox.values()), list(percent_cw_ps.values())\n\n\ndef create_mode_difference_graph(dates, ww2_battles_xbox, ww2_battles_ps, cw_battles_xbox, cw_battles_ps, percent_cw_xbox, percent_cw_ps, long_term=False, watermark_text='@WOTC_Tracker'):\n shifted_dates = [(datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d') for d in dates]\n # Mode PNG\n plt.clf()\n fig = plt.figure(figsize=(11, 8), dpi=150) if not long_term else plt.figure(figsize=(24, 8), dpi=150)\n fig.suptitle('Estimated breakdown of battles between CW and WW2, per platform' if not long_term else 'Estimated breakdown of battles between CW and WW2, per platform (long term)')\n # ax1 = plt.axes()\n ax1 = fig.add_subplot(111)\n ax1.tick_params(axis='x', labelrotation=45)\n ax1.ticklabel_format(useOffset=False, style='plain')\n ax1.set_xticklabels(shifted_dates, ha='right')\n ax1.plot(shifted_dates, ww2_battles_xbox, color='darkgreen', linewidth=2, label='WW2: Xbox')\n ax1.plot(shifted_dates, cw_battles_xbox, color='lightgreen', linewidth=2, label='CW: Xbox')\n ax1.plot(shifted_dates, ww2_battles_ps, color='darkblue', linewidth=2, label='WW2: Playstation')\n ax1.plot(shifted_dates, cw_battles_ps, color='lightblue', linewidth=2, label='CW: Playstation')\n ax1.set_ylim(bottom=0)\n # for i in 
range(len(shifted_dates)):\n # xbox_text = ax1.annotate(annotations_xbox[i], (shifted_dates[i], ww2_battles_xbox[i]), verticalalignment='bottom', size=12 if not long_term else 8)\n # ps_text = ax1.annotate(annotations_ps[i], (shifted_dates[i], ww2_battles_ps[i]), verticalalignment='bottom', size=12 if not long_term else 8)\n # xbox_text.set_rotation(90)\n # ps_text.set_rotation(90)\n ax1.grid()\n ax1.legend()\n ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n fig.savefig(MODEBREAKDOWN_PNG if not long_term else MODEBREAKDOWNLONG_PNG)\n del fig\n # Mode Percent PNG\n plt.clf()\n fig = plt.figure(figsize=(11, 8), dpi=150) if not long_term else plt.figure(figsize=(24, 8), dpi=150)\n fig.suptitle('Estimated percentage of battles taking place in CW, per platform' if not long_term else 'Estimated percentage of battles taking place in CW, per platform (long term)')\n # ax1 = plt.axes()\n ax1 = fig.add_subplot(111)\n ax1.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))\n ax1.tick_params(axis='x', labelrotation=45)\n ax1.set_xticklabels(shifted_dates, ha='right')\n ax1.plot(shifted_dates, percent_cw_xbox, color='green', linewidth=2, label='Xbox')\n ax1.plot(shifted_dates, percent_cw_ps, color='blue', linewidth=2, label='Playstation')\n ax1.grid()\n ax1.legend()\n ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)\n fig.savefig(MODEBREAKDOWNPERCENT_PNG if not long_term else MODEBREAKDOWNPERCENTLONG_PNG)\n del fig\n\n\ndef upload_mode_breakdown_to_twitter(config):\n auth = OAuthHandler(\n config['twitter']['api key'],\n config['twitter']['api secret key'])\n auth.set_access_token(\n config['twitter']['access token'],\n config['twitter']['access token secret'])\n api = API(auth)\n battles = api.media_upload(MODEBREAKDOWN_PNG)\n percent = api.media_upload(MODEBREAKDOWNPERCENT_PNG)\n api.update_status(\n status=\"Estimated split between WW2 and CW battles\",\n media_ids=[battles.media_id, percent.media_id]\n )\n\n\ndef get_universal_params(config):\n params = dict()\n watermark = config.get('watermark text', None)\n if watermark:\n params['watermark_text'] = watermark\n return params\n\n\nif __name__ == '__main__':\n agp = ArgumentParser(\n description='Bot for processing tracker data and uploading to Twitter')\n agp.add_argument('config', help='Config file location')\n agp.add_argument('-u', '--upload', help='Upload to twitter', action='store_true')\n agp.add_argument('--activity-graphs', action='store_true')\n agp.add_argument('--account-age', action='store_true')\n agp.add_argument('--accounts-by-battles', action='store_true')\n agp.add_argument('--five-battles-min', action='store_true')\n agp.add_argument('--long-term', action='store_true')\n agp.add_argument('--share-unique', action='store_true')\n agp.add_argument('--top-cw-tanks', action='store_true')\n agp.add_argument('--top-ww2-tanks', action='store_true')\n agp.add_argument('--mode-breakdown', action='store_true')\n args = agp.parse_args()\n config = manage_config('read', args.config)\n additional_params = get_universal_params(config)\n now = datetime.utcnow()\n if args.top_cw_tanks or args.top_ww2_tanks or args.mode_breakdown or args.long_term:\n CW_TANKS = build_cw_tanks_list(config)\n cw_popular_tanks_query['query']['bool']['must'][0]['query_string']['query'] = CW_TANKS\n ww2_popular_tanks_query['query']['bool']['must'][0]['query_string']['query'] = 'NOT (' + CW_TANKS + ')'\n if 
args.activity_graphs:\n try:\n create_activity_graphs(*query_es_for_graphs(config), **additional_params)\n if args.upload:\n upload_activity_graphs_to_twitter(config)\n except Exception as e:\n # print(e)\n traceback.print_exc()\n if args.account_age:\n try:\n create_account_age_chart(query_es_for_active_accounts(config), **additional_params)\n if args.upload:\n upload_account_age_graph_to_twitter(config)\n except Exception as e:\n # print(e)\n traceback.print_exc()\n if args.accounts_by_battles:\n try:\n create_accounts_by_battles_chart(query_es_for_accounts_by_battles(config), **additional_params)\n if args.upload:\n upload_accounts_by_battles_chart_to_twitter(config)\n except Exception as e:\n # print(e)\n traceback.print_exc()\n if args.five_battles_min:\n try:\n create_five_battles_minimum_chart(query_five_battles_a_day_minimum(config), **additional_params)\n if args.upload:\n upload_five_battles_minimum_chart_to_twitter(config)\n except Exception as e:\n # print(e)\n traceback.print_exc()\n # Limit long-term views to beginning of month to review previous month's history\n if args.long_term:\n if now.day == 1:\n try:\n create_long_term_charts(*query_long_term_data(config, config.get('omit errors long term', True)), **additional_params)\n create_mode_difference_graph(*query_es_for_mode_battles_difference(config, long_term=True), long_term=True, **additional_params)\n if args.upload:\n upload_long_term_charts(config)\n upload_long_term_mode_charts(config)\n except Exception as e:\n # print(e)\n traceback.print_exc()\n if args.share_unique:\n try:\n share_unique_with_twitter(config, query_es_for_unique(config))\n except Exception as e:\n # print(e)\n traceback.print_exc()\n if args.top_cw_tanks:\n try:\n share_top_tanks(config, 'CW', query_for_tank_info(query_es_for_top_tanks(config, 'cw')), (now - timedelta(days=1)).strftime('%Y-%m-%d'))\n except Exception as e:\n # print(e)\n traceback.print_exc()\n if args.top_ww2_tanks:\n try:\n share_top_tanks(config, 'WW2', query_for_tank_info(query_es_for_top_tanks(config, 'ww2')), (now - timedelta(days=1)).strftime('%Y-%m-%d'))\n except Exception as e:\n # print(e)\n traceback.print_exc()\n if args.mode_breakdown:\n try:\n create_mode_difference_graph(*query_es_for_mode_battles_difference(config), **additional_params)\n if args.upload:\n upload_mode_breakdown_to_twitter(config)\n except Exception as e:\n # print(e)\n traceback.print_exc()\n" ]
[ [ "matplotlib.pyplot.get_cmap", "matplotlib.dates.DateFormatter", "matplotlib.pyplot.figure", "matplotlib.ticker.PercentFormatter", "matplotlib.pyplot.subplot2grid", "matplotlib.pyplot.clf", "matplotlib.pyplot.axes" ] ]
leakyH/PaddleDetection
[ "aa15eb945711baf248177a02d4d3dd3bd3abc4e8" ]
[ "ppdet/modeling/heads/s2anet_head.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/models/anchor_heads_rotated/s2anet_head.py\n\nimport paddle\nfrom paddle import ParamAttr\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddle.nn.initializer import Normal, Constant\nfrom ppdet.core.workspace import register\nfrom ppdet.modeling import ops\nfrom ppdet.modeling import bbox_utils\nfrom ppdet.modeling.proposal_generator.target_layer import RBoxAssigner\nimport numpy as np\n\n\nclass S2ANetAnchorGenerator(nn.Layer):\n \"\"\"\n AnchorGenerator by paddle\n \"\"\"\n\n def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):\n super(S2ANetAnchorGenerator, self).__init__()\n self.base_size = base_size\n self.scales = paddle.to_tensor(scales)\n self.ratios = paddle.to_tensor(ratios)\n self.scale_major = scale_major\n self.ctr = ctr\n self.base_anchors = self.gen_base_anchors()\n\n @property\n def num_base_anchors(self):\n return self.base_anchors.shape[0]\n\n def gen_base_anchors(self):\n w = self.base_size\n h = self.base_size\n if self.ctr is None:\n x_ctr = 0.5 * (w - 1)\n y_ctr = 0.5 * (h - 1)\n else:\n x_ctr, y_ctr = self.ctr\n\n h_ratios = paddle.sqrt(self.ratios)\n w_ratios = 1 / h_ratios\n if self.scale_major:\n ws = (w * w_ratios[:] * self.scales[:]).reshape([-1])\n hs = (h * h_ratios[:] * self.scales[:]).reshape([-1])\n else:\n ws = (w * self.scales[:] * w_ratios[:]).reshape([-1])\n hs = (h * self.scales[:] * h_ratios[:]).reshape([-1])\n\n base_anchors = paddle.stack(\n [\n x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)\n ],\n axis=-1)\n base_anchors = paddle.round(base_anchors)\n return base_anchors\n\n def _meshgrid(self, x, y, row_major=True):\n yy, xx = paddle.meshgrid(y, x)\n yy = yy.reshape([-1])\n xx = xx.reshape([-1])\n if row_major:\n return xx, yy\n else:\n return yy, xx\n\n def forward(self, featmap_size, stride=16):\n # featmap_size*stride project it to original area\n\n feat_h = featmap_size[0]\n feat_w = featmap_size[1]\n shift_x = paddle.arange(0, feat_w, 1, 'int32') * stride\n shift_y = paddle.arange(0, feat_h, 1, 'int32') * stride\n shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n shifts = paddle.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1)\n\n all_anchors = self.base_anchors[:, :] + shifts[:, :]\n all_anchors = all_anchors.reshape([feat_h * feat_w, 4])\n return all_anchors\n\n def valid_flags(self, featmap_size, valid_size):\n feat_h, feat_w = featmap_size\n valid_h, valid_w = valid_size\n assert valid_h <= feat_h and valid_w <= feat_w\n valid_x = paddle.zeros([feat_w], dtype='int32')\n valid_y = paddle.zeros([feat_h], dtype='int32')\n valid_x[:valid_w] = 1\n valid_y[:valid_h] = 1\n valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)\n valid = valid_xx & valid_yy\n valid = paddle.reshape(valid, [-1, 1])\n valid = paddle.expand(valid, [-1, 
self.num_base_anchors]).reshape([-1])\n return valid\n\n\nclass AlignConv(nn.Layer):\n def __init__(self, in_channels, out_channels, kernel_size=3, groups=1):\n super(AlignConv, self).__init__()\n self.kernel_size = kernel_size\n self.align_conv = paddle.vision.ops.DeformConv2D(\n in_channels,\n out_channels,\n kernel_size=self.kernel_size,\n padding=(self.kernel_size - 1) // 2,\n groups=groups,\n weight_attr=ParamAttr(initializer=Normal(0, 0.01)),\n bias_attr=None)\n\n @paddle.no_grad()\n def get_offset(self, anchors, featmap_size, stride):\n \"\"\"\n Args:\n anchors: [M,5] xc,yc,w,h,angle\n featmap_size: (feat_h, feat_w)\n stride: 8\n Returns:\n\n \"\"\"\n anchors = paddle.reshape(anchors, [-1, 5]) # (NA,5)\n dtype = anchors.dtype\n feat_h = featmap_size[0]\n feat_w = featmap_size[1]\n pad = (self.kernel_size - 1) // 2\n idx = paddle.arange(-pad, pad + 1, dtype=dtype)\n\n yy, xx = paddle.meshgrid(idx, idx)\n xx = paddle.reshape(xx, [-1])\n yy = paddle.reshape(yy, [-1])\n\n # get sampling locations of default conv\n xc = paddle.arange(0, feat_w, dtype=dtype)\n yc = paddle.arange(0, feat_h, dtype=dtype)\n yc, xc = paddle.meshgrid(yc, xc)\n\n xc = paddle.reshape(xc, [-1, 1])\n yc = paddle.reshape(yc, [-1, 1])\n x_conv = xc + xx\n y_conv = yc + yy\n\n # get sampling locations of anchors\n # x_ctr, y_ctr, w, h, a = np.unbind(anchors, dim=1)\n x_ctr = anchors[:, 0]\n y_ctr = anchors[:, 1]\n w = anchors[:, 2]\n h = anchors[:, 3]\n a = anchors[:, 4]\n\n x_ctr = paddle.reshape(x_ctr, [-1, 1])\n y_ctr = paddle.reshape(y_ctr, [-1, 1])\n w = paddle.reshape(w, [-1, 1])\n h = paddle.reshape(h, [-1, 1])\n a = paddle.reshape(a, [-1, 1])\n\n x_ctr = x_ctr / stride\n y_ctr = y_ctr / stride\n w_s = w / stride\n h_s = h / stride\n cos, sin = paddle.cos(a), paddle.sin(a)\n dw, dh = w_s / self.kernel_size, h_s / self.kernel_size\n x, y = dw * xx, dh * yy\n xr = cos * x - sin * y\n yr = sin * x + cos * y\n x_anchor, y_anchor = xr + x_ctr, yr + y_ctr\n # get offset filed\n offset_x = x_anchor - x_conv\n offset_y = y_anchor - y_conv\n offset = paddle.stack([offset_y, offset_x], axis=-1)\n offset = paddle.reshape(\n offset, [feat_h * feat_w, self.kernel_size * self.kernel_size * 2])\n offset = paddle.transpose(offset, [1, 0])\n offset = paddle.reshape(\n offset,\n [1, self.kernel_size * self.kernel_size * 2, feat_h, feat_w])\n return offset\n\n def forward(self, x, refine_anchors, featmap_size, stride):\n offset = self.get_offset(refine_anchors, featmap_size, stride)\n x = F.relu(self.align_conv(x, offset))\n return x\n\n\n@register\nclass S2ANetHead(nn.Layer):\n \"\"\"\n S2Anet head\n Args:\n stacked_convs (int): number of stacked_convs\n feat_in (int): input channels of feat\n feat_out (int): output channels of feat\n num_classes (int): num_classes\n anchor_strides (list): stride of anchors\n anchor_scales (list): scale of anchors\n anchor_ratios (list): ratios of anchors\n target_means (list): target_means\n target_stds (list): target_stds\n align_conv_type (str): align_conv_type ['Conv', 'AlignConv']\n align_conv_size (int): kernel size of align_conv\n use_sigmoid_cls (bool): use sigmoid_cls or not\n reg_loss_weight (list): loss weight for regression\n \"\"\"\n __shared__ = ['num_classes']\n __inject__ = ['anchor_assign']\n\n def __init__(self,\n stacked_convs=2,\n feat_in=256,\n feat_out=256,\n num_classes=15,\n anchor_strides=[8, 16, 32, 64, 128],\n anchor_scales=[4],\n anchor_ratios=[1.0],\n target_means=0.0,\n target_stds=1.0,\n align_conv_type='AlignConv',\n align_conv_size=3,\n use_sigmoid_cls=True,\n 
anchor_assign=RBoxAssigner().__dict__,\n reg_loss_weight=[1.0, 1.0, 1.0, 1.0, 1.1],\n cls_loss_weight=[1.1, 1.05],\n reg_loss_type='l1'):\n super(S2ANetHead, self).__init__()\n self.stacked_convs = stacked_convs\n self.feat_in = feat_in\n self.feat_out = feat_out\n self.anchor_list = None\n self.anchor_scales = anchor_scales\n self.anchor_ratios = anchor_ratios\n self.anchor_strides = anchor_strides\n self.anchor_strides = paddle.to_tensor(anchor_strides)\n self.anchor_base_sizes = list(anchor_strides)\n self.means = paddle.ones(shape=[5]) * target_means\n self.stds = paddle.ones(shape=[5]) * target_stds\n assert align_conv_type in ['AlignConv', 'Conv', 'DCN']\n self.align_conv_type = align_conv_type\n self.align_conv_size = align_conv_size\n\n self.use_sigmoid_cls = use_sigmoid_cls\n self.cls_out_channels = num_classes if self.use_sigmoid_cls else 1\n self.sampling = False\n self.anchor_assign = anchor_assign\n self.reg_loss_weight = reg_loss_weight\n self.cls_loss_weight = cls_loss_weight\n self.alpha = 1.0\n self.beta = 1.0\n self.reg_loss_type = reg_loss_type\n self.s2anet_head_out = None\n\n # anchor\n self.anchor_generators = []\n for anchor_base in self.anchor_base_sizes:\n self.anchor_generators.append(\n S2ANetAnchorGenerator(anchor_base, anchor_scales,\n anchor_ratios))\n\n self.anchor_generators = nn.LayerList(self.anchor_generators)\n self.fam_cls_convs = nn.Sequential()\n self.fam_reg_convs = nn.Sequential()\n\n for i in range(self.stacked_convs):\n chan_in = self.feat_in if i == 0 else self.feat_out\n\n self.fam_cls_convs.add_sublayer(\n 'fam_cls_conv_{}'.format(i),\n nn.Conv2D(\n in_channels=chan_in,\n out_channels=self.feat_out,\n kernel_size=3,\n padding=1,\n weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\n bias_attr=ParamAttr(initializer=Constant(0))))\n\n self.fam_cls_convs.add_sublayer('fam_cls_conv_{}_act'.format(i),\n nn.ReLU())\n\n self.fam_reg_convs.add_sublayer(\n 'fam_reg_conv_{}'.format(i),\n nn.Conv2D(\n in_channels=chan_in,\n out_channels=self.feat_out,\n kernel_size=3,\n padding=1,\n weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\n bias_attr=ParamAttr(initializer=Constant(0))))\n\n self.fam_reg_convs.add_sublayer('fam_reg_conv_{}_act'.format(i),\n nn.ReLU())\n\n self.fam_reg = nn.Conv2D(\n self.feat_out,\n 5,\n 1,\n weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\n bias_attr=ParamAttr(initializer=Constant(0)))\n prior_prob = 0.01\n bias_init = float(-np.log((1 - prior_prob) / prior_prob))\n self.fam_cls = nn.Conv2D(\n self.feat_out,\n self.cls_out_channels,\n 1,\n weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\n bias_attr=ParamAttr(initializer=Constant(bias_init)))\n\n if self.align_conv_type == \"AlignConv\":\n self.align_conv = AlignConv(self.feat_out, self.feat_out,\n self.align_conv_size)\n elif self.align_conv_type == \"Conv\":\n self.align_conv = nn.Conv2D(\n self.feat_out,\n self.feat_out,\n self.align_conv_size,\n padding=(self.align_conv_size - 1) // 2,\n bias_attr=ParamAttr(initializer=Constant(0)))\n\n elif self.align_conv_type == \"DCN\":\n self.align_conv_offset = nn.Conv2D(\n self.feat_out,\n 2 * self.align_conv_size**2,\n 1,\n weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\n bias_attr=ParamAttr(initializer=Constant(0)))\n\n self.align_conv = paddle.vision.ops.DeformConv2D(\n self.feat_out,\n self.feat_out,\n self.align_conv_size,\n padding=(self.align_conv_size - 1) // 2,\n weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\n bias_attr=False)\n\n self.or_conv = nn.Conv2D(\n self.feat_out,\n self.feat_out,\n 
kernel_size=3,\n padding=1,\n weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\n bias_attr=ParamAttr(initializer=Constant(0)))\n\n # ODM\n self.odm_cls_convs = nn.Sequential()\n self.odm_reg_convs = nn.Sequential()\n\n for i in range(self.stacked_convs):\n ch_in = self.feat_out\n # ch_in = int(self.feat_out / 8) if i == 0 else self.feat_out\n\n self.odm_cls_convs.add_sublayer(\n 'odm_cls_conv_{}'.format(i),\n nn.Conv2D(\n in_channels=ch_in,\n out_channels=self.feat_out,\n kernel_size=3,\n stride=1,\n padding=1,\n weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\n bias_attr=ParamAttr(initializer=Constant(0))))\n\n self.odm_cls_convs.add_sublayer('odm_cls_conv_{}_act'.format(i),\n nn.ReLU())\n\n self.odm_reg_convs.add_sublayer(\n 'odm_reg_conv_{}'.format(i),\n nn.Conv2D(\n in_channels=self.feat_out,\n out_channels=self.feat_out,\n kernel_size=3,\n stride=1,\n padding=1,\n weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\n bias_attr=ParamAttr(initializer=Constant(0))))\n\n self.odm_reg_convs.add_sublayer('odm_reg_conv_{}_act'.format(i),\n nn.ReLU())\n\n self.odm_cls = nn.Conv2D(\n self.feat_out,\n self.cls_out_channels,\n 3,\n padding=1,\n weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\n bias_attr=ParamAttr(initializer=Constant(bias_init)))\n self.odm_reg = nn.Conv2D(\n self.feat_out,\n 5,\n 3,\n padding=1,\n weight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\n bias_attr=ParamAttr(initializer=Constant(0)))\n\n self.featmap_sizes = []\n self.base_anchors_list = []\n self.refine_anchor_list = []\n\n def forward(self, feats):\n fam_reg_branch_list = []\n fam_cls_branch_list = []\n\n odm_reg_branch_list = []\n odm_cls_branch_list = []\n\n self.featmap_sizes_list = []\n self.base_anchors_list = []\n self.refine_anchor_list = []\n\n for feat_idx in range(len(feats)):\n feat = feats[feat_idx]\n fam_cls_feat = self.fam_cls_convs(feat)\n\n fam_cls = self.fam_cls(fam_cls_feat)\n # [N, CLS, H, W] --> [N, H, W, CLS]\n fam_cls = fam_cls.transpose([0, 2, 3, 1])\n fam_cls_reshape = paddle.reshape(\n fam_cls, [fam_cls.shape[0], -1, self.cls_out_channels])\n fam_cls_branch_list.append(fam_cls_reshape)\n\n fam_reg_feat = self.fam_reg_convs(feat)\n\n fam_reg = self.fam_reg(fam_reg_feat)\n # [N, 5, H, W] --> [N, H, W, 5]\n fam_reg = fam_reg.transpose([0, 2, 3, 1])\n fam_reg_reshape = paddle.reshape(fam_reg, [fam_reg.shape[0], -1, 5])\n fam_reg_branch_list.append(fam_reg_reshape)\n\n # prepare anchor\n featmap_size = (paddle.shape(feat)[2], paddle.shape(feat)[3])\n self.featmap_sizes_list.append(featmap_size)\n init_anchors = self.anchor_generators[feat_idx](\n featmap_size, self.anchor_strides[feat_idx])\n\n init_anchors = paddle.to_tensor(init_anchors, dtype='float32')\n NA = featmap_size[0] * featmap_size[1]\n init_anchors = paddle.reshape(init_anchors, [NA, 4])\n init_anchors = self.rect2rbox(init_anchors)\n self.base_anchors_list.append(init_anchors)\n\n if self.training:\n refine_anchor = self.bbox_decode(fam_reg.detach(), init_anchors)\n else:\n refine_anchor = self.bbox_decode(fam_reg, init_anchors)\n\n self.refine_anchor_list.append(refine_anchor)\n\n if self.align_conv_type == 'AlignConv':\n align_feat = self.align_conv(feat,\n refine_anchor.clone(),\n featmap_size,\n self.anchor_strides[feat_idx])\n elif self.align_conv_type == 'DCN':\n align_offset = self.align_conv_offset(feat)\n align_feat = self.align_conv(feat, align_offset)\n elif self.align_conv_type == 'Conv':\n align_feat = self.align_conv(feat)\n\n or_feat = self.or_conv(align_feat)\n odm_reg_feat = or_feat\n 
odm_cls_feat = or_feat\n\n odm_reg_feat = self.odm_reg_convs(odm_reg_feat)\n odm_cls_feat = self.odm_cls_convs(odm_cls_feat)\n\n odm_cls_score = self.odm_cls(odm_cls_feat)\n # [N, CLS, H, W] --> [N, H, W, CLS]\n odm_cls_score = odm_cls_score.transpose([0, 2, 3, 1])\n odm_cls_score_shape = odm_cls_score.shape\n odm_cls_score_reshape = paddle.reshape(odm_cls_score, [\n odm_cls_score_shape[0], odm_cls_score_shape[1] *\n odm_cls_score_shape[2], self.cls_out_channels\n ])\n\n odm_cls_branch_list.append(odm_cls_score_reshape)\n\n odm_bbox_pred = self.odm_reg(odm_reg_feat)\n # [N, 5, H, W] --> [N, H, W, 5]\n odm_bbox_pred = odm_bbox_pred.transpose([0, 2, 3, 1])\n odm_bbox_pred_reshape = paddle.reshape(odm_bbox_pred, [-1, 5])\n odm_bbox_pred_reshape = paddle.unsqueeze(\n odm_bbox_pred_reshape, axis=0)\n odm_reg_branch_list.append(odm_bbox_pred_reshape)\n\n self.s2anet_head_out = (fam_cls_branch_list, fam_reg_branch_list,\n odm_cls_branch_list, odm_reg_branch_list)\n return self.s2anet_head_out\n\n def get_prediction(self, nms_pre=2000):\n refine_anchors = self.refine_anchor_list\n fam_cls_branch_list = self.s2anet_head_out[0]\n fam_reg_branch_list = self.s2anet_head_out[1]\n odm_cls_branch_list = self.s2anet_head_out[2]\n odm_reg_branch_list = self.s2anet_head_out[3]\n pred_scores, pred_bboxes = self.get_bboxes(\n odm_cls_branch_list, odm_reg_branch_list, refine_anchors, nms_pre,\n self.cls_out_channels, self.use_sigmoid_cls)\n return pred_scores, pred_bboxes\n\n def smooth_l1_loss(self, pred, label, delta=1.0 / 9.0):\n \"\"\"\n Args:\n pred: pred score\n label: label\n delta: delta\n Returns: loss\n \"\"\"\n assert pred.shape == label.shape and label.numel() > 0\n assert delta > 0\n diff = paddle.abs(pred - label)\n loss = paddle.where(diff < delta, 0.5 * diff * diff / delta,\n diff - 0.5 * delta)\n return loss\n\n def get_fam_loss(self, fam_target, s2anet_head_out, reg_loss_type='gwd'):\n (labels, label_weights, bbox_targets, bbox_weights, bbox_gt_bboxes,\n pos_inds, neg_inds) = fam_target\n fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list = s2anet_head_out\n\n fam_cls_losses = []\n fam_bbox_losses = []\n st_idx = 0\n num_total_samples = len(pos_inds) + len(\n neg_inds) if self.sampling else len(pos_inds)\n num_total_samples = max(1, num_total_samples)\n\n for idx, feat_size in enumerate(self.featmap_sizes_list):\n feat_anchor_num = feat_size[0] * feat_size[1]\n\n # step1: get data\n feat_labels = labels[st_idx:st_idx + feat_anchor_num]\n feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num]\n\n feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :]\n feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :]\n\n # step2: calc cls loss\n feat_labels = feat_labels.reshape(-1)\n feat_label_weights = feat_label_weights.reshape(-1)\n\n fam_cls_score = fam_cls_branch_list[idx]\n fam_cls_score = paddle.squeeze(fam_cls_score, axis=0)\n fam_cls_score1 = fam_cls_score\n\n feat_labels = paddle.to_tensor(feat_labels)\n feat_labels_one_hot = paddle.nn.functional.one_hot(\n feat_labels, self.cls_out_channels + 1)\n feat_labels_one_hot = feat_labels_one_hot[:, 1:]\n feat_labels_one_hot.stop_gradient = True\n\n num_total_samples = paddle.to_tensor(\n num_total_samples, dtype='float32', stop_gradient=True)\n\n fam_cls = F.sigmoid_focal_loss(\n fam_cls_score1,\n feat_labels_one_hot,\n normalizer=num_total_samples,\n reduction='none')\n\n feat_label_weights = feat_label_weights.reshape(\n feat_label_weights.shape[0], 1)\n feat_label_weights = 
np.repeat(\n feat_label_weights, self.cls_out_channels, axis=1)\n feat_label_weights = paddle.to_tensor(\n feat_label_weights, stop_gradient=True)\n\n fam_cls = fam_cls * feat_label_weights\n fam_cls_total = paddle.sum(fam_cls)\n fam_cls_losses.append(fam_cls_total)\n\n # step3: regression loss\n feat_bbox_targets = paddle.to_tensor(\n feat_bbox_targets, dtype='float32', stop_gradient=True)\n feat_bbox_targets = paddle.reshape(feat_bbox_targets, [-1, 5])\n\n fam_bbox_pred = fam_reg_branch_list[idx]\n fam_bbox_pred = paddle.squeeze(fam_bbox_pred, axis=0)\n fam_bbox_pred = paddle.reshape(fam_bbox_pred, [-1, 5])\n fam_bbox = self.smooth_l1_loss(fam_bbox_pred, feat_bbox_targets)\n loss_weight = paddle.to_tensor(\n self.reg_loss_weight, dtype='float32', stop_gradient=True)\n fam_bbox = paddle.multiply(fam_bbox, loss_weight)\n feat_bbox_weights = paddle.to_tensor(\n feat_bbox_weights, stop_gradient=True)\n\n if reg_loss_type == 'l1':\n fam_bbox = fam_bbox * feat_bbox_weights\n fam_bbox_total = paddle.sum(fam_bbox) / num_total_samples\n elif reg_loss_type == 'iou' or reg_loss_type == 'gwd':\n fam_bbox = paddle.sum(fam_bbox, axis=-1)\n feat_bbox_weights = paddle.sum(feat_bbox_weights, axis=-1)\n try:\n from rbox_iou_ops import rbox_iou\n except Exception as e:\n print(\"import custom_ops error, try install rbox_iou_ops \" \\\n \"following ppdet/ext_op/README.md\", e)\n sys.stdout.flush()\n sys.exit(-1)\n # calc iou\n fam_bbox_decode = self.delta2rbox(self.base_anchors_list[idx],\n fam_bbox_pred)\n bbox_gt_bboxes = paddle.to_tensor(\n bbox_gt_bboxes,\n dtype=fam_bbox_decode.dtype,\n place=fam_bbox_decode.place)\n bbox_gt_bboxes.stop_gradient = True\n iou = rbox_iou(fam_bbox_decode, bbox_gt_bboxes)\n iou = paddle.diag(iou)\n\n if reg_loss_type == 'gwd':\n bbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx +\n feat_anchor_num, :]\n fam_bbox_total = self.gwd_loss(fam_bbox_decode,\n bbox_gt_bboxes_level)\n fam_bbox_total = fam_bbox_total * feat_bbox_weights\n fam_bbox_total = paddle.sum(\n fam_bbox_total) / num_total_samples\n\n fam_bbox_losses.append(fam_bbox_total)\n st_idx += feat_anchor_num\n\n fam_cls_loss = paddle.add_n(fam_cls_losses)\n fam_cls_loss_weight = paddle.to_tensor(\n self.cls_loss_weight[0], dtype='float32', stop_gradient=True)\n fam_cls_loss = fam_cls_loss * fam_cls_loss_weight\n fam_reg_loss = paddle.add_n(fam_bbox_losses)\n return fam_cls_loss, fam_reg_loss\n\n def get_odm_loss(self, odm_target, s2anet_head_out, reg_loss_type='gwd'):\n (labels, label_weights, bbox_targets, bbox_weights, bbox_gt_bboxes,\n pos_inds, neg_inds) = odm_target\n fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list = s2anet_head_out\n\n odm_cls_losses = []\n odm_bbox_losses = []\n st_idx = 0\n num_total_samples = len(pos_inds) + len(\n neg_inds) if self.sampling else len(pos_inds)\n num_total_samples = max(1, num_total_samples)\n\n for idx, feat_size in enumerate(self.featmap_sizes_list):\n feat_anchor_num = feat_size[0] * feat_size[1]\n\n # step1: get data\n feat_labels = labels[st_idx:st_idx + feat_anchor_num]\n feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num]\n\n feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :]\n feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :]\n\n # step2: calc cls loss\n feat_labels = feat_labels.reshape(-1)\n feat_label_weights = feat_label_weights.reshape(-1)\n\n odm_cls_score = odm_cls_branch_list[idx]\n odm_cls_score = paddle.squeeze(odm_cls_score, axis=0)\n odm_cls_score1 = odm_cls_score\n\n 
feat_labels = paddle.to_tensor(feat_labels)\n feat_labels_one_hot = paddle.nn.functional.one_hot(\n feat_labels, self.cls_out_channels + 1)\n feat_labels_one_hot = feat_labels_one_hot[:, 1:]\n feat_labels_one_hot.stop_gradient = True\n\n num_total_samples = paddle.to_tensor(\n num_total_samples, dtype='float32', stop_gradient=True)\n odm_cls = F.sigmoid_focal_loss(\n odm_cls_score1,\n feat_labels_one_hot,\n normalizer=num_total_samples,\n reduction='none')\n\n feat_label_weights = feat_label_weights.reshape(\n feat_label_weights.shape[0], 1)\n feat_label_weights = np.repeat(\n feat_label_weights, self.cls_out_channels, axis=1)\n feat_label_weights = paddle.to_tensor(feat_label_weights)\n feat_label_weights.stop_gradient = True\n\n odm_cls = odm_cls * feat_label_weights\n odm_cls_total = paddle.sum(odm_cls)\n odm_cls_losses.append(odm_cls_total)\n\n # # step3: regression loss\n feat_bbox_targets = paddle.to_tensor(\n feat_bbox_targets, dtype='float32')\n feat_bbox_targets = paddle.reshape(feat_bbox_targets, [-1, 5])\n feat_bbox_targets.stop_gradient = True\n\n odm_bbox_pred = odm_reg_branch_list[idx]\n odm_bbox_pred = paddle.squeeze(odm_bbox_pred, axis=0)\n odm_bbox_pred = paddle.reshape(odm_bbox_pred, [-1, 5])\n odm_bbox = self.smooth_l1_loss(odm_bbox_pred, feat_bbox_targets)\n\n loss_weight = paddle.to_tensor(\n self.reg_loss_weight, dtype='float32', stop_gradient=True)\n odm_bbox = paddle.multiply(odm_bbox, loss_weight)\n feat_bbox_weights = paddle.to_tensor(\n feat_bbox_weights, stop_gradient=True)\n\n if reg_loss_type == 'l1':\n odm_bbox = odm_bbox * feat_bbox_weights\n odm_bbox_total = paddle.sum(odm_bbox) / num_total_samples\n elif reg_loss_type == 'iou' or reg_loss_type == 'gwd':\n odm_bbox = paddle.sum(odm_bbox, axis=-1)\n feat_bbox_weights = paddle.sum(feat_bbox_weights, axis=-1)\n try:\n from rbox_iou_ops import rbox_iou\n except Exception as e:\n print(\"import custom_ops error, try install rbox_iou_ops \" \\\n \"following ppdet/ext_op/README.md\", e)\n sys.stdout.flush()\n sys.exit(-1)\n # calc iou\n odm_bbox_decode = self.delta2rbox(self.refine_anchor_list[idx],\n odm_bbox_pred)\n bbox_gt_bboxes = paddle.to_tensor(\n bbox_gt_bboxes,\n dtype=odm_bbox_decode.dtype,\n place=odm_bbox_decode.place)\n bbox_gt_bboxes.stop_gradient = True\n iou = rbox_iou(odm_bbox_decode, bbox_gt_bboxes)\n iou = paddle.diag(iou)\n\n if reg_loss_type == 'gwd':\n bbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx +\n feat_anchor_num, :]\n odm_bbox_total = self.gwd_loss(odm_bbox_decode,\n bbox_gt_bboxes_level)\n odm_bbox_total = odm_bbox_total * feat_bbox_weights\n odm_bbox_total = paddle.sum(\n odm_bbox_total) / num_total_samples\n\n odm_bbox_losses.append(odm_bbox_total)\n st_idx += feat_anchor_num\n\n odm_cls_loss = paddle.add_n(odm_cls_losses)\n odm_cls_loss_weight = paddle.to_tensor(\n self.cls_loss_weight[1], dtype='float32', stop_gradient=True)\n odm_cls_loss = odm_cls_loss * odm_cls_loss_weight\n odm_reg_loss = paddle.add_n(odm_bbox_losses)\n return odm_cls_loss, odm_reg_loss\n\n def get_loss(self, inputs):\n # inputs: im_id image im_shape scale_factor gt_bbox gt_class is_crowd\n\n # compute loss\n fam_cls_loss_lst = []\n fam_reg_loss_lst = []\n odm_cls_loss_lst = []\n odm_reg_loss_lst = []\n\n im_shape = inputs['im_shape']\n for im_id in range(im_shape.shape[0]):\n np_im_shape = inputs['im_shape'][im_id].numpy()\n np_scale_factor = inputs['scale_factor'][im_id].numpy()\n # data_format: (xc, yc, w, h, theta)\n gt_bboxes = inputs['gt_rbox'][im_id].numpy()\n gt_labels = 
inputs['gt_class'][im_id].numpy()\n is_crowd = inputs['is_crowd'][im_id].numpy()\n gt_labels = gt_labels + 1\n\n # featmap_sizes\n anchors_list_all = np.concatenate(self.base_anchors_list)\n\n # get im_feat\n fam_cls_feats_list = [e[im_id] for e in self.s2anet_head_out[0]]\n fam_reg_feats_list = [e[im_id] for e in self.s2anet_head_out[1]]\n odm_cls_feats_list = [e[im_id] for e in self.s2anet_head_out[2]]\n odm_reg_feats_list = [e[im_id] for e in self.s2anet_head_out[3]]\n im_s2anet_head_out = (fam_cls_feats_list, fam_reg_feats_list,\n odm_cls_feats_list, odm_reg_feats_list)\n\n # FAM\n im_fam_target = self.anchor_assign(anchors_list_all, gt_bboxes,\n gt_labels, is_crowd)\n if im_fam_target is not None:\n im_fam_cls_loss, im_fam_reg_loss = self.get_fam_loss(\n im_fam_target, im_s2anet_head_out, self.reg_loss_type)\n fam_cls_loss_lst.append(im_fam_cls_loss)\n fam_reg_loss_lst.append(im_fam_reg_loss)\n\n # ODM\n np_refine_anchors_list = paddle.concat(\n self.refine_anchor_list).numpy()\n np_refine_anchors_list = np.concatenate(np_refine_anchors_list)\n np_refine_anchors_list = np_refine_anchors_list.reshape(-1, 5)\n im_odm_target = self.anchor_assign(np_refine_anchors_list,\n gt_bboxes, gt_labels, is_crowd)\n\n if im_odm_target is not None:\n im_odm_cls_loss, im_odm_reg_loss = self.get_odm_loss(\n im_odm_target, im_s2anet_head_out, self.reg_loss_type)\n odm_cls_loss_lst.append(im_odm_cls_loss)\n odm_reg_loss_lst.append(im_odm_reg_loss)\n fam_cls_loss = paddle.add_n(fam_cls_loss_lst)\n fam_reg_loss = paddle.add_n(fam_reg_loss_lst)\n odm_cls_loss = paddle.add_n(odm_cls_loss_lst)\n odm_reg_loss = paddle.add_n(odm_reg_loss_lst)\n return {\n 'fam_cls_loss': fam_cls_loss,\n 'fam_reg_loss': fam_reg_loss,\n 'odm_cls_loss': odm_cls_loss,\n 'odm_reg_loss': odm_reg_loss\n }\n\n def get_bboxes(self, cls_score_list, bbox_pred_list, mlvl_anchors, nms_pre,\n cls_out_channels, use_sigmoid_cls):\n assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)\n\n mlvl_bboxes = []\n mlvl_scores = []\n\n idx = 0\n for cls_score, bbox_pred, anchors in zip(cls_score_list, bbox_pred_list,\n mlvl_anchors):\n cls_score = paddle.reshape(cls_score, [-1, cls_out_channels])\n if use_sigmoid_cls:\n scores = F.sigmoid(cls_score)\n else:\n scores = F.softmax(cls_score, axis=-1)\n\n # bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 5)\n bbox_pred = paddle.transpose(bbox_pred, [1, 2, 0])\n bbox_pred = paddle.reshape(bbox_pred, [-1, 5])\n anchors = paddle.reshape(anchors, [-1, 5])\n\n if scores.shape[0] > nms_pre:\n # Get maximum scores for foreground classes.\n if use_sigmoid_cls:\n max_scores = paddle.max(scores, axis=1)\n else:\n max_scores = paddle.max(scores[:, 1:], axis=1)\n\n topk_val, topk_inds = paddle.topk(max_scores, nms_pre)\n anchors = paddle.gather(anchors, topk_inds)\n bbox_pred = paddle.gather(bbox_pred, topk_inds)\n scores = paddle.gather(scores, topk_inds)\n\n bbox_delta = paddle.reshape(bbox_pred, [-1, 5])\n bboxes = self.delta2rbox(anchors, bbox_delta)\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n\n idx += 1\n\n mlvl_bboxes = paddle.concat(mlvl_bboxes, axis=0)\n mlvl_scores = paddle.concat(mlvl_scores)\n\n return mlvl_scores, mlvl_bboxes\n\n def rect2rbox(self, bboxes):\n \"\"\"\n :param bboxes: shape (n, 4) (xmin, ymin, xmax, ymax)\n :return: dbboxes: shape (n, 5) (x_ctr, y_ctr, w, h, angle)\n \"\"\"\n bboxes = paddle.reshape(bboxes, [-1, 4])\n num_boxes = paddle.shape(bboxes)[0]\n x_ctr = (bboxes[:, 2] + bboxes[:, 0]) / 2.0\n y_ctr = (bboxes[:, 3] + bboxes[:, 1]) / 2.0\n edges1 = 
paddle.abs(bboxes[:, 2] - bboxes[:, 0])\n edges2 = paddle.abs(bboxes[:, 3] - bboxes[:, 1])\n\n rbox_w = paddle.maximum(edges1, edges2)\n rbox_h = paddle.minimum(edges1, edges2)\n\n # set angle\n inds = edges1 < edges2\n inds = paddle.cast(inds, 'int32')\n rboxes_angle = inds * np.pi / 2.0\n\n rboxes = paddle.stack(\n (x_ctr, y_ctr, rbox_w, rbox_h, rboxes_angle), axis=1)\n return rboxes\n\n # deltas to rbox\n def delta2rbox(self, rrois, deltas, wh_ratio_clip=1e-6):\n \"\"\"\n :param rrois: (cx, cy, w, h, theta)\n :param deltas: (dx, dy, dw, dh, dtheta)\n :param means: means of anchor\n :param stds: stds of anchor\n :param wh_ratio_clip: clip threshold of wh_ratio\n :return:\n \"\"\"\n deltas = paddle.reshape(deltas, [-1, 5])\n rrois = paddle.reshape(rrois, [-1, 5])\n # fix dy2st bug denorm_deltas = deltas * self.stds + self.means\n denorm_deltas = paddle.add(\n paddle.multiply(deltas, self.stds), self.means)\n\n dx = denorm_deltas[:, 0]\n dy = denorm_deltas[:, 1]\n dw = denorm_deltas[:, 2]\n dh = denorm_deltas[:, 3]\n dangle = denorm_deltas[:, 4]\n max_ratio = np.abs(np.log(wh_ratio_clip))\n dw = paddle.clip(dw, min=-max_ratio, max=max_ratio)\n dh = paddle.clip(dh, min=-max_ratio, max=max_ratio)\n\n rroi_x = rrois[:, 0]\n rroi_y = rrois[:, 1]\n rroi_w = rrois[:, 2]\n rroi_h = rrois[:, 3]\n rroi_angle = rrois[:, 4]\n\n gx = dx * rroi_w * paddle.cos(rroi_angle) - dy * rroi_h * paddle.sin(\n rroi_angle) + rroi_x\n gy = dx * rroi_w * paddle.sin(rroi_angle) + dy * rroi_h * paddle.cos(\n rroi_angle) + rroi_y\n gw = rroi_w * dw.exp()\n gh = rroi_h * dh.exp()\n ga = np.pi * dangle + rroi_angle\n ga = (ga + np.pi / 4) % np.pi - np.pi / 4\n ga = paddle.to_tensor(ga)\n gw = paddle.to_tensor(gw, dtype='float32')\n gh = paddle.to_tensor(gh, dtype='float32')\n bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=-1)\n return bboxes\n\n def bbox_decode(self, bbox_preds, anchors):\n \"\"\"decode bbox from deltas\n Args:\n bbox_preds: [N,H,W,5]\n anchors: [H*W,5]\n return:\n bboxes: [N,H,W,5]\n \"\"\"\n num_imgs, H, W, _ = bbox_preds.shape\n bbox_delta = paddle.reshape(bbox_preds, [-1, 5])\n bboxes = self.delta2rbox(anchors, bbox_delta)\n return bboxes\n\n def trace(self, A):\n tr = paddle.diagonal(A, axis1=-2, axis2=-1)\n tr = paddle.sum(tr, axis=-1)\n return tr\n\n def sqrt_newton_schulz_autograd(self, A, numIters):\n A_shape = A.shape\n batchSize = A_shape[0]\n dim = A_shape[1]\n\n normA = A * A\n normA = paddle.sum(normA, axis=1)\n normA = paddle.sum(normA, axis=1)\n normA = paddle.sqrt(normA)\n normA1 = normA.reshape([batchSize, 1, 1])\n Y = paddle.divide(A, paddle.expand_as(normA1, A))\n I = paddle.eye(dim, dim).reshape([1, dim, dim])\n l0 = []\n for i in range(batchSize):\n l0.append(I)\n I = paddle.concat(l0, axis=0)\n I.stop_gradient = False\n Z = paddle.eye(dim, dim).reshape([1, dim, dim])\n l1 = []\n for i in range(batchSize):\n l1.append(Z)\n Z = paddle.concat(l1, axis=0)\n Z.stop_gradient = False\n\n for i in range(numIters):\n T = 0.5 * (3.0 * I - Z.bmm(Y))\n Y = Y.bmm(T)\n Z = T.bmm(Z)\n sA = Y * paddle.sqrt(normA1).reshape([batchSize, 1, 1])\n sA = paddle.expand_as(sA, A)\n return sA\n\n def wasserstein_distance_sigma(sigma1, sigma2):\n wasserstein_distance_item2 = paddle.matmul(\n sigma1, sigma1) + paddle.matmul(\n sigma2, sigma2) - 2 * self.sqrt_newton_schulz_autograd(\n paddle.matmul(\n paddle.matmul(sigma1, paddle.matmul(sigma2, sigma2)),\n sigma1), 10)\n wasserstein_distance_item2 = self.trace(wasserstein_distance_item2)\n\n return wasserstein_distance_item2\n\n def xywhr2xyrs(self, 
xywhr):\n xywhr = paddle.reshape(xywhr, [-1, 5])\n xy = xywhr[:, :2]\n wh = paddle.clip(xywhr[:, 2:4], min=1e-7, max=1e7)\n r = xywhr[:, 4]\n cos_r = paddle.cos(r)\n sin_r = paddle.sin(r)\n R = paddle.stack(\n (cos_r, -sin_r, sin_r, cos_r), axis=-1).reshape([-1, 2, 2])\n S = 0.5 * paddle.nn.functional.diag_embed(wh)\n return xy, R, S\n\n def gwd_loss(self,\n pred,\n target,\n fun='log',\n tau=1.0,\n alpha=1.0,\n normalize=False):\n\n xy_p, R_p, S_p = self.xywhr2xyrs(pred)\n xy_t, R_t, S_t = self.xywhr2xyrs(target)\n\n xy_distance = (xy_p - xy_t).square().sum(axis=-1)\n\n Sigma_p = R_p.matmul(S_p.square()).matmul(R_p.transpose([0, 2, 1]))\n Sigma_t = R_t.matmul(S_t.square()).matmul(R_t.transpose([0, 2, 1]))\n\n whr_distance = paddle.diagonal(\n S_p, axis1=-2, axis2=-1).square().sum(axis=-1)\n\n whr_distance = whr_distance + paddle.diagonal(\n S_t, axis1=-2, axis2=-1).square().sum(axis=-1)\n _t = Sigma_p.matmul(Sigma_t)\n\n _t_tr = paddle.diagonal(_t, axis1=-2, axis2=-1).sum(axis=-1)\n _t_det_sqrt = paddle.diagonal(S_p, axis1=-2, axis2=-1).prod(axis=-1)\n _t_det_sqrt = _t_det_sqrt * paddle.diagonal(\n S_t, axis1=-2, axis2=-1).prod(axis=-1)\n whr_distance = whr_distance + (-2) * (\n (_t_tr + 2 * _t_det_sqrt).clip(0).sqrt())\n\n distance = (xy_distance + alpha * alpha * whr_distance).clip(0)\n\n if normalize:\n wh_p = pred[..., 2:4].clip(min=1e-7, max=1e7)\n wh_t = target[..., 2:4].clip(min=1e-7, max=1e7)\n scale = ((wh_p.log() + wh_t.log()).sum(dim=-1) / 4).exp()\n distance = distance / scale\n\n if fun == 'log':\n distance = paddle.log1p(distance)\n\n if tau >= 1.0:\n return 1 - 1 / (tau + distance)\n\n return distance\n" ]
[ [ "numpy.repeat", "numpy.log", "numpy.concatenate" ] ]
isayev/torchani
[ "f8edffe384e2cb2eebe3a7e04faa01b6f5e26b37" ]
[ "examples/nnp_training.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n.. _training-example:\n\nTrain Your Own Neural Network Potential\n=======================================\n\nThis example shows how to use TorchANI to train a neural network potential\nwith the setup identical to NeuroChem. We will use the same configuration as\nspecified in `inputtrain.ipt`_\n\n.. _`inputtrain.ipt`:\n https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/inputtrain.ipt\n\n.. note::\n TorchANI provide tools to run NeuroChem training config file `inputtrain.ipt`.\n See: :ref:`neurochem-training`.\n\"\"\"\n\n###############################################################################\n# To begin with, let's first import the modules and setup devices we will use:\n\nimport torch\nimport torchani\nimport os\nimport math\nimport torch.utils.tensorboard\nimport tqdm\n\n# helper function to convert energy unit from Hartree to kcal/mol\nfrom torchani.units import hartree2kcalmol\n\n# device to run the training\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n###############################################################################\n# Now let's setup constants and construct an AEV computer. These numbers could\n# be found in `rHCNO-5.2R_16-3.5A_a4-8.params`\n# The atomic self energies given in `sae_linfit.dat`_ are computed from ANI-1x\n# dataset. These constants can be calculated for any given dataset if ``None``\n# is provided as an argument to the object of :class:`EnergyShifter` class.\n#\n# .. note::\n#\n# Besides defining these hyperparameters programmatically,\n# :mod:`torchani.neurochem` provide tools to read them from file.\n#\n# .. _rHCNO-5.2R_16-3.5A_a4-8.params:\n# https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/rHCNO-5.2R_16-3.5A_a4-8.params\n# .. _sae_linfit.dat:\n# https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/sae_linfit.dat\n\nRcr = 5.2000e+00\nRca = 3.5000e+00\nEtaR = torch.tensor([1.6000000e+01], device=device)\nShfR = torch.tensor([9.0000000e-01, 1.1687500e+00, 1.4375000e+00, 1.7062500e+00, 1.9750000e+00, 2.2437500e+00, 2.5125000e+00, 2.7812500e+00, 3.0500000e+00, 3.3187500e+00, 3.5875000e+00, 3.8562500e+00, 4.1250000e+00, 4.3937500e+00, 4.6625000e+00, 4.9312500e+00], device=device)\nZeta = torch.tensor([3.2000000e+01], device=device)\nShfZ = torch.tensor([1.9634954e-01, 5.8904862e-01, 9.8174770e-01, 1.3744468e+00, 1.7671459e+00, 2.1598449e+00, 2.5525440e+00, 2.9452431e+00], device=device)\nEtaA = torch.tensor([8.0000000e+00], device=device)\nShfA = torch.tensor([9.0000000e-01, 1.5500000e+00, 2.2000000e+00, 2.8500000e+00], device=device)\nnum_species = 4\naev_computer = torchani.AEVComputer(Rcr, Rca, EtaR, ShfR, EtaA, Zeta, ShfA, ShfZ, num_species)\nenergy_shifter = torchani.utils.EnergyShifter(None)\nspecies_to_tensor = torchani.utils.ChemicalSymbolsToInts(['H', 'C', 'N', 'O'])\n\n###############################################################################\n# Now let's setup datasets. These paths assumes the user run this script under\n# the ``examples`` directory of TorchANI's repository. If you download this\n# script, you should manually set the path of these files in your system before\n# this script can run successfully.\n#\n# Also note that we need to subtracting energies by the self energies of all\n# atoms for each molecule. This makes the range of energies in a reasonable\n# range. 
The second argument defines how to convert species as a list of string\n# to tensor, that is, for all supported chemical symbols, which is correspond to\n# ``0``, which correspond to ``1``, etc.\n\ntry:\n path = os.path.dirname(os.path.realpath(__file__))\nexcept NameError:\n path = os.getcwd()\ndspath = os.path.join(path, '../dataset/ani1-up_to_gdb4/ani_gdb_s01.h5')\nbatch_size = 2560\n\ntraining, validation = torchani.data.load(dspath).subtract_self_energies(energy_shifter).species_to_indices().shuffle().split(0.8, None)\ntraining = training.collate(batch_size).cache()\nvalidation = validation.collate(batch_size).cache()\nprint('Self atomic energies: ', energy_shifter.self_energies)\n\n###############################################################################\n# When iterating the dataset, we will get a dict of name->property mapping\n#\n###############################################################################\n# Now let's define atomic neural networks.\n\nH_network = torch.nn.Sequential(\n torch.nn.Linear(384, 160),\n torch.nn.CELU(0.1),\n torch.nn.Linear(160, 128),\n torch.nn.CELU(0.1),\n torch.nn.Linear(128, 96),\n torch.nn.CELU(0.1),\n torch.nn.Linear(96, 1)\n)\n\nC_network = torch.nn.Sequential(\n torch.nn.Linear(384, 144),\n torch.nn.CELU(0.1),\n torch.nn.Linear(144, 112),\n torch.nn.CELU(0.1),\n torch.nn.Linear(112, 96),\n torch.nn.CELU(0.1),\n torch.nn.Linear(96, 1)\n)\n\nN_network = torch.nn.Sequential(\n torch.nn.Linear(384, 128),\n torch.nn.CELU(0.1),\n torch.nn.Linear(128, 112),\n torch.nn.CELU(0.1),\n torch.nn.Linear(112, 96),\n torch.nn.CELU(0.1),\n torch.nn.Linear(96, 1)\n)\n\nO_network = torch.nn.Sequential(\n torch.nn.Linear(384, 128),\n torch.nn.CELU(0.1),\n torch.nn.Linear(128, 112),\n torch.nn.CELU(0.1),\n torch.nn.Linear(112, 96),\n torch.nn.CELU(0.1),\n torch.nn.Linear(96, 1)\n)\n\nnn = torchani.ANIModel([H_network, C_network, N_network, O_network])\nprint(nn)\n\n###############################################################################\n# Initialize the weights and biases.\n#\n# .. note::\n# Pytorch default initialization for the weights and biases in linear layers\n# is Kaiming uniform. See: `TORCH.NN.MODULES.LINEAR`_\n# We initialize the weights similarly but from the normal distribution.\n# The biases were initialized to zero.\n#\n# .. _TORCH.NN.MODULES.LINEAR:\n# https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear\n\n\ndef init_params(m):\n if isinstance(m, torch.nn.Linear):\n torch.nn.init.kaiming_normal_(m.weight, a=1.0)\n torch.nn.init.zeros_(m.bias)\n\n\nnn.apply(init_params)\n\n###############################################################################\n# Let's now create a pipeline of AEV Computer --> Neural Networks.\nmodel = torchani.nn.Sequential(aev_computer, nn).to(device)\n\n###############################################################################\n# Now let's setup the optimizers. NeuroChem uses Adam with decoupled weight decay\n# to updates the weights and Stochastic Gradient Descent (SGD) to update the biases.\n# Moreover, we need to specify different weight decay rate for different layes.\n#\n# .. note::\n#\n# The weight decay in `inputtrain.ipt`_ is named \"l2\", but it is actually not\n# L2 regularization. The confusion between L2 and weight decay is a common\n# mistake in deep learning. See: `Decoupled Weight Decay Regularization`_\n# Also note that the weight decay only applies to weight in the training\n# of ANI models, not bias.\n#\n# .. 
_Decoupled Weight Decay Regularization:\n# https://arxiv.org/abs/1711.05101\n\nAdamW = torchani.optim.AdamW([\n # H networks\n {'params': [H_network[0].weight]},\n {'params': [H_network[2].weight], 'weight_decay': 0.00001},\n {'params': [H_network[4].weight], 'weight_decay': 0.000001},\n {'params': [H_network[6].weight]},\n # C networks\n {'params': [C_network[0].weight]},\n {'params': [C_network[2].weight], 'weight_decay': 0.00001},\n {'params': [C_network[4].weight], 'weight_decay': 0.000001},\n {'params': [C_network[6].weight]},\n # N networks\n {'params': [N_network[0].weight]},\n {'params': [N_network[2].weight], 'weight_decay': 0.00001},\n {'params': [N_network[4].weight], 'weight_decay': 0.000001},\n {'params': [N_network[6].weight]},\n # O networks\n {'params': [O_network[0].weight]},\n {'params': [O_network[2].weight], 'weight_decay': 0.00001},\n {'params': [O_network[4].weight], 'weight_decay': 0.000001},\n {'params': [O_network[6].weight]},\n])\n\nSGD = torch.optim.SGD([\n # H networks\n {'params': [H_network[0].bias]},\n {'params': [H_network[2].bias]},\n {'params': [H_network[4].bias]},\n {'params': [H_network[6].bias]},\n # C networks\n {'params': [C_network[0].bias]},\n {'params': [C_network[2].bias]},\n {'params': [C_network[4].bias]},\n {'params': [C_network[6].bias]},\n # N networks\n {'params': [N_network[0].bias]},\n {'params': [N_network[2].bias]},\n {'params': [N_network[4].bias]},\n {'params': [N_network[6].bias]},\n # O networks\n {'params': [O_network[0].bias]},\n {'params': [O_network[2].bias]},\n {'params': [O_network[4].bias]},\n {'params': [O_network[6].bias]},\n], lr=1e-3)\n\n###############################################################################\n# Setting up a learning rate scheduler to do learning rate decay\nAdamW_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(AdamW, factor=0.5, patience=100, threshold=0)\nSGD_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(SGD, factor=0.5, patience=100, threshold=0)\n\n###############################################################################\n# Train the model by minimizing the MSE loss, until validation RMSE no longer\n# improves during a certain number of steps, decay the learning rate and repeat\n# the same process, stop until the learning rate is smaller than a threshold.\n#\n# We first read the checkpoint files to restart training. 
We use `latest.pt`\n# to store current training state.\nlatest_checkpoint = 'latest.pt'\n\n###############################################################################\n# Resume training from previously saved checkpoints:\nif os.path.isfile(latest_checkpoint):\n checkpoint = torch.load(latest_checkpoint)\n nn.load_state_dict(checkpoint['nn'])\n AdamW.load_state_dict(checkpoint['AdamW'])\n SGD.load_state_dict(checkpoint['SGD'])\n AdamW_scheduler.load_state_dict(checkpoint['AdamW_scheduler'])\n SGD_scheduler.load_state_dict(checkpoint['SGD_scheduler'])\n\n###############################################################################\n# During training, we need to validate on validation set and if validation error\n# is better than the best, then save the new best model to a checkpoint\n\n\ndef validate():\n # run validation\n mse_sum = torch.nn.MSELoss(reduction='sum')\n total_mse = 0.0\n count = 0\n for properties in validation:\n species = properties['species'].to(device)\n coordinates = properties['coordinates'].to(device).float()\n true_energies = properties['energies'].to(device).float()\n _, predicted_energies = model((species, coordinates))\n total_mse += mse_sum(predicted_energies, true_energies).item()\n count += predicted_energies.shape[0]\n return hartree2kcalmol(math.sqrt(total_mse / count))\n\n\n###############################################################################\n# We will also use TensorBoard to visualize our training process\ntensorboard = torch.utils.tensorboard.SummaryWriter()\n\n###############################################################################\n# Finally, we come to the training loop.\n#\n# In this tutorial, we are setting the maximum epoch to a very small number,\n# only to make this demo terminate fast. 
For serious training, this should be\n# set to a much larger value\nmse = torch.nn.MSELoss(reduction='none')\n\nprint(\"training starting from epoch\", AdamW_scheduler.last_epoch + 1)\nmax_epochs = 10\nearly_stopping_learning_rate = 1.0E-5\nbest_model_checkpoint = 'best.pt'\n\nfor _ in range(AdamW_scheduler.last_epoch + 1, max_epochs):\n rmse = validate()\n print('RMSE:', rmse, 'at epoch', AdamW_scheduler.last_epoch + 1)\n\n learning_rate = AdamW.param_groups[0]['lr']\n\n if learning_rate < early_stopping_learning_rate:\n break\n\n # checkpoint\n if AdamW_scheduler.is_better(rmse, AdamW_scheduler.best):\n torch.save(nn.state_dict(), best_model_checkpoint)\n\n AdamW_scheduler.step(rmse)\n SGD_scheduler.step(rmse)\n\n tensorboard.add_scalar('validation_rmse', rmse, AdamW_scheduler.last_epoch)\n tensorboard.add_scalar('best_validation_rmse', AdamW_scheduler.best, AdamW_scheduler.last_epoch)\n tensorboard.add_scalar('learning_rate', learning_rate, AdamW_scheduler.last_epoch)\n\n for i, properties in tqdm.tqdm(\n enumerate(training),\n total=len(training),\n desc=\"epoch {}\".format(AdamW_scheduler.last_epoch)\n ):\n species = properties['species'].to(device)\n coordinates = properties['coordinates'].to(device).float()\n true_energies = properties['energies'].to(device).float()\n num_atoms = (species >= 0).sum(dim=1, dtype=true_energies.dtype)\n _, predicted_energies = model((species, coordinates))\n\n loss = (mse(predicted_energies, true_energies) / num_atoms.sqrt()).mean()\n\n AdamW.zero_grad()\n SGD.zero_grad()\n loss.backward()\n AdamW.step()\n SGD.step()\n\n # write current batch loss to TensorBoard\n tensorboard.add_scalar('batch_loss', loss, AdamW_scheduler.last_epoch * len(training) + i)\n\n torch.save({\n 'nn': nn.state_dict(),\n 'AdamW': AdamW.state_dict(),\n 'SGD': SGD.state_dict(),\n 'AdamW_scheduler': AdamW_scheduler.state_dict(),\n 'SGD_scheduler': SGD_scheduler.state_dict(),\n }, latest_checkpoint)\n" ]
[ [ "torch.nn.Linear", "torch.nn.MSELoss", "torch.nn.CELU", "torch.optim.SGD", "torch.nn.init.kaiming_normal_", "torch.cuda.is_available", "torch.tensor", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.load", "torch.nn.init.zeros_", "torch.utils.tensorboard.SummaryWriter" ] ]
rwill128/baselines
[ "24dd0c80db01623bb1224ab044b64da3fbec63cc" ]
[ "baselines/deepq/build_graph.py" ]
[ "\"\"\"Deep Q learning graph\n\nThe functions in this file can are used to create the following functions:\n\n======= act ========\n\n Function to chose an action given an observation\n\n Parameters\n ----------\n observation: object\n Observation that can be feed into the output of make_obs_ph\n stochastic: bool\n if set to False all the actions are always deterministic (default False)\n update_eps_ph: float\n update epsilon a new value, if negative no update happens\n (default: no update)\n\n Returns\n -------\n Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for\n every element of the batch.\n\n\n======= act (in case of parameter noise) ========\n\n Function to chose an action given an observation\n\n Parameters\n ----------\n observation: object\n Observation that can be feed into the output of make_obs_ph\n stochastic: bool\n if set to False all the actions are always deterministic (default False)\n update_eps_ph: float\n update epsilon to a new value, if negative no update happens\n (default: no update)\n reset_ph: bool\n reset the perturbed policy by sampling a new perturbation\n update_param_noise_threshold_ph: float\n the desired threshold for the difference between non-perturbed and perturbed policy\n update_param_noise_scale_ph: bool\n whether or not to update the scale of the noise for the next time it is re-perturbed\n\n Returns\n -------\n Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for\n every element of the batch.\n\n\n======= train =======\n\n Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:\n\n td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))\n loss = huber_loss[td_error]\n\n Parameters\n ----------\n obs_t: object\n a batch of observations\n action: np.array\n actions that were selected upon seeing obs_t.\n dtype must be int32 and shape must be (batch_size,)\n reward: np.array\n immediate reward attained after executing those actions\n dtype must be float32 and shape must be (batch_size,)\n obs_tp1: object\n observations that followed obs_t\n done: np.array\n 1 if obs_t was the last observation in the episode and 0 otherwise\n obs_tp1 gets ignored, but must be of the valid shape.\n dtype must be float32 and shape must be (batch_size,)\n weight: np.array\n imporance weights for every element of the batch (gradient is multiplied\n by the importance weight) dtype must be float32 and shape must be (batch_size,)\n\n Returns\n -------\n td_error: np.array\n a list of differences between Q(s,a) and the target in Bellman's equation.\n dtype is float32 and shape is (batch_size,)\n\n======= update_target ========\n\n copy the parameters from optimized Q function to the target Q function.\n In Q learning we actually optimize the following error:\n\n Q(s,a) - (r + gamma * max_a' Q'(s', a'))\n\n Where Q' is lagging behind Q to stablize the learning. 
For example for Atari\n\n Q' is set to Q once every 10000 updates training steps.\n\n\"\"\"\nimport tensorflow as tf\nimport baselines.common.tf_util as U\n\n\ndef scope_vars(scope, trainable_only=False):\n \"\"\"\n Get variables inside a scope\n The scope can be specified as a string\n Parameters\n ----------\n scope: str or VariableScope\n scope in which the variables reside.\n trainable_only: bool\n whether or not to return only the variables that were marked as trainable.\n Returns\n -------\n vars: [tf.Variable]\n list of variables in `scope`.\n \"\"\"\n return tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,\n scope=scope if isinstance(scope, str) else scope.name\n )\n\n\ndef scope_name():\n \"\"\"Returns the name of current scope as a string, e.g. deepq/q_func\"\"\"\n return tf.compat.v1.get_variable_scope().name\n\n\ndef absolute_scope_name(relative_scope_name):\n \"\"\"Appends parent scope name to `relative_scope_name`\"\"\"\n return scope_name() + \"/\" + relative_scope_name\n\n\ndef default_param_noise_filter(var):\n if var not in tf.compat.v1.trainable_variables():\n # We never perturb non-trainable vars.\n return False\n if \"fully_connected\" in var.name:\n # We perturb fully-connected layers.\n return True\n\n # The remaining layers are likely conv or layer norm layers, which we do not wish to\n # perturb (in the former case because they only extract features, in the latter case because\n # we use them for normalization purposes). If you change your network, you will likely want\n # to re-consider which layers to perturb and which to keep untouched.\n return False\n\n\ndef build_act(make_obs_ph, q_func, num_actions, scope=\"deepq\", reuse=None):\n \"\"\"Creates the act function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.compat.v1.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. 
To be able to reuse the scope must be given.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n \"\"\"\n with tf.compat.v1.variable_scope(scope, reuse=reuse):\n observations_ph = make_obs_ph(\"observation\")\n stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name=\"stochastic\")\n update_eps_ph = tf.compat.v1.placeholder(tf.float32, (), name=\"update_eps\")\n\n eps = tf.compat.v1.get_variable(\"eps\", (), initializer=tf.constant_initializer(0))\n\n q_values = q_func(observations_ph.get(), num_actions, scope=\"q_func\")\n deterministic_actions = tf.argmax(q_values, axis=1)\n\n batch_size = tf.shape(observations_ph.get())[0]\n random_actions = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)\n update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))\n _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],\n outputs=output_actions,\n givens={update_eps_ph: -1.0, stochastic_ph: True},\n updates=[update_eps_expr])\n\n def act(ob, stochastic=True, update_eps=-1):\n return _act(ob, stochastic, update_eps)\n\n return act\n\n\ndef build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=\"deepq\", reuse=None,\n param_noise_filter_func=None):\n \"\"\"Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):\n\n Parameters\n ----------\n make_obs_ph: str -> tf.compat.v1.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. 
If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n \"\"\"\n if param_noise_filter_func is None:\n param_noise_filter_func = default_param_noise_filter\n\n with tf.compat.v1.variable_scope(scope, reuse=reuse):\n observations_ph = make_obs_ph(\"observation\")\n stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name=\"stochastic\")\n update_eps_ph = tf.compat.v1.placeholder(tf.float32, (), name=\"update_eps\")\n update_param_noise_threshold_ph = tf.compat.v1.placeholder(tf.float32, (), name=\"update_param_noise_threshold\")\n update_param_noise_scale_ph = tf.compat.v1.placeholder(tf.bool, (), name=\"update_param_noise_scale\")\n reset_ph = tf.compat.v1.placeholder(tf.bool, (), name=\"reset\")\n\n eps = tf.compat.v1.get_variable(\"eps\", (), initializer=tf.constant_initializer(0))\n param_noise_scale = tf.compat.v1.get_variable(\"param_noise_scale\", (),\n initializer=tf.constant_initializer(0.01), trainable=False)\n param_noise_threshold = tf.compat.v1.get_variable(\"param_noise_threshold\", (),\n initializer=tf.constant_initializer(0.05), trainable=False)\n\n # Unmodified Q.\n q_values = q_func(observations_ph.get(), num_actions, scope=\"q_func\")\n\n # Perturbable Q used for the actual rollout.\n q_values_perturbed = q_func(observations_ph.get(), num_actions, scope=\"perturbed_q_func\")\n\n # We have to wrap this code into a function due to the way tf.cond() works. See\n # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for\n # a more detailed discussion.\n def perturb_vars(original_scope, perturbed_scope):\n all_vars = scope_vars(absolute_scope_name(original_scope))\n all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))\n assert len(all_vars) == len(all_perturbed_vars)\n perturb_ops = []\n for var, perturbed_var in zip(all_vars, all_perturbed_vars):\n if param_noise_filter_func(perturbed_var):\n # Perturb this variable.\n op = tf.compat.v1.assign(perturbed_var, var + tf.compat.v1.random_normal(shape=tf.shape(var), mean=0.,\n stddev=param_noise_scale))\n else:\n # Do not perturb, just assign.\n op = tf.assign(perturbed_var, var)\n perturb_ops.append(op)\n assert len(perturb_ops) == len(all_vars)\n return tf.group(*perturb_ops)\n\n # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy\n # of the network and measures the effect of that perturbation in action space. 
If the perturbation\n # is too big, reduce scale of perturbation, otherwise increase.\n q_values_adaptive = q_func(observations_ph.get(), num_actions, scope=\"adaptive_q_func\")\n perturb_for_adaption = perturb_vars(original_scope=\"q_func\", perturbed_scope=\"adaptive_q_func\")\n kl = tf.reduce_sum(\n tf.nn.softmax(q_values) * (tf.compat.v1.log(tf.nn.softmax(q_values)) - tf.compat.v1.log(tf.nn.softmax(q_values_adaptive))),\n axis=-1)\n mean_kl = tf.reduce_mean(kl)\n\n def update_scale():\n with tf.control_dependencies([perturb_for_adaption]):\n update_scale_expr = tf.cond(mean_kl < param_noise_threshold,\n lambda: param_noise_scale.assign(param_noise_scale * 1.01),\n lambda: param_noise_scale.assign(param_noise_scale / 1.01),\n )\n return update_scale_expr\n\n # Functionality to update the threshold for parameter space noise.\n update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,\n lambda: update_param_noise_threshold_ph,\n lambda: param_noise_threshold))\n\n # Put everything together.\n deterministic_actions = tf.argmax(q_values_perturbed, axis=1)\n batch_size = tf.shape(observations_ph.get())[0]\n random_actions = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)\n update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))\n updates = [\n update_eps_expr,\n tf.cond(reset_ph, lambda: perturb_vars(original_scope=\"q_func\", perturbed_scope=\"perturbed_q_func\"),\n lambda: tf.group(*[])),\n tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),\n update_param_noise_threshold_expr,\n ]\n _act = U.function(\n inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph,\n update_param_noise_scale_ph],\n outputs=output_actions,\n givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False,\n update_param_noise_scale_ph: False},\n updates=updates)\n\n def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True,\n update_eps=-1):\n return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)\n\n return act\n\n\ndef build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,\n double_q=True, scope=\"deepq\", reuse=None, param_noise=False, param_noise_filter_func=None):\n \"\"\"Creates the train function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.compat.v1.placeholder or TfInput\n a function that takes a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions\n reuse: bool\n whether or not to reuse the graph variables\n optimizer: tf.train.Optimizer\n optimizer to use for the Q-learning objective.\n 
grad_norm_clipping: float or None\n clip gradient norms to this value. If None no clipping is performed.\n gamma: float\n discount rate.\n double_q: bool\n if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).\n In general it is a good idea to keep it enabled.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise: bool\n whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n train: (object, np.array, np.array, object, np.array, np.array) -> np.array\n optimize the error in Bellman's equation.\n` See the top of the file for details.\n update_target: () -> ()\n copy the parameters from optimized Q function to the target Q function.\n` See the top of the file for details.\n debug: {str: function}\n a bunch of functions to print debug data like q_values.\n \"\"\"\n if param_noise:\n act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,\n param_noise_filter_func=param_noise_filter_func)\n else:\n act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)\n\n with tf.compat.v1.variable_scope(scope, reuse=reuse):\n # set up placeholders\n obs_t_input = make_obs_ph(\"obs_t\")\n act_t_ph = tf.compat.v1.placeholder(tf.int32, [None], name=\"action\")\n rew_t_ph = tf.compat.v1.placeholder(tf.float32, [None], name=\"reward\")\n obs_tp1_input = make_obs_ph(\"obs_tp1\")\n done_mask_ph = tf.compat.v1.placeholder(tf.float32, [None], name=\"done\")\n importance_weights_ph = tf.compat.v1.placeholder(tf.float32, [None], name=\"weight\")\n\n # q network evaluation\n q_t = q_func(obs_t_input.get(), num_actions, scope=\"q_func\", reuse=True) # reuse parameters from act\n q_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,\n scope=tf.compat.v1.get_variable_scope().name + \"/q_func\")\n\n # target q network evalution\n q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope=\"target_q_func\")\n target_q_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,\n scope=tf.compat.v1.get_variable_scope().name + \"/target_q_func\")\n\n # q scores for actions which we know were selected in the given state.\n q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)\n\n # compute estimate of best possible value starting from state at t + 1\n if double_q:\n q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope=\"q_func\", reuse=True)\n q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)\n q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)\n else:\n q_tp1_best = tf.reduce_max(q_tp1, 1)\n q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best\n\n # compute RHS of bellman equation\n q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked\n\n # compute the error (potentially clipped)\n td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)\n errors = U.huber_loss(td_error)\n weighted_error = tf.reduce_mean(importance_weights_ph * errors)\n\n # compute 
optimization op (potentially with gradient clipping)\n if grad_norm_clipping is not None:\n gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)\n for i, (grad, var) in enumerate(gradients):\n if grad is not None:\n gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)\n optimize_expr = optimizer.apply_gradients(gradients)\n else:\n optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_expr = []\n for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),\n sorted(target_q_func_vars, key=lambda v: v.name)):\n update_target_expr.append(var_target.assign(var))\n update_target_expr = tf.group(*update_target_expr)\n\n # Create callable functions\n train = U.function(\n inputs=[\n obs_t_input,\n act_t_ph,\n rew_t_ph,\n obs_tp1_input,\n done_mask_ph,\n importance_weights_ph\n ],\n outputs=td_error,\n updates=[optimize_expr]\n )\n update_target = U.function([], [], updates=[update_target_expr])\n\n q_values = U.function([obs_t_input], q_t)\n\n return act_f, train, update_target, {'q_values': q_values}\n" ]
[ [ "tensorflow.constant_initializer", "tensorflow.group", "tensorflow.compat.v1.get_variable_scope", "tensorflow.compat.v1.trainable_variables", "tensorflow.clip_by_norm", "tensorflow.stack", "tensorflow.control_dependencies", "tensorflow.nn.softmax", "tensorflow.one_hot", "tensorflow.compat.v1.placeholder", "tensorflow.shape", "tensorflow.argmax", "tensorflow.Variable", "tensorflow.where", "tensorflow.compat.v1.variable_scope", "tensorflow.assign", "tensorflow.cond", "tensorflow.reduce_max", "tensorflow.reduce_mean", "tensorflow.stop_gradient" ] ]
eshan-rg/deep-neural-networks
[ "1f34ac6572b3d5e9dcb25edc888a0df094458c25" ]
[ "MNIST dataset - FCNN/FCNN.py" ]
[ "import keras\r\nimport numpy as np\r\n\r\nfrom keras.datasets import mnist\r\n(x_train,y_train),(x_test,y_test)=mnist.load_data()\r\n\r\nx_train = x_train.reshape(60000,784)\r\nx_test = x_test.reshape(10000,784)\r\nx_train = x_train/255.0\r\nx_test = x_test/255.0\r\n\r\nfrom keras.utils import to_categorical\r\ny_train = to_categorical(y_train,num_classes = 10)\r\ny_test = to_categorical(y_test,num_classes = 10)\r\n\r\nfrom keras.layers import Input, Dense, Activation\r\nfrom keras.models import Model\r\n\r\nimg_input = Input(shape=(784,))\r\nx = Dense(units = 30, activation = \"relu\")(img_input)\r\ny = Dense(units = 10, activation = \"sigmoid\")(x)\r\n\r\nmodel= Model(inputs = img_input, outputs=y)\r\nstringlist = []\r\nmodel.summary(print_fn=lambda x: stringlist.append(x))\r\nshort_model_summary = \"\\n\".join(stringlist)\r\nprint(short_model_summary)\r\n#print(model.summary)\r\n\r\nmodel.compile(optimizer=\"adam\",loss=\"categorical_crossentropy\",metrics=[\"accuracy\"])\r\nmodel.fit(x_train,y_train, batch_size=150,epochs=4, validation_split=0.2)\r\n\r\n\r\nprint(model.metrics_names)\r\nmodel.evaluate(x_test,y_test, batch_size = 128)\r\n\r\npreds=model.predict(x_test,batch_size = 125)\r\npreds = preds.argmax(axis = 1)\r\ny_test = y_test.argmax(axis = 1)\r\n\r\nprint(preds[:10])\r\nprint(y_test[:10])\r\n\r\nfrom sklearn.metrics import classification_report\r\nprint(classification_report(y_test, preds))" ]
[ [ "sklearn.metrics.classification_report" ] ]
daxiongshu/cupy
[ "a8dfcd66d89c8e66a60e4b7272f95a15c26fc907" ]
[ "tests/cupy_tests/sorting_tests/test_search.py" ]
[ "import unittest\n\nimport numpy\nimport pytest\n\nimport cupy\nimport cupy.core._accelerator as _acc\nfrom cupy.core import _cub_reduction\nfrom cupy import testing\n\n\n@testing.gpu\nclass TestSearch(unittest.TestCase):\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmax_all(self, xp, dtype):\n a = testing.shaped_random((2, 3), xp, dtype)\n return a.argmax()\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_external_argmax_all(self, xp, dtype):\n a = testing.shaped_random((2, 3), xp, dtype)\n return xp.argmax(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_argmax_nan(self, xp, dtype):\n a = xp.array([float('nan'), -1, 1], dtype)\n return a.argmax()\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmax_axis_large(self, xp, dtype):\n a = testing.shaped_random((3, 1000), xp, dtype)\n return a.argmax(axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_external_argmax_axis_large(self, xp, dtype):\n a = testing.shaped_random((3, 1000), xp, dtype)\n return xp.argmax(a, axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmax_axis0(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return a.argmax(axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmax_axis1(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return a.argmax(axis=1)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmax_axis2(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return a.argmax(axis=2)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmax_tie(self, xp, dtype):\n a = xp.array([0, 5, 2, 3, 4, 5], dtype)\n return a.argmax()\n\n @testing.for_all_dtypes(no_complex=True)\n def test_argmax_zero_size(self, dtype):\n for xp in (numpy, cupy):\n a = testing.shaped_random((0, 1), xp, dtype)\n with pytest.raises(ValueError):\n a.argmax()\n\n @testing.for_all_dtypes(no_complex=True)\n def test_argmax_zero_size_axis0(self, dtype):\n for xp in (numpy, cupy):\n a = testing.shaped_random((0, 1), xp, dtype)\n with pytest.raises(ValueError):\n a.argmax(axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmax_zero_size_axis1(self, xp, dtype):\n a = testing.shaped_random((0, 1), xp, dtype)\n return a.argmax(axis=1)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmin_all(self, xp, dtype):\n a = testing.shaped_random((2, 3), xp, dtype)\n return a.argmin()\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_argmin_nan(self, xp, dtype):\n a = xp.array([float('nan'), -1, 1], dtype)\n return a.argmin()\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_external_argmin_all(self, xp, dtype):\n a = testing.shaped_random((2, 3), xp, dtype)\n return xp.argmin(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmin_axis_large(self, xp, dtype):\n a = testing.shaped_random((3, 1000), xp, dtype)\n return a.argmin(axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_external_argmin_axis_large(self, xp, dtype):\n a = 
testing.shaped_random((3, 1000), xp, dtype)\n return xp.argmin(a, axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmin_axis0(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return a.argmin(axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmin_axis1(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return a.argmin(axis=1)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmin_axis2(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return a.argmin(axis=2)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmin_tie(self, xp, dtype):\n a = xp.array([0, 1, 2, 3, 0, 5], dtype)\n return a.argmin()\n\n @testing.for_all_dtypes(no_complex=True)\n def test_argmin_zero_size(self, dtype):\n for xp in (numpy, cupy):\n a = testing.shaped_random((0, 1), xp, dtype)\n with pytest.raises(ValueError):\n return a.argmin()\n\n @testing.for_all_dtypes(no_complex=True)\n def test_argmin_zero_size_axis0(self, dtype):\n for xp in (numpy, cupy):\n a = testing.shaped_random((0, 1), xp, dtype)\n with pytest.raises(ValueError):\n a.argmin(axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_argmin_zero_size_axis1(self, xp, dtype):\n a = testing.shaped_random((0, 1), xp, dtype)\n return a.argmin(axis=1)\n\n\n# This class compares CUB results against NumPy's\n# TODO(leofang): test axis after support is added\n@testing.parameterize(*testing.product({\n 'shape': [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)],\n 'order_and_axis': (('C', -1), ('C', None), ('F', 0), ('F', None)),\n 'backend': ('device', 'block'),\n}))\n@testing.gpu\n@unittest.skipUnless(cupy.cuda.cub.available, 'The CUB routine is not enabled')\nclass TestCubReduction(unittest.TestCase):\n\n def setUp(self):\n self.order, self.axis = self.order_and_axis\n self.old_routine_accelerators = _acc.get_routine_accelerators()\n self.old_reduction_accelerators = _acc.get_reduction_accelerators()\n if self.backend == 'device':\n if self.axis is not None:\n raise unittest.SkipTest('does not support')\n _acc.set_routine_accelerators(['cub'])\n _acc.set_reduction_accelerators([])\n elif self.backend == 'block':\n _acc.set_routine_accelerators([])\n _acc.set_reduction_accelerators(['cub'])\n\n def tearDown(self):\n _acc.set_routine_accelerators(self.old_routine_accelerators)\n _acc.set_reduction_accelerators(self.old_reduction_accelerators)\n\n @testing.for_dtypes('bhilBHILefdFD')\n @testing.numpy_cupy_allclose(rtol=1E-5, contiguous_check=False)\n def test_cub_argmin(self, xp, dtype):\n a = testing.shaped_random(self.shape, xp, dtype)\n if self.order == 'C':\n a = xp.ascontiguousarray(a)\n else:\n a = xp.asfortranarray(a)\n\n if xp is numpy:\n return a.argmin(axis=self.axis)\n\n # xp is cupy, first ensure we really use CUB\n ret = cupy.empty(()) # Cython checks return type, need to fool it\n if self.backend == 'device':\n func_name = 'cupy.core._routines_statistics.cub.'\n func_name += 'device_reduce'\n with testing.AssertFunctionIsCalled(func_name, return_value=ret):\n a.argmin(axis=self.axis)\n elif self.backend == 'block':\n # this is the only function we can mock; the rest is cdef'd\n func_name = 'cupy.core._cub_reduction.'\n func_name += '_SimpleCubReductionKernel_get_cached_function'\n func = _cub_reduction._SimpleCubReductionKernel_get_cached_function\n if self.axis is not None 
and len(self.shape) > 1:\n times_called = 1 # one pass\n else:\n times_called = 2 # two passes\n with testing.AssertFunctionIsCalled(\n func_name, wraps=func, times_called=times_called):\n a.argmin(axis=self.axis)\n # ...then perform the actual computation\n return a.argmin(axis=self.axis)\n\n @testing.for_dtypes('bhilBHILefdFD')\n @testing.numpy_cupy_allclose(rtol=1E-5, contiguous_check=False)\n def test_cub_argmax(self, xp, dtype):\n a = testing.shaped_random(self.shape, xp, dtype)\n if self.order == 'C':\n a = xp.ascontiguousarray(a)\n else:\n a = xp.asfortranarray(a)\n\n if xp is numpy:\n return a.argmax(axis=self.axis)\n\n # xp is cupy, first ensure we really use CUB\n ret = cupy.empty(()) # Cython checks return type, need to fool it\n if self.backend == 'device':\n func_name = 'cupy.core._routines_statistics.cub.'\n func_name += 'device_reduce'\n with testing.AssertFunctionIsCalled(func_name, return_value=ret):\n a.argmax(axis=self.axis)\n elif self.backend == 'block':\n # this is the only function we can mock; the rest is cdef'd\n func_name = 'cupy.core._cub_reduction.'\n func_name += '_SimpleCubReductionKernel_get_cached_function'\n func = _cub_reduction._SimpleCubReductionKernel_get_cached_function\n if self.axis is not None and len(self.shape) > 1:\n times_called = 1 # one pass\n else:\n times_called = 2 # two passes\n with testing.AssertFunctionIsCalled(\n func_name, wraps=func, times_called=times_called):\n a.argmax(axis=self.axis)\n # ...then perform the actual computation\n return a.argmax(axis=self.axis)\n\n\n@testing.gpu\n@testing.parameterize(*testing.product({\n 'func': ['argmin', 'argmax'],\n 'is_module': [True, False],\n 'shape': [(3, 4), ()],\n}))\nclass TestArgMinMaxDtype(unittest.TestCase):\n\n @testing.for_dtypes(\n dtypes=[numpy.int8, numpy.int16, numpy.int32, numpy.int64],\n name='result_dtype')\n @testing.for_all_dtypes(name='in_dtype')\n def test_argminmax_dtype(self, in_dtype, result_dtype):\n a = testing.shaped_random(self.shape, cupy, in_dtype)\n if self.is_module:\n func = getattr(cupy, self.func)\n y = func(a, dtype=result_dtype)\n else:\n func = getattr(a, self.func)\n y = func(dtype=result_dtype)\n assert y.shape == ()\n assert y.dtype == result_dtype\n\n\n@testing.parameterize(\n {'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},\n {'cond_shape': (4,), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},\n {'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (3, 4)},\n {'cond_shape': (3, 4), 'x_shape': (2, 3, 4), 'y_shape': (4,)},\n)\n@testing.gpu\nclass TestWhereTwoArrays(unittest.TestCase):\n\n @testing.for_all_dtypes_combination(\n names=['cond_type', 'x_type', 'y_type'])\n @testing.numpy_cupy_allclose()\n def test_where_two_arrays(self, xp, cond_type, x_type, y_type):\n m = testing.shaped_random(self.cond_shape, xp, xp.bool_)\n # Almost all values of a matrix `shaped_random` makes are not zero.\n # To make a sparse matrix, we need multiply `m`.\n cond = testing.shaped_random(self.cond_shape, xp, cond_type) * m\n x = testing.shaped_random(self.x_shape, xp, x_type, seed=0)\n y = testing.shaped_random(self.y_shape, xp, y_type, seed=1)\n return xp.where(cond, x, y)\n\n\n@testing.parameterize(\n {'cond_shape': (2, 3, 4)},\n {'cond_shape': (4,)},\n {'cond_shape': (2, 3, 4)},\n {'cond_shape': (3, 4)},\n)\n@testing.gpu\nclass TestWhereCond(unittest.TestCase):\n\n @testing.for_all_dtypes()\n @testing.numpy_cupy_array_equal()\n def test_where_cond(self, xp, dtype):\n m = testing.shaped_random(self.cond_shape, xp, xp.bool_)\n cond = 
testing.shaped_random(self.cond_shape, xp, dtype) * m\n return xp.where(cond)\n\n\n@testing.gpu\nclass TestWhereError(unittest.TestCase):\n\n def test_one_argument(self):\n for xp in (numpy, cupy):\n cond = testing.shaped_random((3, 4), xp, dtype=xp.bool_)\n x = testing.shaped_random((2, 3, 4), xp, xp.int32)\n with pytest.raises(ValueError):\n xp.where(cond, x)\n\n\n@testing.parameterize(\n {'array': numpy.random.randint(0, 2, (20,))},\n {'array': numpy.random.randn(3, 2, 4)},\n {'array': numpy.empty((0,))},\n {'array': numpy.empty((0, 2))},\n {'array': numpy.empty((0, 2, 0))},\n)\n@testing.gpu\nclass TestNonzero(unittest.TestCase):\n\n @testing.for_all_dtypes()\n @testing.numpy_cupy_array_equal()\n def test_nonzero(self, xp, dtype):\n array = xp.array(self.array, dtype=dtype)\n return xp.nonzero(array)\n\n\n@testing.parameterize(\n {'array': numpy.array(0)},\n {'array': numpy.array(1)},\n)\n@testing.gpu\n@testing.with_requires('numpy>=1.17.0')\nclass TestNonzeroZeroDimension(unittest.TestCase):\n\n @testing.for_all_dtypes()\n def test_nonzero(self, dtype):\n for xp in (numpy, cupy):\n array = xp.array(self.array, dtype=dtype)\n with pytest.raises(DeprecationWarning):\n xp.nonzero(array)\n\n\n@testing.parameterize(\n {'array': numpy.random.randint(0, 2, (20,))},\n {'array': numpy.random.randn(3, 2, 4)},\n {'array': numpy.array(0)},\n {'array': numpy.array(1)},\n {'array': numpy.empty((0,))},\n {'array': numpy.empty((0, 2))},\n {'array': numpy.empty((0, 2, 0))},\n)\n@testing.gpu\nclass TestFlatNonzero(unittest.TestCase):\n\n @testing.for_all_dtypes()\n @testing.numpy_cupy_array_equal()\n def test_flatnonzero(self, xp, dtype):\n array = xp.array(self.array, dtype=dtype)\n return xp.flatnonzero(array)\n\n\n@testing.parameterize(\n {'array': numpy.random.randint(0, 2, (20,))},\n {'array': numpy.random.randn(3, 2, 4)},\n {'array': numpy.empty((0,))},\n {'array': numpy.empty((0, 2))},\n {'array': numpy.empty((0, 2, 0))},\n)\n@testing.gpu\nclass TestArgwhere(unittest.TestCase):\n\n @testing.for_all_dtypes()\n @testing.numpy_cupy_array_equal()\n def test_argwhere(self, xp, dtype):\n array = xp.array(self.array, dtype=dtype)\n return xp.argwhere(array)\n\n\n@testing.parameterize(\n {'array': cupy.array(1)},\n)\n@testing.gpu\nclass TestArgwhereZeroDimension(unittest.TestCase):\n\n def test_argwhere(self):\n with testing.assert_warns(DeprecationWarning):\n return cupy.nonzero(self.array)\n\n\n@testing.gpu\nclass TestNanArgMin(unittest.TestCase):\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmin_all(self, xp, dtype):\n a = testing.shaped_random((2, 3), xp, dtype)\n return xp.nanargmin(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_nanargmin_nan(self, xp, dtype):\n a = xp.array([float('nan'), -1, 1], dtype)\n return xp.nanargmin(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_nanargmin_nan2(self, xp, dtype):\n a = xp.array([float('nan'), float('nan'), -1, 1], dtype)\n return xp.nanargmin(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_nanargmin_nan3(self, xp, dtype):\n a = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)\n return xp.nanargmin(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_nanargmin_nan4(self, xp, dtype):\n a = xp.array([-1, 1, 1.0, -2.0, float('nan'), 
float('nan')],\n dtype)\n return xp.nanargmin(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_nanargmin_nan5(self, xp, dtype):\n a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1],\n dtype)\n return xp.nanargmin(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmin_axis_large(self, xp, dtype):\n a = testing.shaped_random((3, 1000), xp, dtype)\n return xp.nanargmin(a, axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmin_axis0(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return xp.nanargmin(a, axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmin_axis1(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return xp.nanargmin(a, axis=1)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmin_axis2(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return xp.nanargmin(a, axis=2)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmin_tie(self, xp, dtype):\n a = xp.array([0, 5, 2, 3, 4, 5], dtype)\n return xp.nanargmin(a)\n\n @testing.for_all_dtypes(no_complex=True)\n def test_nanargmin_zero_size(self, dtype):\n for xp in (numpy, cupy):\n a = testing.shaped_random((0, 1), xp, dtype)\n with pytest.raises(ValueError):\n xp.nanargmin(a)\n\n @testing.for_all_dtypes(no_complex=True)\n def test_nanargmin_zero_size_axis0(self, dtype):\n for xp in (numpy, cupy):\n a = testing.shaped_random((0, 1), xp, dtype)\n with pytest.raises(ValueError):\n return xp.nanargmin(a, axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmin_zero_size_axis1(self, xp, dtype):\n a = testing.shaped_random((0, 1), xp, dtype)\n return xp.nanargmin(a, axis=1)\n\n\n@testing.gpu\nclass TestNanArgMax(unittest.TestCase):\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmax_all(self, xp, dtype):\n a = testing.shaped_random((2, 3), xp, dtype)\n return xp.nanargmax(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_nanargmax_nan(self, xp, dtype):\n a = xp.array([float('nan'), -1, 1], dtype)\n return xp.nanargmax(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_nanargmax_nan2(self, xp, dtype):\n a = xp.array([float('nan'), float('nan'), -1, 1], dtype)\n return xp.nanargmax(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_nanargmax_nan3(self, xp, dtype):\n a = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)\n return xp.nanargmax(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_nanargmax_nan4(self, xp, dtype):\n a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')],\n dtype)\n return xp.nanargmax(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose(accept_error=ValueError)\n def test_nanargmax_nan5(self, xp, dtype):\n a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1],\n dtype)\n return xp.nanargmax(a)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmax_axis_large(self, xp, dtype):\n a = 
testing.shaped_random((3, 1000), xp, dtype)\n return xp.nanargmax(a, axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmax_axis0(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return xp.nanargmax(a, axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmax_axis1(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return xp.nanargmax(a, axis=1)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmax_axis2(self, xp, dtype):\n a = testing.shaped_random((2, 3, 4), xp, dtype)\n return xp.nanargmax(a, axis=2)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmax_tie(self, xp, dtype):\n a = xp.array([0, 5, 2, 3, 4, 5], dtype)\n return xp.nanargmax(a)\n\n @testing.for_all_dtypes(no_complex=True)\n def test_nanargmax_zero_size(self, dtype):\n for xp in (numpy, cupy):\n a = testing.shaped_random((0, 1), xp, dtype)\n with pytest.raises(ValueError):\n xp.nanargmax(a)\n\n @testing.for_all_dtypes(no_complex=True)\n def test_nanargmax_zero_size_axis0(self, dtype):\n for xp in (numpy, cupy):\n a = testing.shaped_random((0, 1), xp, dtype)\n with pytest.raises(ValueError):\n return xp.nanargmax(a, axis=0)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_cupy_allclose()\n def test_nanargmax_zero_size_axis1(self, xp, dtype):\n a = testing.shaped_random((0, 1), xp, dtype)\n return xp.nanargmax(a, axis=1)\n\n\n@testing.gpu\n@testing.parameterize(*testing.product(\n {'bins': [\n [],\n [0, 1, 2, 4, 10],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [0.0, 1.0, 2.5, 4.0, 10.0],\n [-1.0, 1.0, 2.5, 4.0, 20.0],\n [1.5, 2.5, 4.0, 6.0],\n [float('-inf'), 1.5, 2.5, 4.0, 6.0],\n [1.5, 2.5, 4.0, 6.0, float('inf')],\n [float('-inf'), 1.5, 2.5, 4.0, 6.0, float('inf')],\n [0.0, 1.0, 1.0, 4.0, 4.0, 10.0],\n [0.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 10.0],\n ],\n 'side': ['left', 'right'],\n 'shape': [(), (10,), (6, 3, 3)]})\n)\nclass TestSearchSorted(unittest.TestCase):\n\n @testing.for_all_dtypes(no_bool=True)\n @testing.numpy_cupy_array_equal()\n def test_searchsorted(self, xp, dtype):\n x = testing.shaped_arange(self.shape, xp, dtype)\n bins = xp.array(self.bins)\n y = xp.searchsorted(bins, x, side=self.side)\n return y,\n\n\n@testing.gpu\n@testing.parameterize(\n {'side': 'left'},\n {'side': 'right'})\nclass TestSearchSortedNanInf(unittest.TestCase):\n\n @testing.numpy_cupy_array_equal()\n def test_searchsorted_nanbins(self, xp):\n x = testing.shaped_arange((10,), xp, xp.float64)\n bins = xp.array([0, 1, 2, 4, 10, float('nan')])\n y = xp.searchsorted(bins, x, side=self.side)\n return y,\n\n @testing.numpy_cupy_array_equal()\n def test_searchsorted_nan(self, xp):\n x = testing.shaped_arange((10,), xp, xp.float64)\n x[5] = float('nan')\n bins = xp.array([0, 1, 2, 4, 10])\n y = xp.searchsorted(bins, x, side=self.side)\n return y,\n\n @testing.numpy_cupy_array_equal()\n def test_searchsorted_nan_last(self, xp):\n x = testing.shaped_arange((10,), xp, xp.float64)\n x[-1] = float('nan')\n bins = xp.array([0, 1, 2, 4, float('nan')])\n y = xp.searchsorted(bins, x, side=self.side)\n return y,\n\n @testing.numpy_cupy_array_equal()\n def test_searchsorted_nan_last_repeat(self, xp):\n x = testing.shaped_arange((10,), xp, xp.float64)\n x[-1] = float('nan')\n bins = xp.array([0, 1, 2, float('nan'), float('nan')])\n y = xp.searchsorted(bins, x, side=self.side)\n return y,\n\n @testing.numpy_cupy_array_equal()\n 
def test_searchsorted_all_nans(self, xp):\n x = testing.shaped_arange((10,), xp, xp.float64)\n x[-1] = float('nan')\n bins = xp.array([float('nan'), float('nan'), float('nan'),\n float('nan'), float('nan')])\n y = xp.searchsorted(bins, x, side=self.side)\n return y,\n\n @testing.numpy_cupy_array_equal()\n def test_searchsorted_inf(self, xp):\n x = testing.shaped_arange((10,), xp, xp.float64)\n x[5] = float('inf')\n bins = xp.array([0, 1, 2, 4, 10])\n y = xp.searchsorted(bins, x, side=self.side)\n return y,\n\n @testing.numpy_cupy_array_equal()\n def test_searchsorted_minf(self, xp):\n x = testing.shaped_arange((10,), xp, xp.float64)\n x[5] = float('-inf')\n bins = xp.array([0, 1, 2, 4, 10])\n y = xp.searchsorted(bins, x, side=self.side)\n return y,\n\n\n@testing.gpu\nclass TestSearchSortedInvalid(unittest.TestCase):\n\n # Cant test unordered bins due to numpy undefined\n # behavior for searchsorted\n\n def test_searchsorted_ndbins(self):\n for xp in (numpy, cupy):\n x = testing.shaped_arange((10,), xp, xp.float64)\n bins = xp.array([[10, 4], [2, 1], [7, 8]])\n with pytest.raises(ValueError):\n xp.searchsorted(bins, x)\n\n\n@testing.gpu\nclass TestSearchSortedWithSorter(unittest.TestCase):\n\n @testing.numpy_cupy_array_equal()\n def test_sorter(self, xp):\n x = testing.shaped_arange((12,), xp, xp.float64)\n bins = xp.array([10, 4, 2, 1, 8])\n sorter = xp.array([3, 2, 1, 4, 0])\n y = xp.searchsorted(bins, x, sorter=sorter)\n return y,\n\n def test_invalid_sorter(self):\n for xp in (numpy, cupy):\n x = testing.shaped_arange((12,), xp, xp.float64)\n bins = xp.array([10, 4, 2, 1, 8])\n sorter = xp.array([0])\n with pytest.raises(ValueError):\n xp.searchsorted(bins, x, sorter=sorter)\n\n def test_nonint_sorter(self):\n for xp in (numpy, cupy):\n x = testing.shaped_arange((12,), xp, xp.float64)\n bins = xp.array([10, 4, 2, 1, 8])\n sorter = xp.array([], dtype=xp.float64)\n with pytest.raises(TypeError):\n xp.searchsorted(bins, x, sorter=sorter)\n" ]
[ [ "numpy.array", "numpy.random.randn", "numpy.random.randint", "numpy.empty" ] ]
elishatofunmi/ReinEnv
[ "ad86203d3f4bddc7a8239cefdfa31c1a8e5e9af8" ]
[ "pytennis/play.py" ]
[ "from keras.utils import to_categorical\nimport tensorflow as tf\nimport pygame\n\nclass pytennis:\n def __init__(self, fps = 50):\n self.net = Network(150,450,100,600)\n self.updateRewardA = 0\n self.updateRewardB = 0\n self.updateIter = 0\n self.lossA = 0\n self.lossB = 0\n \n # Testing\n self.net = Network(150, 450, 100, 600)\n self.NetworkA = self.net.network(300, ysource=100, Ynew=600) # Network A\n self.NetworkB = self.net.network(200, ysource=600, Ynew=100) # Network B\n # NetworkA\n\n # display test plot of network A\n #sns.jointplot(NetworkA[0], NetworkA[1])\n\n # display test plot of network B\n #sns.jointplot(NetworkB[0], NetworkB[1])\n \n \n \n self.out = self.net.DefaultToPosition(250)\n \n \n self.lastxcoordinate = 350\n \n pygame.init()\n self.BLACK = ( 0,0,0)\n \n self.myFontA = pygame.font.SysFont(\"Times New Roman\", 25)\n self.myFontB = pygame.font.SysFont(\"Times New Roman\", 25)\n self.myFontIter = pygame.font.SysFont('Times New Roman', 25)\n \n \n self.FPS = fps\n self.fpsClock = pygame.time.Clock()\n \n def setWindow(self):\n\n # set up the window\n self.DISPLAYSURF = pygame.display.set_mode((600, 700), 0, 32)\n pygame.display.set_caption('REINFORCEMENT LEARNING (Discrete Mathematics) - TABLE TENNIS')\n # set up the colors\n self.BLACK = ( 0,0,0)\n self.WHITE = (255, 255, 255)\n self.RED= (255,0,0)\n self.GREEN = ( 0, 255,0)\n self.BLUE = ( 0,0, 255)\n \n return\n \n \n\n \n def display(self):\n self.setWindow()\n self.DISPLAYSURF.fill(self.WHITE)\n pygame.draw.rect(self.DISPLAYSURF, self.GREEN, (150, 100, 300, 500))\n pygame.draw.rect(self.DISPLAYSURF, self.RED, (150, 340, 300, 20))\n pygame.draw.rect(self.DISPLAYSURF, self.BLACK, (0, 20, 600, 20))\n pygame.draw.rect(self.DISPLAYSURF, self.BLACK, (0, 660, 600, 20))\n return\n \n \n \n def reset(self):\n return\n \n def evaluate_state_from_last_coordinate(self, c):\n \"\"\"\n cmax: 450\n cmin: 150\n \n c definately will be between 150 and 450.\n state0 - (150 - 179)\n state1 - (180 - 209)\n state2 - (210 - 239)\n state3 - (240 - 269)\n state4 - (270 - 299)\n state5 - (300 - 329)\n state6 - (330 - 359)\n state7 - (360 - 389)\n state8 - (390 - 419)\n state9 - (420 - 450)\n \"\"\"\n if c >= 150 and c <=179:\n return 0\n elif c >= 180 and c <= 209:\n return 1\n elif c >=210 and c <= 239:\n return 2\n elif c >=240 and c <= 269:\n return 3\n elif c>= 270 and c<=299:\n return 4\n elif c >= 300 and c <= 329:\n return 5\n elif c >= 330 and c <= 359:\n return 6\n elif c >= 360 and c <= 389:\n return 7\n elif c >= 390 and c <= 419:\n return 8\n elif c >= 420 and c <= 450:\n return 9\n \n def evaluate_action(self, action, expectedState):\n if action == expectedState:\n return True\n else:\n return False\n \n def randomVal(self, action):\n \"\"\"\n cmax: 450\n cmin: 150\n \n c definately will be between 150 and 450.\n state0 - (150 - 179)\n state1 - (180 - 209)\n state2 - (210 - 239)\n state3 - (240 - 269)\n state4 - (270 - 299)\n state5 - (300 - 329)\n state6 - (330 - 359)\n state7 - (360 - 389)\n state8 - (390 - 419)\n state9 - (420 - 450)\n \"\"\"\n if action == 0:\n val = np.random.choice([i for i in range(150, 180)])\n elif action == 1:\n val = np.random.choice([i for i in range(180, 210)])\n elif action == 2:\n val = np.random.choice([i for i in range(210, 240)])\n elif action == 3:\n val = np.random.choice([i for i in range(240, 270)])\n elif action == 4:\n val = np.random.choice([i for i in range(270, 300)])\n elif action == 5:\n val = np.random.choice([i for i in range(300, 330)])\n elif action == 6:\n val = 
np.random.choice([i for i in range(330, 360)])\n elif action == 7:\n val = np.random.choice([i for i in range(360, 390)])\n elif action == 8:\n val = np.random.choice([i for i in range(390, 420)])\n else:\n val = np.random.choice([i for i in range(420, 450)])\n return val\n \n def stepA(self, action, count = 0):\n #playerA should play\n if count == 0:\n #playerax = lastxcoordinate\n self.NetworkA = self.net.network(self.lastxcoordinate, ysource = 100, Ynew = 600) #Network A\n self.out = self.net.DefaultToPosition(self.lastxcoordinate)\n\n #update lastxcoordinate\n\n self.bally = self.NetworkA[1][count]\n #here\n #self.playerax = self.out[count]\n self.playerbx = self.randomVal(action)\n \n \n# soundObj = pygame.mixer.Sound('sound/sound.wav')\n# soundObj.play()\n# time.sleep(0.4)\n# soundObj.stop()\n elif count == 49:\n self.ballx = self.NetworkA[0][count]\n self.bally = self.NetworkA[1][count]\n \n # move playerbx with respect to action \n self.playerbx = self.randomVal(action)\n\n\n else:\n self.ballx = self.NetworkA[0][count]\n self.bally = self.NetworkA[1][count]\n \n # move playerbx with respect to action \n# self.playerbx = self.randomVal(action)\n \n \n obs = self.evaluate_state_from_last_coordinate(int(self.ballx)) # last state of the ball\n reward = self.evaluate_action(action, obs)\n done = True\n info = ''\n\n\n return obs, reward, done, info\n \n \n def stepB(self, action, count):\n #playerB can play\n if count == 0:\n #playerbx = lastxcoordinate\n self.NetworkB = self.net.network(self.lastxcoordinate, ysource = 600, Ynew = 100) #Network B\n self.out = self.net.DefaultToPosition(self.lastxcoordinate)\n\n #update lastxcoordinate\n self.bally = self.NetworkB[1][count]\n #self.playerax = self.out[count] \n self.playerax = self.randomVal(action)\n\n# soundObj = pygame.mixer.Sound('sound/sound.wav')\n# soundObj.play()\n# time.sleep(0.4)\n# soundObj.stop()\n elif count ==49:\n self.ballx = self.NetworkA[0][count]\n self.bally = self.NetworkA[1][count]\n \n # move playerbx with respect to action \n self.playerbx = self.randomVal(action)\n \n else:\n self.ballx = self.NetworkB[0][count]\n self.bally = self.NetworkB[1][count]\n# self.playerbx = self.randomVal(action)\n \n obs = self.evaluate_state_from_last_coordinate(int(self.ballx)) # last state of the ball\n reward = self.evaluate_action(action, obs)\n done = True\n info = ''\n \n return obs, reward, done, info\n \n def computeLossA(self, reward):\n if reward == 0:\n self.lossA += 1\n else:\n self.lossA += 0\n return\n\n def computeLossB(self, reward):\n if reward == 0:\n self.lossB += 1\n else:\n self.lossB += 0\n return\n \n def render(self):\n # diplay team players\n self.PLAYERA = pygame.image.load('images/cap.jpg')\n self.PLAYERA = pygame.transform.scale(self.PLAYERA, (50, 50))\n self.PLAYERB = pygame.image.load('images/cap.jpg')\n self.PLAYERB = pygame.transform.scale(self.PLAYERB, (50, 50))\n self.ball = pygame.image.load('images/ball.png')\n self.ball = pygame.transform.scale(self.ball, (15, 15))\n\n self.playerax = 150\n self.playerbx = 250\n \n self.ballx = 250\n self.bally = 300\n \n \n \n \n count = 0\n nextplayer = 'A'\n #player A starts by playing with state 0\n obs, reward, done, info = self.stepA(0)\n stateA = obs\n stateB = obs\n next_state = 0\n \n iterations = 20000\n iteration = 0\n restart = False\n \n while iteration < iterations:\n self.display()\n self.randNumLabelA = self.myFontA.render('A (Win): '+str(self.updateRewardA) + ', A(loss): '+str(self.lossA), 1, self.BLACK)\n self.randNumLabelB = 
self.myFontB.render('B (Win): '+str(self.updateRewardB) + ', B(loss): '+ str(self.lossB), 1, self.BLACK)\n self.randNumLabelIter = self.myFontIter.render('Iterations: '+str(self.updateIter), 1, self.BLACK)\n if nextplayer == 'A':\n\n if count == 0:\n # Online DQN evaluates what to do\n q_valueA = AgentA.model.predict([stateA])\n actionA = AgentA.epsilon_greedy(q_valueA, iteration)\n \n # Online DQN plays\n obs, reward, done, info = self.stepA(action = actionA, count = count)\n next_stateA = obs\n \n # Let's memorize what just happened\n AgentA.replay_memory.append((stateA, actionA, reward, next_stateA, 1.0 - done))\n stateA = next_stateA\n \n \n else: \n # Online DQN evaluates what to do\n q_valueA = AgentA.model.predict([stateA])\n actionA = AgentA.epsilon_greedy(q_valueA, iteration)\n \n # Online DQN plays\n \n obs, reward, done, info = self.stepA(action = actionA, count = count)\n next_stateA = obs\n \n # Let's memorize what just happened\n# AgentA.replay_memory.append((state, action, reward, next_state, 1.0 - done))\n stateA = next_stateA\n \n count += 1 \n if count == 50:\n count = 0\n \n\n self.updateRewardA += reward\n self.computeLossA(reward)\n \n #restart the game if player A fails to get the ball, and let B start the game\n if reward == 0:\n restart = True\n time.sleep(0.5)\n nextplayer = 'B'\n self.playerbx = self.ballx\n else:\n restart = False\n \n # Sample memories and use the target DQN to produce the target Q-Value\n X_state_val, X_action_val, rewards, X_next_state_val, continues = (AgentA.sample_memories(AgentA.batch_size))\n next_q_values = AgentA.model.predict([X_next_state_val])\n max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)\n y_val = rewards + continues * AgentA.discount_rate * max_next_q_values\n\n # Train the online DQN\n AgentA.model.fit(X_state_val,tf.keras.utils.to_categorical(X_next_state_val, num_classes=10), verbose = 0)\n \n nextplayer = 'B'\n self.updateIter += 1\n \n \n #evaluate A\n else:\n nextplayer = 'A'\n \n \n\n else:\n\n if count == 0:\n # Online DQN evaluates what to do\n q_valueB = AgentB.model.predict([stateB])\n actionB = AgentB.epsilon_greedy(q_valueB, iteration)\n \n # Online DQN plays\n obs, reward, done, info = self.stepB(action = actionB, count = count)\n next_stateB = obs\n \n # Let's memorize what just happened\n AgentB.replay_memory.append((stateB, actionB, reward, next_stateB, 1.0 - done))\n stateB = next_stateB\n else:\n # Online DQN evaluates what to do\n q_valueB = AgentB.model.predict([stateB])\n actionB = AgentB.epsilon_greedy(q_valueB, iteration)\n \n # Online DQN plays\n obs, reward, done, info = self.stepB(action = actionB, count = count)\n next_stateB = obs\n \n # Let's memorize what just happened\n# AgentB.replay_memory.append((state, action, reward, next_state, 1.0 - done))\n stateB = next_stateB\n \n count += 1\n if count == 50:\n count = 0\n \n \n self.updateRewardB += reward\n self.computeLossB(reward)\n \n \n #restart the game if player A fails to get the ball, and let B start the game\n if reward == 0:\n restart = True\n time.sleep(0.5)\n nextplayer = 'A'\n self.playerax = self.ballx\n else:\n restart = False\n \n # Sample memories and use the target DQN to produce the target Q-Value\n X_state_val, X_action_val, rewards, X_next_state_val, continues = (AgentB.sample_memories(AgentB.batch_size))\n next_q_values = AgentB.model.predict([X_next_state_val])\n max_next_q_values = np.max(next_q_values, axis=1, keepdims=True)\n y_val = rewards + continues * AgentB.discount_rate * max_next_q_values\n\n # Train 
the online DQN\n AgentB.model.fit(X_state_val,tf.keras.utils.to_categorical(X_next_state_val, num_classes=10), verbose = 0)\n \n nextplayer = 'A'\n self.updateIter += 1\n #evaluate B\n else:\n nextplayer = 'B'\n\n count += 1\n #CHECK BALL MOVEMENT\n self.DISPLAYSURF.blit(self.PLAYERA, (self.playerax, 50))\n self.DISPLAYSURF.blit(self.PLAYERB, (self.playerbx, 600))\n self.DISPLAYSURF.blit(self.ball, (self.ballx, self.bally))\n self.DISPLAYSURF.blit(self.randNumLabelA, (300, 630))\n self.DISPLAYSURF.blit(self.randNumLabelB, (300, 40))\n self.DISPLAYSURF.blit(self.randNumLabelIter, (50, 40))\n\n #update last coordinate\n self.lastxcoordinate = self.ballx \n\n pygame.display.update()\n self.fpsClock.tick(self.FPS)\n\n for event in pygame.event.get():\n\n if event.type == QUIT:\n AgentA.model.save('AgentA.h5')\n AgentB.model.save('AgentB.h5')\n pygame.quit()\n sys.exit()\n\n\n \n \n " ]
[ [ "tensorflow.keras.utils.to_categorical" ] ]
scribbler00/mtl-sps_ern-and-hps_taskembbedding
[ "d8e9de4919cdf8f5b3167f8cb0c0b50ea89b4341" ]
[ "dies/dies/tests/test_regression.py" ]
[ "import random\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import mean_squared_error as mse\nfrom sklearn import datasets\n\nimport unittest\n\nimport torch\nfrom fastai.basic_train import Learner\nfrom fastai.callbacks import OneCycleScheduler\nfrom fastai.basic_data import DatasetType\n\nfrom dies.data import (\n ds_from_df_from_dtypes,\n scale_datasets,\n create_databunch,\n ds_from_df,\n)\nfrom dies import data\nfrom dies.mlp import MultiLayerPeceptron\nfrom dies.embedding import Embedding\nfrom dies.utils_pytorch import dev_to_np, xavier_init_uniform\nfrom dies.autoencoder import Autoencoder\n\nrandom_state = 0\n\n\ndef set_random_states():\n torch.manual_seed(random_state)\n np.random.seed(random_state)\n random.seed(random_state)\n\n\ndef get_df():\n X, y, _ = datasets.make_regression(\n n_samples=50,\n n_features=2,\n bias=1000,\n n_informative=2,\n noise=10,\n coef=True,\n random_state=42,\n )\n\n df1 = pd.DataFrame(\n data=np.concatenate([X, y.reshape(-1, 1)], axis=1),\n columns=[\"feat1\", \"feat2\", \"target\"],\n )\n cats = np.random.randint(low=0, high=10, size=(df1.shape[0], 2))\n df1[\"cat_1\"] = cats[:, 0]\n df1[\"cat_2\"] = cats[:, 1]\n\n index1 = pd.date_range(\"2000-01-01\", \"2000-06-01\", periods=df1.shape[0])\n index1 = pd.to_datetime(index1, utc=True)\n df1.index = index1\n\n return df1\n\n\nclass TestMLP(unittest.TestCase):\n def setUp(self):\n n_features = 3\n device = \"cpu\"\n\n df = get_df()\n ds = ds_from_df_from_dtypes(df, \"target\")\n self.ds_tr, self.ds_val, _ = data.train_test_split_dataset(ds)\n self.db = create_databunch(\n self.ds_tr, self.ds_val, None, batch_size=40, device=\"cpu\"\n )\n set_random_states()\n\n def test_simple_mlp(self):\n input_size = self.ds_tr.x.shape[1]\n df_tr = self.ds_tr.to_df()\n\n ann_model = MultiLayerPeceptron(\n input_size, ann_structure=[2, 1], embedding_module=None, dropout=0.1\n )\n ann_model.apply(xavier_init_uniform)\n\n learn = Learner(self.db, ann_model, loss_func=torch.nn.MSELoss())\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n e_init = mse(df_tr.target, y_hat)\n learn.fit(1)\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n e_end = mse(df_tr.target, y_hat)\n\n self.assertLess(e_end, e_init)\n\n def test_mlp_with_yrange(self):\n input_size = self.ds_tr.x.shape[1]\n df_tr = self.ds_tr.to_df()\n\n y_ranges = self.ds_tr.y_ranges\n\n ann_model = MultiLayerPeceptron(\n input_size,\n ann_structure=[2, 1],\n embedding_module=None,\n dropout=0.1,\n y_ranges=y_ranges,\n )\n ann_model.apply(xavier_init_uniform)\n\n learn = Learner(self.db, ann_model, loss_func=torch.nn.MSELoss())\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n e_init = mse(df_tr.target, y_hat)\n learn.fit(1, lr=0.1)\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n e_end = mse(df_tr.target, y_hat)\n\n self.assertLess(e_end, e_init)\n\n def test_simple_mlp_with_embedding(self):\n input_size = self.ds_tr.x.shape[1]\n df_tr = self.ds_tr.to_df()\n\n embedding_module = Embedding([11, 11], embedding_dropout=0.1)\n\n ann_model = MultiLayerPeceptron(\n input_size,\n ann_structure=[2, 1],\n embedding_module=embedding_module,\n dropout=0.1,\n )\n ann_model.apply(xavier_init_uniform)\n\n learn = Learner(self.db, ann_model, loss_func=torch.nn.MSELoss())\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n e_init = mse(df_tr.target, y_hat)\n learn.fit(1)\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n e_end = 
mse(df_tr.target, y_hat)\n\n self.assertLess(e_end, e_init)\n\n def test_true(self):\n self.assertTrue(True)\n\n\nclass TestAE(unittest.TestCase):\n def setUp(self):\n n_features = 3\n device = \"cpu\"\n\n self.df = get_df()\n self.df.drop(\"target\", axis=1, inplace=True)\n\n set_random_states()\n\n def test_simple_ae(self):\n cols = [\"feat1\", \"feat2\"]\n ds = ds_from_df(self.df, y_columns=cols, x_columns=cols)\n ds_tr, ds_val, _ = data.train_test_split_dataset(ds)\n db = create_databunch(ds_tr, ds_val, None, batch_size=40, device=\"cpu\")\n df_tr = ds_tr.to_df()\n\n input_size = ds_tr.x.shape[1]\n print(input_size, ds_tr.y.shape[1])\n ann_structure = [10, 4, 1]\n\n ann_model = Autoencoder(input_size=input_size, ann_structure=ann_structure)\n ann_model.apply(xavier_init_uniform)\n\n learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n\n target_cols = [\"feat1_target\", \"feat2_target\"]\n e_init = mse(df_tr[target_cols].values, y_hat)\n learn.fit(1)\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n e_end = mse(df_tr[target_cols].values, y_hat)\n\n self.assertLess(e_end, e_init)\n\n def test_ae_with_yranges(self):\n cols = [\"feat1\", \"feat2\"]\n ds = ds_from_df(self.df, y_columns=cols, x_columns=cols)\n ds_tr, ds_val, _ = data.train_test_split_dataset(ds)\n db = create_databunch(ds_tr, ds_val, None, batch_size=40, device=\"cpu\")\n df_tr = ds_tr.to_df()\n\n input_size = ds_tr.x.shape[1]\n print(input_size, ds_tr.y.shape[1])\n ann_structure = [10, 4, 1]\n\n y_ranges = ds_tr.y_ranges\n\n ann_model = Autoencoder(\n input_size=input_size, ann_structure=ann_structure, y_ranges=y_ranges\n )\n\n ann_model.apply(xavier_init_uniform)\n set_random_states()\n learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n\n target_cols = [\"feat1_target\", \"feat2_target\"]\n e_init = mse(df_tr[target_cols].values, y_hat)\n learn.fit(1)\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n e_end = mse(df_tr[target_cols].values, y_hat)\n\n self.assertLess(e_end, e_init)\n\n def test_ae_with_embedding_and_yrange(self):\n cols = [\"feat1\", \"feat2\"]\n ds = ds_from_df(\n self.df, y_columns=cols, x_columns=cols, cat_columns=[\"cat_1\", \"cat_2\"]\n )\n ds_tr, ds_val, _ = data.train_test_split_dataset(ds)\n db = create_databunch(ds_tr, ds_val, None, batch_size=40, device=\"cpu\")\n df_tr = ds_tr.to_df()\n\n y_ranges = ds_tr.y_ranges\n\n input_size = ds_tr.x.shape[1]\n print(input_size, ds_tr.y.shape[1])\n ann_structure = [10, 4, 1]\n\n embedding_module = Embedding([11, 11], embedding_dropout=0.1)\n ann_model = Autoencoder(\n input_size=input_size,\n ann_structure=ann_structure,\n embedding_module=embedding_module,\n embeding_position=\"start\",\n y_ranges=y_ranges,\n )\n set_random_states()\n ann_model.apply(xavier_init_uniform)\n set_random_states()\n\n learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n\n target_cols = [\"feat1_target\", \"feat2_target\"]\n e_init = mse(df_tr[target_cols].values, y_hat)\n learn.fit(1)\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n e_end = mse(df_tr[target_cols].values, y_hat)\n\n # adds some small tolerance\n self.assertLess(\n e_end, e_init + 0.05,\n )\n\n def test_ae_with_embedding_at_start(self):\n cols = [\"feat1\", \"feat2\"]\n ds = ds_from_df(\n self.df, y_columns=cols, x_columns=cols, cat_columns=[\"cat_1\", \"cat_2\"]\n )\n ds_tr, 
ds_val, _ = data.train_test_split_dataset(ds)\n db = create_databunch(ds_tr, ds_val, None, batch_size=40, device=\"cpu\")\n df_tr = ds_tr.to_df()\n\n input_size = ds_tr.x.shape[1]\n print(input_size, ds_tr.y.shape[1])\n ann_structure = [10, 4, 1]\n\n embedding_module = Embedding([11, 11], embedding_dropout=0.1)\n ann_model = Autoencoder(\n input_size=input_size,\n ann_structure=ann_structure,\n embedding_module=embedding_module,\n embeding_position=\"start\",\n )\n ann_model.apply(xavier_init_uniform)\n\n learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n\n target_cols = [\"feat1_target\", \"feat2_target\"]\n e_init = mse(df_tr[target_cols].values, y_hat)\n learn.fit(1)\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n e_end = mse(df_tr[target_cols].values, y_hat)\n\n self.assertLess(e_end, e_init)\n\n def test_ae_with_embedding_at_bottleneck(self):\n cols = [\"feat1\", \"feat2\"]\n ds = ds_from_df(\n self.df, y_columns=cols, x_columns=cols, cat_columns=[\"cat_1\", \"cat_2\"]\n )\n ds_tr, ds_val, _ = data.train_test_split_dataset(ds)\n db = create_databunch(ds_tr, ds_val, None, batch_size=40, device=\"cpu\")\n df_tr = ds_tr.to_df()\n\n input_size = ds_tr.x.shape[1]\n print(input_size, ds_tr.y.shape[1])\n ann_structure = [10, 4, 1]\n\n embedding_module = Embedding([11, 11], embedding_dropout=0.1)\n ann_model = Autoencoder(\n input_size=input_size,\n ann_structure=ann_structure,\n embedding_module=embedding_module,\n embeding_position=\"bottleneck\",\n )\n ann_model.apply(xavier_init_uniform)\n\n learn = Learner(db, ann_model, loss_func=torch.nn.MSELoss())\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n\n target_cols = [\"feat1_target\", \"feat2_target\"]\n e_init = mse(df_tr[target_cols].values, y_hat)\n learn.fit(1, lr=0.1)\n y_hat, _ = dev_to_np(learn.get_preds(DatasetType.Train))\n e_end = mse(df_tr[target_cols].values, y_hat)\n\n self.assertLess(e_end, e_init)\n\n def test_true(self):\n self.assertTrue(True)\n" ]
[ [ "pandas.to_datetime", "sklearn.metrics.mean_squared_error", "torch.nn.MSELoss", "numpy.random.seed", "pandas.date_range", "torch.manual_seed", "numpy.random.randint", "sklearn.datasets.make_regression" ] ]
Jianxun-Wang/PICNNSR
[ "b12ed0cb89b8136a23d213c6c2fb0663e6064299" ]
[ "demo0/foamFileOperation.py" ]
[ "# Python function to manipulate OpenFOAM files\n# Developer: Jian-Xun Wang (jwang33@nd.edu)\n\n###############################################################################\n\n# system import\nimport numpy as np\nimport numpy.matlib\nimport sys # Add extra path/directory\nimport os\nimport os.path as ospt\nimport shutil\nimport subprocess # Call the command line\nfrom subprocess import call\nimport matplotlib.pyplot as plt # For plotting\nimport re\nimport tempfile\nimport pdb\nfrom matplotlib import pyplot as plt\n# local import\nfrom PIL import Image\nimport pandas as pd\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.neural_network import MLPRegressor\nimport multiprocessing\nfrom functools import partial\nimport time\nimport multiprocessing\nfrom functools import partial\n\nimport scipy.sparse as sp\n\nglobal unitTest \nunitTest = False;\n\n\n\n\n\n\ndef readVectorFromFile(UFile):\n\t\"\"\" \n\tArg: \n\ttauFile: The directory path of OpenFOAM vector file (e.g., velocity)\n\n\tRegurn: \n\tvector: Matrix of vector \n\t\"\"\"\n\tresMid = extractVector(UFile)\n\tfout = open('Utemp', 'w');\n\tglob_pattern = resMid.group()\n\tglob_pattern = re.sub(r'\\(', '', glob_pattern)\n\tglob_pattern = re.sub(r'\\)', '', glob_pattern)\n\tfout.write(glob_pattern)\n\tfout.close();\n\tvector = np.loadtxt('Utemp')\n\treturn vector\n\n\n\n\n\n\t\ndef readScalarFromFile(fileName): \n\t\"\"\" \n\n\tArg: \n\tfileName: The file name of OpenFOAM scalar field\n\n\tRegurn: \n\ta vector of scalar field \n\t\"\"\"\n\tresMid = extractScalar(fileName)\n\t\n\t# write it in Tautemp \n\tfout = open('temp.txt', 'w')\n\tglob_patternx = resMid.group()\n\tglob_patternx = re.sub(r'\\(', '', glob_patternx)\n\tglob_patternx = re.sub(r'\\)', '', glob_patternx)\n\tfout.write(glob_patternx)\n\tfout.close();\n\tscalarVec = np.loadtxt('temp.txt')\n\treturn scalarVec\n\n\n################################################ Regular Expression ##################################################### \n\n\ndef extractVector(vectorFile):\n\t\"\"\" Function is using regular expression select Vector value out\n\t\n\tArgs:\n\tUFile: The directory path of file: U\n\n\tReturns:\n\tresMid: the U as (Ux1,Uy1,Uz1);(Ux2,Uy2,Uz2);........\n\t\"\"\"\n\n\tfin = open(vectorFile, 'r') # need consider directory\n\tline = fin.read() # line is U file to read\n\tfin.close()\n\t### select U as (X X X)pattern (Using regular expression)\n\tpatternMid = re.compile(r\"\"\"\n\t(\n\t\\( # match(\n\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? # match figures\n\t(\\ ) # match space\n\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? # match figures\n\t(\\ ) # match space\n\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? # match figures\n\t\\) # match )\n\t\\n # match next line\n\t)+ # search greedly\n\t\"\"\",re.DOTALL | re.VERBOSE)\n\tresMid = patternMid.search(line)\n\treturn resMid \n\t\ndef extractScalar(scalarFile):\n\t\"\"\" subFunction of readTurbStressFromFile\n\t\tUsing regular expression to select scalar value out \n\t\n\tArgs:\n\tscalarFile: The directory path of file of scalar\n\n\tReturns:\n\tresMid: scalar selected;\n\t\t\tyou need use resMid.group() to see the content.\n\t\"\"\"\n\tfin = open(scalarFile, 'r') # need consider directory\n\tline = fin.read() # line is k file to read\n\tfin.close()\n\t### select k as ()pattern (Using regular expression)\n\tpatternMid = re.compile(r\"\"\"\n\t\t\\( # match\"(\"\n\t\t\\n # match next line\n\t\t(\n\t\t[\\+\\-]?[\\d]+([\\.][\\d]*)?([Ee][+-]?[\\d]+)? 
# match figures\n\t\t\\n # match next line\n\t\t)+ # search greedly\n\t\t\\) # match\")\"\n\t\"\"\",re.DOTALL | re.VERBOSE)\n\tresMid = patternMid.search(line)\n\n\treturn resMid\n\n" ]
[ [ "numpy.loadtxt" ] ]
rakmakan/Chatbot
[ "d04bc1526b56961a16c25148d9ef18c4f157e9c4", "901ac307b68486d8289105c159ca702318bea5b0", "901ac307b68486d8289105c159ca702318bea5b0", "d04bc1526b56961a16c25148d9ef18c4f157e9c4", "d04bc1526b56961a16c25148d9ef18c4f157e9c4", "d04bc1526b56961a16c25148d9ef18c4f157e9c4", "d04bc1526b56961a16c25148d9ef18c4f157e9c4" ]
[ "chatbot_env/Lib/site-packages/scipy/fftpack/tests/gen_fftw_ref.py", "chatbot_env/Lib/site-packages/sklearn/manifold/_isomap.py", "chatbot_env/Lib/site-packages/sklearn/preprocessing/tests/test_common.py", "chatbot_env/Lib/site-packages/sklearn/impute/_iterative.py", "chatbot_env/Lib/site-packages/scipy/integrate/__init__.py", "chatbot_env/Lib/site-packages/sklearn/model_selection/tests/test_search.py", "chatbot_env/Lib/site-packages/scipy/io/matlab/tests/test_streams.py" ]
[ "from __future__ import division, print_function, absolute_import\n\nfrom subprocess import Popen, PIPE, STDOUT\n\nimport numpy as np\n\nSZ = [2, 3, 4, 8, 12, 15, 16, 17, 32, 64, 128, 256, 512, 1024]\n\n\ndef gen_data(dt):\n arrays = {}\n\n if dt == np.float128:\n pg = './fftw_longdouble'\n elif dt == np.double:\n pg = './fftw_double'\n elif dt == np.float32:\n pg = './fftw_single'\n else:\n raise ValueError(\"unknown: %s\" % dt)\n # Generate test data using FFTW for reference\n for type in [1, 2, 3, 4, 5, 6, 7, 8]:\n arrays[type] = {}\n for sz in SZ:\n a = Popen([pg, str(type), str(sz)], stdout=PIPE, stderr=STDOUT)\n st = [i.decode('ascii').strip() for i in a.stdout.readlines()]\n arrays[type][sz] = np.fromstring(\",\".join(st), sep=',', dtype=dt)\n\n return arrays\n\n\n# generate single precision data\ndata = gen_data(np.float32)\nfilename = 'fftw_single_ref'\n# Save ref data into npz format\nd = {'sizes': SZ}\nfor type in [1, 2, 3, 4]:\n for sz in SZ:\n d['dct_%d_%d' % (type, sz)] = data[type][sz]\n\nd['sizes'] = SZ\nfor type in [5, 6, 7, 8]:\n for sz in SZ:\n d['dst_%d_%d' % (type-4, sz)] = data[type][sz]\nnp.savez(filename, **d)\n\n\n# generate double precision data\ndata = gen_data(np.float64)\nfilename = 'fftw_double_ref'\n# Save ref data into npz format\nd = {'sizes': SZ}\nfor type in [1, 2, 3, 4]:\n for sz in SZ:\n d['dct_%d_%d' % (type, sz)] = data[type][sz]\n\nd['sizes'] = SZ\nfor type in [5, 6, 7, 8]:\n for sz in SZ:\n d['dst_%d_%d' % (type-4, sz)] = data[type][sz]\nnp.savez(filename, **d)\n\n# generate long double precision data\ndata = gen_data(np.float128)\nfilename = 'fftw_longdouble_ref'\n# Save ref data into npz format\nd = {'sizes': SZ}\nfor type in [1, 2, 3, 4]:\n for sz in SZ:\n d['dct_%d_%d' % (type, sz)] = data[type][sz]\n\nd['sizes'] = SZ\nfor type in [5, 6, 7, 8]:\n for sz in SZ:\n d['dst_%d_%d' % (type-4, sz)] = data[type][sz]\nnp.savez(filename, **d)\n", "\"\"\"Isomap for manifold learning\"\"\"\n\n# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>\n# License: BSD 3 clause (C) 2011\n\nimport numpy as np\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..neighbors import NearestNeighbors, kneighbors_graph\nfrom ..utils.deprecation import deprecated\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.graph import graph_shortest_path\nfrom ..decomposition import KernelPCA\nfrom ..preprocessing import KernelCenterer\n\n\nclass Isomap(TransformerMixin, BaseEstimator):\n \"\"\"Isomap Embedding\n\n Non-linear dimensionality reduction through Isometric Mapping\n\n Read more in the :ref:`User Guide <isomap>`.\n\n Parameters\n ----------\n n_neighbors : integer\n number of neighbors to consider for each point.\n\n n_components : integer\n number of coordinates for the manifold\n\n eigen_solver : ['auto'|'arpack'|'dense']\n 'auto' : Attempt to choose the most efficient solver\n for the given problem.\n\n 'arpack' : Use Arnoldi decomposition to find the eigenvalues\n and eigenvectors.\n\n 'dense' : Use a direct solver (i.e. 
LAPACK)\n for the eigenvalue decomposition.\n\n tol : float\n Convergence tolerance passed to arpack or lobpcg.\n not used if eigen_solver == 'dense'.\n\n max_iter : integer\n Maximum number of iterations for the arpack solver.\n not used if eigen_solver == 'dense'.\n\n path_method : string ['auto'|'FW'|'D']\n Method to use in finding shortest path.\n\n 'auto' : attempt to choose the best algorithm automatically.\n\n 'FW' : Floyd-Warshall algorithm.\n\n 'D' : Dijkstra's algorithm.\n\n neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']\n Algorithm to use for nearest neighbors search,\n passed to neighbors.NearestNeighbors instance.\n\n n_jobs : int or None, default=None\n The number of parallel jobs to run.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n metric : string, or callable, default=\"minkowski\"\n The metric to use when calculating distance between instances in a\n feature array. If metric is a string or callable, it must be one of\n the options allowed by :func:`sklearn.metrics.pairwise_distances` for\n its metric parameter.\n If metric is \"precomputed\", X is assumed to be a distance matrix and\n must be square. X may be a :term:`Glossary <sparse graph>`.\n\n .. versionadded:: 0.22\n\n p : int, default=2\n Parameter for the Minkowski metric from\n sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is\n equivalent to using manhattan_distance (l1), and euclidean_distance\n (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n\n .. versionadded:: 0.22\n\n metric_params : dict, default=None\n Additional keyword arguments for the metric function.\n\n .. versionadded:: 0.22\n\n Attributes\n ----------\n embedding_ : array-like, shape (n_samples, n_components)\n Stores the embedding vectors.\n\n kernel_pca_ : object\n :class:`~sklearn.decomposition.KernelPCA` object used to implement the\n embedding.\n\n nbrs_ : sklearn.neighbors.NearestNeighbors instance\n Stores nearest neighbors instance, including BallTree or KDtree\n if applicable.\n\n dist_matrix_ : array-like, shape (n_samples, n_samples)\n Stores the geodesic distance matrix of training data.\n\n Examples\n --------\n >>> from sklearn.datasets import load_digits\n >>> from sklearn.manifold import Isomap\n >>> X, _ = load_digits(return_X_y=True)\n >>> X.shape\n (1797, 64)\n >>> embedding = Isomap(n_components=2)\n >>> X_transformed = embedding.fit_transform(X[:100])\n >>> X_transformed.shape\n (100, 2)\n\n References\n ----------\n\n .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric\n framework for nonlinear dimensionality reduction. 
Science 290 (5500)\n \"\"\"\n\n def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',\n tol=0, max_iter=None, path_method='auto',\n neighbors_algorithm='auto', n_jobs=None, metric='minkowski',\n p=2, metric_params=None):\n self.n_neighbors = n_neighbors\n self.n_components = n_components\n self.eigen_solver = eigen_solver\n self.tol = tol\n self.max_iter = max_iter\n self.path_method = path_method\n self.neighbors_algorithm = neighbors_algorithm\n self.n_jobs = n_jobs\n self.metric = metric\n self.p = p\n self.metric_params = metric_params\n\n def _fit_transform(self, X):\n\n self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors,\n algorithm=self.neighbors_algorithm,\n metric=self.metric, p=self.p,\n metric_params=self.metric_params,\n n_jobs=self.n_jobs)\n self.nbrs_.fit(X)\n\n self.kernel_pca_ = KernelPCA(n_components=self.n_components,\n kernel=\"precomputed\",\n eigen_solver=self.eigen_solver,\n tol=self.tol, max_iter=self.max_iter,\n n_jobs=self.n_jobs)\n\n kng = kneighbors_graph(self.nbrs_, self.n_neighbors,\n metric=self.metric, p=self.p,\n metric_params=self.metric_params,\n mode='distance', n_jobs=self.n_jobs)\n\n self.dist_matrix_ = graph_shortest_path(kng,\n method=self.path_method,\n directed=False)\n G = self.dist_matrix_ ** 2\n G *= -0.5\n\n self.embedding_ = self.kernel_pca_.fit_transform(G)\n\n @deprecated(\"Attribute `training_data_` was deprecated in version 0.22 and\"\n \" will be removed in 0.24.\")\n @property\n def training_data_(self):\n check_is_fitted(self)\n return self.nbrs_._fit_X\n\n def reconstruction_error(self):\n \"\"\"Compute the reconstruction error for the embedding.\n\n Returns\n -------\n reconstruction_error : float\n\n Notes\n -----\n The cost function of an isomap embedding is\n\n ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``\n\n Where D is the matrix of distances for the input data X,\n D_fit is the matrix of distances for the output embedding X_fit,\n and K is the isomap kernel:\n\n ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``\n \"\"\"\n G = -0.5 * self.dist_matrix_ ** 2\n G_center = KernelCenterer().fit_transform(G)\n evals = self.kernel_pca_.lambdas_\n return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]\n\n def fit(self, X, y=None):\n \"\"\"Compute the embedding vectors for data X\n\n Parameters\n ----------\n X : {array-like, sparse graph, BallTree, KDTree, NearestNeighbors}\n Sample data, shape = (n_samples, n_features), in the form of a\n numpy array, sparse graph, precomputed tree, or NearestNeighbors\n object.\n\n y : Ignored\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._fit_transform(X)\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model from data in X and transform X.\n\n Parameters\n ----------\n X : {array-like, sparse graph, BallTree, KDTree}\n Training vector, where n_samples in the number of samples\n and n_features is the number of features.\n\n y : Ignored\n\n Returns\n -------\n X_new : array-like, shape (n_samples, n_components)\n \"\"\"\n self._fit_transform(X)\n return self.embedding_\n\n def transform(self, X):\n \"\"\"Transform X.\n\n This is implemented by linking the points X into the graph of geodesic\n distances of the training data. 
First the `n_neighbors` nearest\n neighbors of X are found in the training data, and from these the\n shortest geodesic distances from each point in X to each point in\n the training data are computed in order to construct the kernel.\n The embedding of X is the projection of this kernel onto the\n embedding vectors of the training set.\n\n Parameters\n ----------\n X : array-like, shape (n_queries, n_features)\n If neighbors_algorithm='precomputed', X is assumed to be a\n distance matrix or a sparse graph of shape\n (n_queries, n_samples_fit).\n\n Returns\n -------\n X_new : array-like, shape (n_queries, n_components)\n \"\"\"\n check_is_fitted(self)\n distances, indices = self.nbrs_.kneighbors(X, return_distance=True)\n\n # Create the graph of shortest distances from X to\n # training data via the nearest neighbors of X.\n # This can be done as a single array operation, but it potentially\n # takes a lot of memory. To avoid that, use a loop:\n\n n_samples_fit = self.nbrs_.n_samples_fit_\n n_queries = distances.shape[0]\n G_X = np.zeros((n_queries, n_samples_fit))\n for i in range(n_queries):\n G_X[i] = np.min(self.dist_matrix_[indices[i]] +\n distances[i][:, None], 0)\n\n G_X **= 2\n G_X *= -0.5\n\n return self.kernel_pca_.transform(G_X)\n", "import warnings\n\nimport pytest\nimport numpy as np\n\nfrom scipy import sparse\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.base import clone\n\nfrom sklearn.preprocessing import maxabs_scale\nfrom sklearn.preprocessing import minmax_scale\nfrom sklearn.preprocessing import scale\nfrom sklearn.preprocessing import power_transform\nfrom sklearn.preprocessing import quantile_transform\nfrom sklearn.preprocessing import robust_scale\n\nfrom sklearn.preprocessing import MaxAbsScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import PowerTransformer\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.preprocessing import RobustScaler\n\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_allclose\n\niris = load_iris()\n\n\ndef _get_valid_samples_by_column(X, col):\n \"\"\"Get non NaN samples in column of X\"\"\"\n return X[:, [col]][~np.isnan(X[:, col])]\n\n\n@pytest.mark.parametrize(\n \"est, func, support_sparse, strictly_positive\",\n [(MaxAbsScaler(), maxabs_scale, True, False),\n (MinMaxScaler(), minmax_scale, False, False),\n (StandardScaler(), scale, False, False),\n (StandardScaler(with_mean=False), scale, True, False),\n (PowerTransformer('yeo-johnson'), power_transform, False, False),\n (PowerTransformer('box-cox'), power_transform, False, True),\n (QuantileTransformer(n_quantiles=10), quantile_transform, True, False),\n (RobustScaler(), robust_scale, False, False),\n (RobustScaler(with_centering=False), robust_scale, True, False)]\n)\ndef test_missing_value_handling(est, func, support_sparse, strictly_positive):\n # check that the preprocessing method let pass nan\n rng = np.random.RandomState(42)\n X = iris.data.copy()\n n_missing = 50\n X[rng.randint(X.shape[0], size=n_missing),\n rng.randint(X.shape[1], size=n_missing)] = np.nan\n if strictly_positive:\n X += np.nanmin(X) + 0.1\n X_train, X_test = train_test_split(X, random_state=1)\n # sanity check\n assert not np.all(np.isnan(X_train), axis=0).any()\n assert np.any(np.isnan(X_train), axis=0).all()\n assert np.any(np.isnan(X_test), axis=0).all()\n X_test[:, 0] = np.nan # make 
sure this boundary case is tested\n\n with pytest.warns(None) as records:\n Xt = est.fit(X_train).transform(X_test)\n # ensure no warnings are raised\n assert len(records) == 0\n # missing values should still be missing, and only them\n assert_array_equal(np.isnan(Xt), np.isnan(X_test))\n\n # check that the function leads to the same results as the class\n with pytest.warns(None) as records:\n Xt_class = est.transform(X_train)\n assert len(records) == 0\n Xt_func = func(X_train, **est.get_params())\n assert_array_equal(np.isnan(Xt_func), np.isnan(Xt_class))\n assert_allclose(Xt_func[~np.isnan(Xt_func)], Xt_class[~np.isnan(Xt_class)])\n\n # check that the inverse transform keep NaN\n Xt_inv = est.inverse_transform(Xt)\n assert_array_equal(np.isnan(Xt_inv), np.isnan(X_test))\n # FIXME: we can introduce equal_nan=True in recent version of numpy.\n # For the moment which just check that non-NaN values are almost equal.\n assert_allclose(Xt_inv[~np.isnan(Xt_inv)], X_test[~np.isnan(X_test)])\n\n for i in range(X.shape[1]):\n # train only on non-NaN\n est.fit(_get_valid_samples_by_column(X_train, i))\n # check transforming with NaN works even when training without NaN\n with pytest.warns(None) as records:\n Xt_col = est.transform(X_test[:, [i]])\n assert len(records) == 0\n assert_allclose(Xt_col, Xt[:, [i]])\n # check non-NaN is handled as before - the 1st column is all nan\n if not np.isnan(X_test[:, i]).all():\n Xt_col_nonan = est.transform(\n _get_valid_samples_by_column(X_test, i))\n assert_array_equal(Xt_col_nonan,\n Xt_col[~np.isnan(Xt_col.squeeze())])\n\n if support_sparse:\n est_dense = clone(est)\n est_sparse = clone(est)\n\n with pytest.warns(None) as records:\n Xt_dense = est_dense.fit(X_train).transform(X_test)\n Xt_inv_dense = est_dense.inverse_transform(Xt_dense)\n assert len(records) == 0\n for sparse_constructor in (sparse.csr_matrix, sparse.csc_matrix,\n sparse.bsr_matrix, sparse.coo_matrix,\n sparse.dia_matrix, sparse.dok_matrix,\n sparse.lil_matrix):\n # check that the dense and sparse inputs lead to the same results\n # precompute the matrix to avoid catching side warnings\n X_train_sp = sparse_constructor(X_train)\n X_test_sp = sparse_constructor(X_test)\n with pytest.warns(None) as records:\n warnings.simplefilter('ignore', PendingDeprecationWarning)\n Xt_sp = est_sparse.fit(X_train_sp).transform(X_test_sp)\n assert len(records) == 0\n assert_allclose(Xt_sp.A, Xt_dense)\n with pytest.warns(None) as records:\n warnings.simplefilter('ignore', PendingDeprecationWarning)\n Xt_inv_sp = est_sparse.inverse_transform(Xt_sp)\n assert len(records) == 0\n assert_allclose(Xt_inv_sp.A, Xt_inv_dense)\n", "\nfrom time import time\nfrom distutils.version import LooseVersion\nfrom collections import namedtuple\nimport warnings\n\nimport scipy\nfrom scipy import stats\nimport numpy as np\n\nfrom ..base import clone\nfrom ..exceptions import ConvergenceWarning\nfrom ..preprocessing import normalize\nfrom ..utils import check_array, check_random_state, _safe_indexing\nfrom ..utils.validation import FLOAT_DTYPES, check_is_fitted\nfrom ..utils import is_scalar_nan\nfrom ..utils._mask import _get_mask\n\nfrom ._base import _BaseImputer\nfrom ._base import SimpleImputer\nfrom ._base import _check_inputs_dtype\n\n\n_ImputerTriplet = namedtuple('_ImputerTriplet', ['feat_idx',\n 'neighbor_feat_idx',\n 'estimator'])\n\n\nclass IterativeImputer(_BaseImputer):\n \"\"\"Multivariate imputer that estimates each feature from all the others.\n\n A strategy for imputing missing values by modeling each feature 
with\n missing values as a function of other features in a round-robin fashion.\n\n Read more in the :ref:`User Guide <iterative_imputer>`.\n\n .. note::\n\n This estimator is still **experimental** for now: the predictions\n and the API might change without any deprecation cycle. To use it,\n you need to explicitly import ``enable_iterative_imputer``::\n\n >>> # explicitly require this experimental feature\n >>> from sklearn.experimental import enable_iterative_imputer # noqa\n >>> # now you can import normally from sklearn.impute\n >>> from sklearn.impute import IterativeImputer\n\n Parameters\n ----------\n estimator : estimator object, default=BayesianRidge()\n The estimator to use at each step of the round-robin imputation.\n If ``sample_posterior`` is True, the estimator must support\n ``return_std`` in its ``predict`` method.\n\n missing_values : int, np.nan, default=np.nan\n The placeholder for the missing values. All occurrences of\n ``missing_values`` will be imputed.\n\n sample_posterior : boolean, default=False\n Whether to sample from the (Gaussian) predictive posterior of the\n fitted estimator for each imputation. Estimator must support\n ``return_std`` in its ``predict`` method if set to ``True``. Set to\n ``True`` if using ``IterativeImputer`` for multiple imputations.\n\n max_iter : int, default=10\n Maximum number of imputation rounds to perform before returning the\n imputations computed during the final round. A round is a single\n imputation of each feature with missing values. The stopping criterion\n is met once `abs(max(X_t - X_{t-1}))/abs(max(X[known_vals]))` < tol,\n where `X_t` is `X` at iteration `t. Note that early stopping is only\n applied if ``sample_posterior=False``.\n\n tol : float, default=1e-3\n Tolerance of the stopping condition.\n\n n_nearest_features : int, default=None\n Number of other features to use to estimate the missing values of\n each feature column. Nearness between features is measured using\n the absolute correlation coefficient between each feature pair (after\n initial imputation). To ensure coverage of features throughout the\n imputation process, the neighbor features are not necessarily nearest,\n but are drawn with probability proportional to correlation for each\n imputed target feature. Can provide significant speed-up when the\n number of features is huge. If ``None``, all features will be used.\n\n initial_strategy : str, default='mean'\n Which strategy to use to initialize the missing values. Same as the\n ``strategy`` parameter in :class:`sklearn.impute.SimpleImputer`\n Valid values: {\"mean\", \"median\", \"most_frequent\", or \"constant\"}.\n\n imputation_order : str, default='ascending'\n The order in which the features will be imputed. Possible values:\n\n \"ascending\"\n From features with fewest missing values to most.\n \"descending\"\n From features with most missing values to fewest.\n \"roman\"\n Left to right.\n \"arabic\"\n Right to left.\n \"random\"\n A random order for each round.\n\n skip_complete : boolean, default=False\n If ``True`` then features with missing values during ``transform``\n which did not have any missing values during ``fit`` will be imputed\n with the initial imputation method only. Set to ``True`` if you have\n many features with no missing values at both ``fit`` and ``transform``\n time to save compute.\n\n min_value : float, default=None\n Minimum possible imputed value. 
Default of ``None`` will set minimum\n to negative infinity.\n\n max_value : float, default=None\n Maximum possible imputed value. Default of ``None`` will set maximum\n to positive infinity.\n\n verbose : int, default=0\n Verbosity flag, controls the debug messages that are issued\n as functions are evaluated. The higher, the more verbose. Can be 0, 1,\n or 2.\n\n random_state : int, RandomState instance or None, default=None\n The seed of the pseudo random number generator to use. Randomizes\n selection of estimator features if n_nearest_features is not None, the\n ``imputation_order`` if ``random``, and the sampling from posterior if\n ``sample_posterior`` is True. Use an integer for determinism.\n See :term:`the Glossary <random_state>`.\n\n add_indicator : boolean, default=False\n If True, a :class:`MissingIndicator` transform will stack onto output\n of the imputer's transform. This allows a predictive estimator\n to account for missingness despite imputation. If a feature has no\n missing values at fit/train time, the feature won't appear on\n the missing indicator even if there are missing values at\n transform/test time.\n\n Attributes\n ----------\n initial_imputer_ : object of type :class:`sklearn.impute.SimpleImputer`\n Imputer used to initialize the missing values.\n\n imputation_sequence_ : list of tuples\n Each tuple has ``(feat_idx, neighbor_feat_idx, estimator)``, where\n ``feat_idx`` is the current feature to be imputed,\n ``neighbor_feat_idx`` is the array of other features used to impute the\n current feature, and ``estimator`` is the trained estimator used for\n the imputation. Length is ``self.n_features_with_missing_ *\n self.n_iter_``.\n\n n_iter_ : int\n Number of iteration rounds that occurred. Will be less than\n ``self.max_iter`` if early stopping criterion was reached.\n\n n_features_with_missing_ : int\n Number of features with missing values.\n\n indicator_ : :class:`sklearn.impute.MissingIndicator`\n Indicator used to add binary indicators for missing values.\n ``None`` if add_indicator is False.\n\n random_state_ : RandomState instance\n RandomState instance that is generated either from a seed, the random\n number generator or by `np.random`.\n\n See also\n --------\n SimpleImputer : Univariate imputation of missing values.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.experimental import enable_iterative_imputer \n >>> from sklearn.impute import IterativeImputer\n >>> imp_mean = IterativeImputer(random_state=0)\n >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])\n IterativeImputer(random_state=0)\n >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]\n >>> imp_mean.transform(X)\n array([[ 6.9584..., 2. , 3. ],\n [ 4. , 2.6000..., 6. ],\n [10. , 4.9999..., 9. ]])\n\n Notes\n -----\n To support imputation in inductive mode we store each feature's estimator\n during the ``fit`` phase, and predict without refitting (in order) during\n the ``transform`` phase.\n\n Features which contain all missing values at ``fit`` are discarded upon\n ``transform``.\n\n References\n ----------\n .. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). \"mice:\n Multivariate Imputation by Chained Equations in R\". Journal of\n Statistical Software 45: 1-67.\n <https://www.jstatsoft.org/article/view/v045i03>`_\n\n .. [2] `S. F. Buck, (1960). 
\"A Method of Estimation of Missing Values in\n Multivariate Data Suitable for use with an Electronic Computer\".\n Journal of the Royal Statistical Society 22(2): 302-306.\n <https://www.jstor.org/stable/2984099>`_\n \"\"\"\n\n def __init__(self,\n estimator=None,\n missing_values=np.nan,\n sample_posterior=False,\n max_iter=10,\n tol=1e-3,\n n_nearest_features=None,\n initial_strategy=\"mean\",\n imputation_order='ascending',\n skip_complete=False,\n min_value=None,\n max_value=None,\n verbose=0,\n random_state=None,\n add_indicator=False):\n super().__init__(\n missing_values=missing_values,\n add_indicator=add_indicator\n )\n\n self.estimator = estimator\n self.sample_posterior = sample_posterior\n self.max_iter = max_iter\n self.tol = tol\n self.n_nearest_features = n_nearest_features\n self.initial_strategy = initial_strategy\n self.imputation_order = imputation_order\n self.skip_complete = skip_complete\n self.min_value = min_value\n self.max_value = max_value\n self.verbose = verbose\n self.random_state = random_state\n\n def _impute_one_feature(self,\n X_filled,\n mask_missing_values,\n feat_idx,\n neighbor_feat_idx,\n estimator=None,\n fit_mode=True):\n \"\"\"Impute a single feature from the others provided.\n\n This function predicts the missing values of one of the features using\n the current estimates of all the other features. The ``estimator`` must\n support ``return_std=True`` in its ``predict`` method for this function\n to work.\n\n Parameters\n ----------\n X_filled : ndarray\n Input data with the most recent imputations.\n\n mask_missing_values : ndarray\n Input data's missing indicator matrix.\n\n feat_idx : int\n Index of the feature currently being imputed.\n\n neighbor_feat_idx : ndarray\n Indices of the features to be used in imputing ``feat_idx``.\n\n estimator : object\n The estimator to use at this step of the round-robin imputation.\n If ``sample_posterior`` is True, the estimator must support\n ``return_std`` in its ``predict`` method.\n If None, it will be cloned from self._estimator.\n\n fit_mode : boolean, default=True\n Whether to fit and predict with the estimator or just predict.\n\n Returns\n -------\n X_filled : ndarray\n Input data with ``X_filled[missing_row_mask, feat_idx]`` updated.\n\n estimator : estimator with sklearn API\n The fitted estimator used to impute\n ``X_filled[missing_row_mask, feat_idx]``.\n \"\"\"\n if estimator is None and fit_mode is False:\n raise ValueError(\"If fit_mode is False, then an already-fitted \"\n \"estimator should be passed in.\")\n\n if estimator is None:\n estimator = clone(self._estimator)\n\n missing_row_mask = mask_missing_values[:, feat_idx]\n if fit_mode:\n X_train = _safe_indexing(X_filled[:, neighbor_feat_idx],\n ~missing_row_mask)\n y_train = _safe_indexing(X_filled[:, feat_idx],\n ~missing_row_mask)\n estimator.fit(X_train, y_train)\n\n # if no missing values, don't predict\n if np.sum(missing_row_mask) == 0:\n return X_filled, estimator\n\n # get posterior samples if there is at least one missing value\n X_test = _safe_indexing(X_filled[:, neighbor_feat_idx],\n missing_row_mask)\n if self.sample_posterior:\n mus, sigmas = estimator.predict(X_test, return_std=True)\n imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)\n # two types of problems: (1) non-positive sigmas\n # (2) mus outside legal range of min_value and max_value\n # (results in inf sample)\n positive_sigmas = sigmas > 0\n imputed_values[~positive_sigmas] = mus[~positive_sigmas]\n mus_too_low = mus < self._min_value\n 
imputed_values[mus_too_low] = self._min_value\n mus_too_high = mus > self._max_value\n imputed_values[mus_too_high] = self._max_value\n # the rest can be sampled without statistical issues\n inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high\n mus = mus[inrange_mask]\n sigmas = sigmas[inrange_mask]\n a = (self._min_value - mus) / sigmas\n b = (self._max_value - mus) / sigmas\n\n if scipy.__version__ < LooseVersion('0.18'):\n # bug with vector-valued `a` in old scipy\n imputed_values[inrange_mask] = [\n stats.truncnorm(a=a_, b=b_,\n loc=loc_, scale=scale_).rvs(\n random_state=self.random_state_)\n for a_, b_, loc_, scale_\n in zip(a, b, mus, sigmas)]\n else:\n truncated_normal = stats.truncnorm(a=a, b=b,\n loc=mus, scale=sigmas)\n imputed_values[inrange_mask] = truncated_normal.rvs(\n random_state=self.random_state_)\n else:\n imputed_values = estimator.predict(X_test)\n imputed_values = np.clip(imputed_values,\n self._min_value,\n self._max_value)\n\n # update the feature\n X_filled[missing_row_mask, feat_idx] = imputed_values\n return X_filled, estimator\n\n def _get_neighbor_feat_idx(self,\n n_features,\n feat_idx,\n abs_corr_mat):\n \"\"\"Get a list of other features to predict ``feat_idx``.\n\n If self.n_nearest_features is less than or equal to the total\n number of features, then use a probability proportional to the absolute\n correlation between ``feat_idx`` and each other feature to randomly\n choose a subsample of the other features (without replacement).\n\n Parameters\n ----------\n n_features : int\n Number of features in ``X``.\n\n feat_idx : int\n Index of the feature currently being imputed.\n\n abs_corr_mat : ndarray, shape (n_features, n_features)\n Absolute correlation matrix of ``X``. The diagonal has been zeroed\n out and each feature has been normalized to sum to 1. 
Can be None.\n\n Returns\n -------\n neighbor_feat_idx : array-like\n The features to use to impute ``feat_idx``.\n \"\"\"\n if (self.n_nearest_features is not None and\n self.n_nearest_features < n_features):\n p = abs_corr_mat[:, feat_idx]\n neighbor_feat_idx = self.random_state_.choice(\n np.arange(n_features), self.n_nearest_features, replace=False,\n p=p)\n else:\n inds_left = np.arange(feat_idx)\n inds_right = np.arange(feat_idx + 1, n_features)\n neighbor_feat_idx = np.concatenate((inds_left, inds_right))\n return neighbor_feat_idx\n\n def _get_ordered_idx(self, mask_missing_values):\n \"\"\"Decide in what order we will update the features.\n\n As a homage to the MICE R package, we will have 4 main options of\n how to order the updates, and use a random order if anything else\n is specified.\n\n Also, this function skips features which have no missing values.\n\n Parameters\n ----------\n mask_missing_values : array-like, shape (n_samples, n_features)\n Input data's missing indicator matrix, where \"n_samples\" is the\n number of samples and \"n_features\" is the number of features.\n\n Returns\n -------\n ordered_idx : ndarray, shape (n_features,)\n The order in which to impute the features.\n \"\"\"\n frac_of_missing_values = mask_missing_values.mean(axis=0)\n if self.skip_complete:\n missing_values_idx = np.flatnonzero(frac_of_missing_values)\n else:\n missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])\n if self.imputation_order == 'roman':\n ordered_idx = missing_values_idx\n elif self.imputation_order == 'arabic':\n ordered_idx = missing_values_idx[::-1]\n elif self.imputation_order == 'ascending':\n n = len(frac_of_missing_values) - len(missing_values_idx)\n ordered_idx = np.argsort(frac_of_missing_values,\n kind='mergesort')[n:]\n elif self.imputation_order == 'descending':\n n = len(frac_of_missing_values) - len(missing_values_idx)\n ordered_idx = np.argsort(frac_of_missing_values,\n kind='mergesort')[n:][::-1]\n elif self.imputation_order == 'random':\n ordered_idx = missing_values_idx\n self.random_state_.shuffle(ordered_idx)\n else:\n raise ValueError(\"Got an invalid imputation order: '{0}'. It must \"\n \"be one of the following: 'roman', 'arabic', \"\n \"'ascending', 'descending', or \"\n \"'random'.\".format(self.imputation_order))\n return ordered_idx\n\n def _get_abs_corr_mat(self, X_filled, tolerance=1e-6):\n \"\"\"Get absolute correlation matrix between features.\n\n Parameters\n ----------\n X_filled : ndarray, shape (n_samples, n_features)\n Input data with the most recent imputations.\n\n tolerance : float, default=1e-6\n ``abs_corr_mat`` can have nans, which will be replaced\n with ``tolerance``.\n\n Returns\n -------\n abs_corr_mat : ndarray, shape (n_features, n_features)\n Absolute correlation matrix of ``X`` at the beginning of the\n current round. The diagonal has been zeroed out and each feature's\n absolute correlations with all others have been normalized to sum\n to 1.\n \"\"\"\n n_features = X_filled.shape[1]\n if (self.n_nearest_features is None or\n self.n_nearest_features >= n_features):\n return None\n with np.errstate(invalid='ignore'):\n # if a feature in the neighboorhood has only a single value\n # (e.g., categorical feature), the std. dev. will be null and\n # np.corrcoef will raise a warning due to a division by zero\n abs_corr_mat = np.abs(np.corrcoef(X_filled.T))\n # np.corrcoef is not defined for features with zero std\n abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance\n # ensures exploration, i.e. 
at least some probability of sampling\n np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)\n # features are not their own neighbors\n np.fill_diagonal(abs_corr_mat, 0)\n # needs to sum to 1 for np.random.choice sampling\n abs_corr_mat = normalize(abs_corr_mat, norm='l1', axis=0, copy=False)\n return abs_corr_mat\n\n def _initial_imputation(self, X):\n \"\"\"Perform initial imputation for input X.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n Input data, where \"n_samples\" is the number of samples and\n \"n_features\" is the number of features.\n\n Returns\n -------\n Xt : ndarray, shape (n_samples, n_features)\n Input data, where \"n_samples\" is the number of samples and\n \"n_features\" is the number of features.\n\n X_filled : ndarray, shape (n_samples, n_features)\n Input data with the most recent imputations.\n\n mask_missing_values : ndarray, shape (n_samples, n_features)\n Input data's missing indicator matrix, where \"n_samples\" is the\n number of samples and \"n_features\" is the number of features.\n \"\"\"\n if is_scalar_nan(self.missing_values):\n force_all_finite = \"allow-nan\"\n else:\n force_all_finite = True\n\n X = check_array(X, dtype=FLOAT_DTYPES, order=\"F\",\n force_all_finite=force_all_finite)\n _check_inputs_dtype(X, self.missing_values)\n\n mask_missing_values = _get_mask(X, self.missing_values)\n if self.initial_imputer_ is None:\n self.initial_imputer_ = SimpleImputer(\n missing_values=self.missing_values,\n strategy=self.initial_strategy\n )\n X_filled = self.initial_imputer_.fit_transform(X)\n else:\n X_filled = self.initial_imputer_.transform(X)\n\n valid_mask = np.flatnonzero(np.logical_not(\n np.isnan(self.initial_imputer_.statistics_)))\n Xt = X[:, valid_mask]\n mask_missing_values = mask_missing_values[:, valid_mask]\n\n return Xt, X_filled, mask_missing_values\n\n def fit_transform(self, X, y=None):\n \"\"\"Fits the imputer on X and return the transformed X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input data, where \"n_samples\" is the number of samples and\n \"n_features\" is the number of features.\n\n y : ignored.\n\n Returns\n -------\n Xt : array-like, shape (n_samples, n_features)\n The imputed input data.\n \"\"\"\n self.random_state_ = getattr(self, \"random_state_\",\n check_random_state(self.random_state))\n\n if self.max_iter < 0:\n raise ValueError(\n \"'max_iter' should be a positive integer. Got {} instead.\"\n .format(self.max_iter))\n\n if self.tol < 0:\n raise ValueError(\n \"'tol' should be a non-negative float. Got {} instead.\"\n .format(self.tol)\n )\n\n if self.estimator is None:\n from ..linear_model import BayesianRidge\n self._estimator = BayesianRidge()\n else:\n self._estimator = clone(self.estimator)\n\n if hasattr(self._estimator, 'random_state'):\n self._estimator.random_state = self.random_state_\n\n self.imputation_sequence_ = []\n\n self._min_value = -np.inf if self.min_value is None else self.min_value\n self._max_value = np.inf if self.max_value is None else self.max_value\n\n self.initial_imputer_ = None\n super()._fit_indicator(X)\n X_indicator = super()._transform_indicator(X)\n X, Xt, mask_missing_values = self._initial_imputation(X)\n if self.max_iter == 0 or np.all(mask_missing_values):\n self.n_iter_ = 0\n return super()._concatenate_indicator(Xt, X_indicator)\n\n # Edge case: a single feature. 
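There are no other features to regress on, so no iterative refinement is possible. 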
We return the initial ...\n if Xt.shape[1] == 1:\n self.n_iter_ = 0\n return super()._concatenate_indicator(Xt, X_indicator)\n\n # order in which to impute\n # note this is probably too slow for large feature data (d > 100000)\n # and a better way would be good.\n # see: https://goo.gl/KyCNwj and subsequent comments\n ordered_idx = self._get_ordered_idx(mask_missing_values)\n self.n_features_with_missing_ = len(ordered_idx)\n\n abs_corr_mat = self._get_abs_corr_mat(Xt)\n\n n_samples, n_features = Xt.shape\n if self.verbose > 0:\n print(\"[IterativeImputer] Completing matrix with shape %s\"\n % (X.shape,))\n start_t = time()\n if not self.sample_posterior:\n Xt_previous = Xt.copy()\n normalized_tol = self.tol * np.max(\n np.abs(X[~mask_missing_values])\n )\n for self.n_iter_ in range(1, self.max_iter + 1):\n if self.imputation_order == 'random':\n ordered_idx = self._get_ordered_idx(mask_missing_values)\n\n for feat_idx in ordered_idx:\n neighbor_feat_idx = self._get_neighbor_feat_idx(n_features,\n feat_idx,\n abs_corr_mat)\n Xt, estimator = self._impute_one_feature(\n Xt, mask_missing_values, feat_idx, neighbor_feat_idx,\n estimator=None, fit_mode=True)\n estimator_triplet = _ImputerTriplet(feat_idx,\n neighbor_feat_idx,\n estimator)\n self.imputation_sequence_.append(estimator_triplet)\n\n if self.verbose > 1:\n print('[IterativeImputer] Ending imputation round '\n '%d/%d, elapsed time %0.2f'\n % (self.n_iter_, self.max_iter, time() - start_t))\n\n if not self.sample_posterior:\n inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf,\n axis=None)\n if self.verbose > 0:\n print('[IterativeImputer] '\n 'Change: {}, scaled tolerance: {} '.format(\n inf_norm, normalized_tol))\n if inf_norm < normalized_tol:\n if self.verbose > 0:\n print('[IterativeImputer] Early stopping criterion '\n 'reached.')\n break\n Xt_previous = Xt.copy()\n else:\n if not self.sample_posterior:\n warnings.warn(\"[IterativeImputer] Early stopping criterion not\"\n \" reached.\", ConvergenceWarning)\n Xt[~mask_missing_values] = X[~mask_missing_values]\n return super()._concatenate_indicator(Xt, X_indicator)\n\n def transform(self, X):\n \"\"\"Imputes all missing values in X.\n\n Note that this is stochastic, and that if random_state is not fixed,\n repeated calls, or permuted input, will yield different results.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n Xt : array-like, shape (n_samples, n_features)\n The imputed input data.\n \"\"\"\n check_is_fitted(self)\n\n X_indicator = super()._transform_indicator(X)\n X, Xt, mask_missing_values = self._initial_imputation(X)\n\n if self.n_iter_ == 0 or np.all(mask_missing_values):\n return super()._concatenate_indicator(Xt, X_indicator)\n\n imputations_per_round = len(self.imputation_sequence_) // self.n_iter_\n i_rnd = 0\n if self.verbose > 0:\n print(\"[IterativeImputer] Completing matrix with shape %s\"\n % (X.shape,))\n start_t = time()\n for it, estimator_triplet in enumerate(self.imputation_sequence_):\n Xt, _ = self._impute_one_feature(\n Xt,\n mask_missing_values,\n estimator_triplet.feat_idx,\n estimator_triplet.neighbor_feat_idx,\n estimator=estimator_triplet.estimator,\n fit_mode=False\n )\n if not (it + 1) % imputations_per_round:\n if self.verbose > 1:\n print('[IterativeImputer] Ending imputation round '\n '%d/%d, elapsed time %0.2f'\n % (i_rnd + 1, self.n_iter_, time() - start_t))\n i_rnd += 1\n\n Xt[~mask_missing_values] = X[~mask_missing_values]\n\n return 
super()._concatenate_indicator(Xt, X_indicator)\n\n def fit(self, X, y=None):\n \"\"\"Fits the imputer on X and return self.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Input data, where \"n_samples\" is the number of samples and\n \"n_features\" is the number of features.\n\n y : ignored\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n self.fit_transform(X)\n return self\n", "\"\"\"\n=============================================\nIntegration and ODEs (:mod:`scipy.integrate`)\n=============================================\n\n.. currentmodule:: scipy.integrate\n\nIntegrating functions, given function object\n============================================\n\n.. autosummary::\n :toctree: generated/\n\n quad -- General purpose integration\n quad_vec -- General purpose integration of vector-valued functions\n dblquad -- General purpose double integration\n tplquad -- General purpose triple integration\n nquad -- General purpose n-dimensional integration\n fixed_quad -- Integrate func(x) using Gaussian quadrature of order n\n quadrature -- Integrate with given tolerance using Gaussian quadrature\n romberg -- Integrate func using Romberg integration\n quad_explain -- Print information for use of quad\n newton_cotes -- Weights and error coefficient for Newton-Cotes integration\n IntegrationWarning -- Warning on issues during integration\n\nIntegrating functions, given fixed samples\n==========================================\n\n.. autosummary::\n :toctree: generated/\n\n trapz -- Use trapezoidal rule to compute integral.\n cumtrapz -- Use trapezoidal rule to cumulatively compute integral.\n simps -- Use Simpson's rule to compute integral from samples.\n romb -- Use Romberg Integration to compute integral from\n -- (2**k + 1) evenly-spaced samples.\n\n.. seealso::\n\n :mod:`scipy.special` for orthogonal polynomials (special) for Gaussian\n quadrature roots and weights for other weighting factors and regions.\n\nSolving initial value problems for ODE systems\n==============================================\n\nThe solvers are implemented as individual classes which can be used directly\n(low-level usage) or through a convenience function.\n\n.. autosummary::\n :toctree: generated/\n\n solve_ivp -- Convenient function for ODE integration.\n RK23 -- Explicit Runge-Kutta solver of order 3(2).\n RK45 -- Explicit Runge-Kutta solver of order 5(4).\n DOP853 -- Explicit Runge-Kutta solver of order 8.\n Radau -- Implicit Runge-Kutta solver of order 5.\n BDF -- Implicit multi-step variable order (1 to 5) solver.\n LSODA -- LSODA solver from ODEPACK Fortran package.\n OdeSolver -- Base class for ODE solvers.\n DenseOutput -- Local interpolant for computing a dense output.\n OdeSolution -- Class which represents a continuous ODE solution.\n\n\nOld API\n-------\n\nThese are the routines developed earlier for scipy. They wrap older solvers\nimplemented in Fortran (mostly ODEPACK). While the interface to them is not\nparticularly convenient and certain features are missing compared to the new\nAPI, the solvers themselves are of good quality and work fast as compiled\nFortran code. In some cases it might be worth using this old API.\n\n.. 
autosummary::\n :toctree: generated/\n\n odeint -- General integration of ordinary differential equations.\n ode -- Integrate ODE using VODE and ZVODE routines.\n complex_ode -- Convert a complex-valued ODE to real-valued and integrate.\n\n\nSolving boundary value problems for ODE systems\n===============================================\n\n.. autosummary::\n :toctree: generated/\n\n solve_bvp -- Solve a boundary value problem for a system of ODEs.\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom .quadrature import *\nfrom .odepack import *\nfrom .quadpack import *\nfrom ._ode import *\nfrom ._bvp import solve_bvp\nfrom ._ivp import (solve_ivp, OdeSolution, DenseOutput,\n OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA)\nfrom ._quad_vec import quad_vec\n\n__all__ = [s for s in dir() if not s.startswith('_')]\n\nfrom scipy._lib._testutils import PytestTester\ntest = PytestTester(__name__)\ndel PytestTester\n", "\"\"\"Test the search module\"\"\"\n\nfrom collections.abc import Iterable, Sized\nfrom io import StringIO\nfrom itertools import chain, product\nfrom functools import partial\nimport pickle\nimport sys\nfrom types import GeneratorType\nimport re\n\nimport numpy as np\nimport scipy.sparse as sp\nimport pytest\n\nfrom sklearn.utils.fixes import sp_version\nfrom sklearn.utils._testing import assert_raises\nfrom sklearn.utils._testing import assert_warns\nfrom sklearn.utils._testing import assert_warns_message\nfrom sklearn.utils._testing import assert_raise_message\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.utils._testing import assert_allclose\nfrom sklearn.utils._testing import assert_almost_equal\nfrom sklearn.utils._testing import ignore_warnings\nfrom sklearn.utils._mocking import CheckingClassifier, MockDataFrame\n\nfrom scipy.stats import bernoulli, expon, uniform\n\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.base import clone\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.datasets import make_classification\nfrom sklearn.datasets import make_blobs\nfrom sklearn.datasets import make_multilabel_classification\n\nfrom sklearn.model_selection import fit_grid_point\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import LeaveOneGroupOut\nfrom sklearn.model_selection import LeavePGroupsOut\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.model_selection import GroupShuffleSplit\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import ParameterGrid\nfrom sklearn.model_selection import ParameterSampler\nfrom sklearn.model_selection._search import BaseSearchCV\n\nfrom sklearn.model_selection._validation import FitFailedWarning\n\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import KernelDensity\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import make_scorer\nfrom sklearn.metrics import 
roc_auc_score\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n\nfrom sklearn.model_selection.tests.common import OneTimeSplitter\n\n\n# Neither of the following two estimators inherit from BaseEstimator,\n# to test hyperparameter search on user-defined classifiers.\nclass MockClassifier:\n \"\"\"Dummy classifier to test the parameter search algorithms\"\"\"\n def __init__(self, foo_param=0):\n self.foo_param = foo_param\n\n def fit(self, X, Y):\n assert len(X) == len(Y)\n self.classes_ = np.unique(Y)\n return self\n\n def predict(self, T):\n return T.shape[0]\n\n def transform(self, X):\n return X + self.foo_param\n\n def inverse_transform(self, X):\n return X - self.foo_param\n\n predict_proba = predict\n predict_log_proba = predict\n decision_function = predict\n\n def score(self, X=None, Y=None):\n if self.foo_param > 1:\n score = 1.\n else:\n score = 0.\n return score\n\n def get_params(self, deep=False):\n return {'foo_param': self.foo_param}\n\n def set_params(self, **params):\n self.foo_param = params['foo_param']\n return self\n\n\nclass LinearSVCNoScore(LinearSVC):\n \"\"\"An LinearSVC classifier that has no score method.\"\"\"\n @property\n def score(self):\n raise AttributeError\n\n\nX = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])\ny = np.array([1, 1, 2, 2])\n\n\ndef assert_grid_iter_equals_getitem(grid):\n assert list(grid) == [grid[i] for i in range(len(grid))]\n\n@pytest.mark.parametrize(\"klass\", [ParameterGrid,\n partial(ParameterSampler, n_iter=10)])\n@pytest.mark.parametrize(\n \"input, error_type, error_message\",\n [(0, TypeError, r'Parameter .* is not a dict or a list \\(0\\)'),\n ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \\(0\\)'),\n ({'foo': 0}, TypeError, \"Parameter.* value is not iterable .*\"\n r\"\\(key='foo', value=0\\)\")]\n)\ndef test_validate_parameter_input(klass, input, error_type, error_message):\n with pytest.raises(error_type, match=error_message):\n klass(input)\n\n\ndef test_parameter_grid():\n\n # Test basic properties of ParameterGrid.\n params1 = {\"foo\": [1, 2, 3]}\n grid1 = ParameterGrid(params1)\n assert isinstance(grid1, Iterable)\n assert isinstance(grid1, Sized)\n assert len(grid1) == 3\n assert_grid_iter_equals_getitem(grid1)\n\n params2 = {\"foo\": [4, 2],\n \"bar\": [\"ham\", \"spam\", \"eggs\"]}\n grid2 = ParameterGrid(params2)\n assert len(grid2) == 6\n\n # loop to assert we can iterate over the grid multiple times\n for i in range(2):\n # tuple + chain transforms {\"a\": 1, \"b\": 2} to (\"a\", 1, \"b\", 2)\n points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)\n assert (points ==\n set((\"bar\", x, \"foo\", y)\n for x, y in product(params2[\"bar\"], params2[\"foo\"])))\n assert_grid_iter_equals_getitem(grid2)\n\n # Special case: empty grid (useful to get default estimator settings)\n empty = ParameterGrid({})\n assert len(empty) == 1\n assert list(empty) == [{}]\n assert_grid_iter_equals_getitem(empty)\n assert_raises(IndexError, lambda: empty[1])\n\n has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])\n assert len(has_empty) == 4\n assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}]\n assert_grid_iter_equals_getitem(has_empty)\n\n\ndef test_grid_search():\n # Test that the best estimator contains the right value for foo_param\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, 
verbose=3)\n # make sure it selects the smallest parameter in case of ties\n old_stdout = sys.stdout\n sys.stdout = StringIO()\n grid_search.fit(X, y)\n sys.stdout = old_stdout\n assert grid_search.best_estimator_.foo_param == 2\n\n assert_array_equal(grid_search.cv_results_[\"param_foo_param\"].data,\n [1, 2, 3])\n\n # Smoke test the score etc:\n grid_search.score(X, y)\n grid_search.predict_proba(X)\n grid_search.decision_function(X)\n grid_search.transform(X)\n\n # Test exception handling on scoring\n grid_search.scoring = 'sklearn'\n assert_raises(ValueError, grid_search.fit, X, y)\n\n\ndef test_grid_search_pipeline_steps():\n # check that parameters that are estimators are cloned before fitting\n pipe = Pipeline([('regressor', LinearRegression())])\n param_grid = {'regressor': [LinearRegression(), Ridge()]}\n grid_search = GridSearchCV(pipe, param_grid, cv=2)\n grid_search.fit(X, y)\n regressor_results = grid_search.cv_results_['param_regressor']\n assert isinstance(regressor_results[0], LinearRegression)\n assert isinstance(regressor_results[1], Ridge)\n assert not hasattr(regressor_results[0], 'coef_')\n assert not hasattr(regressor_results[1], 'coef_')\n assert regressor_results[0] is not grid_search.best_estimator_\n assert regressor_results[1] is not grid_search.best_estimator_\n # check that we didn't modify the parameter grid that was passed\n assert not hasattr(param_grid['regressor'][0], 'coef_')\n assert not hasattr(param_grid['regressor'][1], 'coef_')\n\n\n@pytest.mark.parametrize(\"SearchCV\", [GridSearchCV, RandomizedSearchCV])\ndef test_SearchCV_with_fit_params(SearchCV):\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])\n searcher = SearchCV(\n clf, {'foo_param': [1, 2, 3]}, cv=2, error_score=\"raise\"\n )\n\n # The CheckingClassifier generates an assertion error if\n # a parameter is missing or has length != len(X).\n err_msg = r\"Expected fit parameter\\(s\\) \\['eggs'\\] not seen.\"\n with pytest.raises(AssertionError, match=err_msg):\n searcher.fit(X, y, spam=np.ones(10))\n\n err_msg = \"Fit parameter spam has length 1; expected\"\n with pytest.raises(AssertionError, match=err_msg):\n searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10))\n searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))\n\n\n@ignore_warnings\ndef test_grid_search_no_score():\n # Test grid-search on classifier that has no score function.\n clf = LinearSVC(random_state=0)\n X, y = make_blobs(random_state=0, centers=2)\n Cs = [.1, 1, 10]\n clf_no_score = LinearSVCNoScore(random_state=0)\n grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')\n grid_search.fit(X, y)\n\n grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},\n scoring='accuracy')\n # smoketest grid search\n grid_search_no_score.fit(X, y)\n\n # check that best params are equal\n assert grid_search_no_score.best_params_ == grid_search.best_params_\n # check that we can call score and that it gives the correct result\n assert grid_search.score(X, y) == grid_search_no_score.score(X, y)\n\n # giving no scoring function raises an error\n grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})\n assert_raise_message(TypeError, \"no scoring\", grid_search_no_score.fit,\n [[1]])\n\n\ndef test_grid_search_score_method():\n X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,\n random_state=0)\n clf = LinearSVC(random_state=0)\n grid = {'C': [.1]}\n\n search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)\n 
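# Four fits over the same one-point grid, differing only in how scoring is\n    # resolved: the estimator's default score method (mean accuracy for\n    # LinearSVC), an explicit 'accuracy' scorer, and 'roc_auc' with and\n    # without a score method on the estimator.\n    # Hedged sketch (not executed; uses only names imported above): the same\n    # accuracy request could also be written with a scorer object, e.g.\n    #     GridSearchCV(clf, grid, scoring=make_scorer(accuracy_score)).fit(X, y)\n    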
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)\n search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,\n scoring='roc_auc'\n ).fit(X, y)\n search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)\n\n # Check warning only occurs in situation where behavior changed:\n # estimator requires score method to compete with scoring parameter\n score_no_scoring = search_no_scoring.score(X, y)\n score_accuracy = search_accuracy.score(X, y)\n score_no_score_auc = search_no_score_method_auc.score(X, y)\n score_auc = search_auc.score(X, y)\n\n # ensure the test is sane\n assert score_auc < 1.0\n assert score_accuracy < 1.0\n assert score_auc != score_accuracy\n\n assert_almost_equal(score_accuracy, score_no_scoring)\n assert_almost_equal(score_auc, score_no_score_auc)\n\n\ndef test_grid_search_groups():\n # Check if ValueError (when groups is None) propagates to GridSearchCV\n # And also check if groups is correctly passed to the cv object\n rng = np.random.RandomState(0)\n\n X, y = make_classification(n_samples=15, n_classes=2, random_state=0)\n groups = rng.randint(0, 3, 15)\n\n clf = LinearSVC(random_state=0)\n grid = {'C': [1]}\n\n group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2),\n GroupKFold(n_splits=3), GroupShuffleSplit()]\n for cv in group_cvs:\n gs = GridSearchCV(clf, grid, cv=cv)\n assert_raise_message(ValueError,\n \"The 'groups' parameter should not be None.\",\n gs.fit, X, y)\n gs.fit(X, y, groups=groups)\n\n non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]\n for cv in non_group_cvs:\n gs = GridSearchCV(clf, grid, cv=cv)\n # Should not raise an error\n gs.fit(X, y)\n\n\ndef test_classes__property():\n # Test that classes_ property matches best_estimator_.classes_\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n Cs = [.1, 1, 10]\n\n grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})\n grid_search.fit(X, y)\n assert_array_equal(grid_search.best_estimator_.classes_,\n grid_search.classes_)\n\n # Test that regressors do not have a classes_ attribute\n grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})\n grid_search.fit(X, y)\n assert not hasattr(grid_search, 'classes_')\n\n # Test that the grid searcher has no classes_ attribute before it's fit\n grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})\n assert not hasattr(grid_search, 'classes_')\n\n # Test that the grid searcher has no classes_ attribute without a refit\n grid_search = GridSearchCV(LinearSVC(random_state=0),\n {'C': Cs}, refit=False)\n grid_search.fit(X, y)\n assert not hasattr(grid_search, 'classes_')\n\n\ndef test_trivial_cv_results_attr():\n # Test search over a \"grid\" with only one point.\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3)\n grid_search.fit(X, y)\n assert hasattr(grid_search, \"cv_results_\")\n\n random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3)\n random_search.fit(X, y)\n assert hasattr(grid_search, \"cv_results_\")\n\n\ndef test_no_refit():\n # Test that GSCV can be used for model selection alone without refitting\n clf = MockClassifier()\n for scoring in [None, ['accuracy', 'precision']]:\n grid_search = GridSearchCV(\n clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3\n )\n grid_search.fit(X, y)\n assert not hasattr(grid_search, \"best_estimator_\") and \\\n hasattr(grid_search, \"best_index_\") and \\\n hasattr(grid_search, \"best_params_\")\n\n # Make sure the functions predict/transform etc raise meaningful\n # error 
messages\n for fn_name in ('predict', 'predict_proba', 'predict_log_proba',\n 'transform', 'inverse_transform'):\n assert_raise_message(NotFittedError,\n ('refit=False. %s is available only after '\n 'refitting on the best parameters'\n % fn_name), getattr(grid_search, fn_name), X)\n\n # Test that an invalid refit param raises appropriate error messages\n for refit in [\"\", 5, True, 'recall', 'accuracy']:\n assert_raise_message(ValueError, \"For multi-metric scoring, the \"\n \"parameter refit must be set to a scorer key\",\n GridSearchCV(clf, {}, refit=refit,\n scoring={'acc': 'accuracy',\n 'prec': 'precision'}\n ).fit,\n X, y)\n\n\ndef test_grid_search_error():\n # Test that grid search will capture errors on data with different length\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n assert_raises(ValueError, cv.fit, X_[:180], y_)\n\n\ndef test_grid_search_one_grid_point():\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n param_dict = {\"C\": [1.0], \"kernel\": [\"rbf\"], \"gamma\": [0.1]}\n\n clf = SVC(gamma='auto')\n cv = GridSearchCV(clf, param_dict)\n cv.fit(X_, y_)\n\n clf = SVC(C=1.0, kernel=\"rbf\", gamma=0.1)\n clf.fit(X_, y_)\n\n assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)\n\n\ndef test_grid_search_when_param_grid_includes_range():\n # Test that the best estimator contains the right value for foo_param\n clf = MockClassifier()\n grid_search = None\n grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3)\n grid_search.fit(X, y)\n assert grid_search.best_estimator_.foo_param == 2\n\n\ndef test_grid_search_bad_param_grid():\n param_dict = {\"C\": 1.0}\n clf = SVC(gamma='auto')\n assert_raise_message(\n ValueError,\n \"Parameter values for parameter (C) need to be a sequence\"\n \"(but not a string) or np.ndarray.\",\n GridSearchCV, clf, param_dict)\n\n param_dict = {\"C\": []}\n clf = SVC()\n assert_raise_message(\n ValueError,\n \"Parameter values for parameter (C) need to be a non-empty sequence.\",\n GridSearchCV, clf, param_dict)\n\n param_dict = {\"C\": \"1,2,3\"}\n clf = SVC(gamma='auto')\n assert_raise_message(\n ValueError,\n \"Parameter values for parameter (C) need to be a sequence\"\n \"(but not a string) or np.ndarray.\",\n GridSearchCV, clf, param_dict)\n\n param_dict = {\"C\": np.ones((3, 2))}\n clf = SVC()\n assert_raises(ValueError, GridSearchCV, clf, param_dict)\n\n\ndef test_grid_search_sparse():\n # Test that grid search works with both dense and sparse matrices\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(X_[:180], y_[:180])\n y_pred = cv.predict(X_[180:])\n C = cv.best_estimator_.C\n\n X_ = sp.csr_matrix(X_)\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(X_[:180].tocoo(), y_[:180])\n y_pred2 = cv.predict(X_[180:])\n C2 = cv.best_estimator_.C\n\n assert np.mean(y_pred == y_pred2) >= .9\n assert C == C2\n\n\ndef test_grid_search_sparse_scoring():\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=\"f1\")\n cv.fit(X_[:180], y_[:180])\n y_pred = cv.predict(X_[180:])\n C = cv.best_estimator_.C\n\n X_ = sp.csr_matrix(X_)\n clf = LinearSVC()\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=\"f1\")\n cv.fit(X_[:180], y_[:180])\n y_pred2 = cv.predict(X_[180:])\n C2 = 
cv.best_estimator_.C\n\n assert_array_equal(y_pred, y_pred2)\n assert C == C2\n # Smoke test the score\n # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),\n # cv.score(X_[:180], y[:180]))\n\n # test loss where greater is worse\n def f1_loss(y_true_, y_pred_):\n return -f1_score(y_true_, y_pred_)\n F1Loss = make_scorer(f1_loss, greater_is_better=False)\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)\n cv.fit(X_[:180], y_[:180])\n y_pred3 = cv.predict(X_[180:])\n C3 = cv.best_estimator_.C\n\n assert C == C3\n assert_array_equal(y_pred, y_pred3)\n\n\ndef test_grid_search_precomputed_kernel():\n # Test that grid search works when the input features are given in the\n # form of a precomputed kernel matrix\n X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)\n\n # compute the training kernel matrix corresponding to the linear kernel\n K_train = np.dot(X_[:180], X_[:180].T)\n y_train = y_[:180]\n\n clf = SVC(kernel='precomputed')\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n cv.fit(K_train, y_train)\n\n assert cv.best_score_ >= 0\n\n # compute the test kernel matrix\n K_test = np.dot(X_[180:], X_[:180].T)\n y_test = y_[180:]\n\n y_pred = cv.predict(K_test)\n\n assert np.mean(y_pred == y_test) >= 0\n\n # test error is raised when the precomputed kernel is not array-like\n # or sparse\n assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)\n\n\ndef test_grid_search_precomputed_kernel_error_nonsquare():\n # Test that grid search returns an error with a non-square precomputed\n # training kernel matrix\n K_train = np.zeros((10, 20))\n y_train = np.ones((10, ))\n clf = SVC(kernel='precomputed')\n cv = GridSearchCV(clf, {'C': [0.1, 1.0]})\n assert_raises(ValueError, cv.fit, K_train, y_train)\n\n\nclass BrokenClassifier(BaseEstimator):\n \"\"\"Broken classifier that cannot be fit twice\"\"\"\n\n def __init__(self, parameter=None):\n self.parameter = parameter\n\n def fit(self, X, y):\n assert not hasattr(self, 'has_been_fit_')\n self.has_been_fit_ = True\n\n def predict(self, X):\n return np.zeros(X.shape[0])\n\n\n@ignore_warnings\ndef test_refit():\n # Regression test for bug in refitting\n # Simulates re-fitting a broken estimator; this used to break with\n # sparse SVMs.\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],\n scoring=\"precision\", refit=True)\n clf.fit(X, y)\n\n\ndef test_refit_callable():\n \"\"\"\n Test refit=callable, which adds flexibility in identifying the\n \"best\" estimator.\n \"\"\"\n def refit_callable(cv_results):\n \"\"\"\n A dummy function tests `refit=callable` interface.\n Return the index of a model that has the least\n `mean_test_score`.\n \"\"\"\n # Fit a dummy clf with `refit=True` to get a list of keys in\n # clf.cv_results_.\n X, y = make_classification(n_samples=100, n_features=4,\n random_state=42)\n clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},\n scoring='precision', refit=True)\n clf.fit(X, y)\n # Ensure that `best_index_ != 0` for this dummy clf\n assert clf.best_index_ != 0\n\n # Assert every key matches those in `cv_results`\n for key in clf.cv_results_.keys():\n assert key in cv_results\n\n return cv_results['mean_test_score'].argmin()\n\n X, y = make_classification(n_samples=100, n_features=4,\n random_state=42)\n clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},\n scoring='precision', refit=refit_callable)\n clf.fit(X, y)\n\n assert clf.best_index_ == 0\n # 
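When refit is a callable, its return value becomes best_index_ and the\n    # model at that index is refit, but no scalar best_score_ is exposed.\n    # 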
Ensure `best_score_` is disabled when using `refit=callable`\n assert not hasattr(clf, 'best_score_')\n\n\ndef test_refit_callable_invalid_type():\n \"\"\"\n Test implementation catches the errors when 'best_index_' returns an\n invalid result.\n \"\"\"\n def refit_callable_invalid_type(cv_results):\n \"\"\"\n A dummy function tests when returned 'best_index_' is not integer.\n \"\"\"\n return None\n\n X, y = make_classification(n_samples=100, n_features=4,\n random_state=42)\n\n clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]},\n scoring='precision', refit=refit_callable_invalid_type)\n with pytest.raises(TypeError,\n match='best_index_ returned is not an integer'):\n clf.fit(X, y)\n\n\n@pytest.mark.parametrize('out_bound_value', [-1, 2])\n@pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV])\ndef test_refit_callable_out_bound(out_bound_value, search_cv):\n \"\"\"\n Test implementation catches the errors when 'best_index_' returns an\n out of bound result.\n \"\"\"\n def refit_callable_out_bound(cv_results):\n \"\"\"\n A dummy function tests when returned 'best_index_' is out of bounds.\n \"\"\"\n return out_bound_value\n\n X, y = make_classification(n_samples=100, n_features=4,\n random_state=42)\n\n clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]},\n scoring='precision', refit=refit_callable_out_bound)\n with pytest.raises(IndexError, match='best_index_ index out of range'):\n clf.fit(X, y)\n\n\ndef test_refit_callable_multi_metric():\n \"\"\"\n Test refit=callable in multiple metric evaluation setting\n \"\"\"\n def refit_callable(cv_results):\n \"\"\"\n A dummy function tests `refit=callable` interface.\n Return the index of a model that has the least\n `mean_test_prec`.\n \"\"\"\n assert 'mean_test_prec' in cv_results\n return cv_results['mean_test_prec'].argmin()\n\n X, y = make_classification(n_samples=100, n_features=4,\n random_state=42)\n scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'}\n clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},\n scoring=scoring, refit=refit_callable)\n clf.fit(X, y)\n\n assert clf.best_index_ == 0\n # Ensure `best_score_` is disabled when using `refit=callable`\n assert not hasattr(clf, 'best_score_')\n\n\ndef test_gridsearch_nd():\n # Pass X as list in GridSearchCV\n X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)\n y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)\n check_X = lambda x: x.shape[1:] == (5, 3, 2)\n check_y = lambda x: x.shape[1:] == (7, 11)\n clf = CheckingClassifier(check_X=check_X, check_y=check_y)\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})\n grid_search.fit(X_4d, y_3d).score(X, y)\n assert hasattr(grid_search, \"cv_results_\")\n\n\ndef test_X_as_list():\n # Pass X as list in GridSearchCV\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))\n cv = KFold(n_splits=3)\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)\n grid_search.fit(X.tolist(), y).score(X, y)\n assert hasattr(grid_search, \"cv_results_\")\n\n\ndef test_y_as_list():\n # Pass y as list in GridSearchCV\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))\n cv = KFold(n_splits=3)\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)\n grid_search.fit(X, y.tolist()).score(X, y)\n assert hasattr(grid_search, \"cv_results_\")\n\n\n@ignore_warnings\ndef 
test_pandas_input():\n # check cross_val_score doesn't destroy pandas dataframe\n types = [(MockDataFrame, MockDataFrame)]\n try:\n from pandas import Series, DataFrame\n types.append((DataFrame, Series))\n except ImportError:\n pass\n\n X = np.arange(100).reshape(10, 10)\n y = np.array([0] * 5 + [1] * 5)\n\n for InputFeatureType, TargetType in types:\n # X dataframe, y series\n X_df, y_ser = InputFeatureType(X), TargetType(y)\n\n def check_df(x):\n return isinstance(x, InputFeatureType)\n\n def check_series(x):\n return isinstance(x, TargetType)\n\n clf = CheckingClassifier(check_X=check_df, check_y=check_series)\n\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})\n grid_search.fit(X_df, y_ser).score(X_df, y_ser)\n grid_search.predict(X_df)\n assert hasattr(grid_search, \"cv_results_\")\n\n\ndef test_unsupervised_grid_search():\n # test grid-search with unsupervised estimator\n X, y = make_blobs(n_samples=50, random_state=0)\n km = KMeans(random_state=0, init=\"random\", n_init=1)\n\n # Multi-metric evaluation unsupervised\n scoring = ['adjusted_rand_score', 'fowlkes_mallows_score']\n for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']:\n grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),\n scoring=scoring, refit=refit)\n grid_search.fit(X, y)\n # Both ARI and FMS can find the right number :)\n assert grid_search.best_params_[\"n_clusters\"] == 3\n\n # Single metric evaluation unsupervised\n grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),\n scoring='fowlkes_mallows_score')\n grid_search.fit(X, y)\n assert grid_search.best_params_[\"n_clusters\"] == 3\n\n # Now without a score, and without y\n grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))\n grid_search.fit(X)\n assert grid_search.best_params_[\"n_clusters\"] == 4\n\n\ndef test_gridsearch_no_predict():\n # test grid-search with an estimator without predict.\n # slight duplication of a test from KDE\n def custom_scoring(estimator, X):\n return 42 if estimator.bandwidth == .1 else 0\n X, _ = make_blobs(cluster_std=.1, random_state=1,\n centers=[[0, 1], [1, 0], [0, 0]])\n search = GridSearchCV(KernelDensity(),\n param_grid=dict(bandwidth=[.01, .1, 1]),\n scoring=custom_scoring)\n search.fit(X)\n assert search.best_params_['bandwidth'] == .1\n assert search.best_score_ == 42\n\n\ndef test_param_sampler():\n # test basic properties of param sampler\n param_distributions = {\"kernel\": [\"rbf\", \"linear\"],\n \"C\": uniform(0, 1)}\n sampler = ParameterSampler(param_distributions=param_distributions,\n n_iter=10, random_state=0)\n samples = [x for x in sampler]\n assert len(samples) == 10\n for sample in samples:\n assert sample[\"kernel\"] in [\"rbf\", \"linear\"]\n assert 0 <= sample[\"C\"] <= 1\n\n # test that repeated calls yield identical parameters\n param_distributions = {\"C\": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}\n sampler = ParameterSampler(param_distributions=param_distributions,\n n_iter=3, random_state=0)\n assert [x for x in sampler] == [x for x in sampler]\n\n if sp_version >= (0, 16):\n param_distributions = {\"C\": uniform(0, 1)}\n sampler = ParameterSampler(param_distributions=param_distributions,\n n_iter=10, random_state=0)\n assert [x for x in sampler] == [x for x in sampler]\n\n\ndef check_cv_results_array_types(search, param_keys, score_keys):\n # Check if the search `cv_results`'s array are of correct types\n cv_results = search.cv_results_\n assert all(isinstance(cv_results[param], np.ma.MaskedArray)\n for param in param_keys)\n assert 
all(cv_results[key].dtype == object for key in param_keys)\n assert not any(isinstance(cv_results[key], np.ma.MaskedArray)\n for key in score_keys)\n assert all(cv_results[key].dtype == np.float64\n for key in score_keys if not key.startswith('rank'))\n\n scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score']\n\n for key in scorer_keys:\n assert cv_results['rank_test_%s' % key].dtype == np.int32\n\n\ndef check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):\n # Test the search.cv_results_ contains all the required results\n assert_array_equal(sorted(cv_results.keys()),\n sorted(param_keys + score_keys + ('params',)))\n assert all(cv_results[key].shape == (n_cand,)\n for key in param_keys + score_keys)\n\n\n@pytest.mark.filterwarnings(\"ignore:The parameter 'iid' is deprecated\") # 0.24\ndef test_grid_search_cv_results():\n X, y = make_classification(n_samples=50, n_features=4,\n random_state=42)\n\n n_splits = 3\n n_grid_points = 6\n params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),\n dict(kernel=['poly', ], degree=[1, 2])]\n\n param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')\n score_keys = ('mean_test_score', 'mean_train_score',\n 'rank_test_score',\n 'split0_test_score', 'split1_test_score',\n 'split2_test_score',\n 'split0_train_score', 'split1_train_score',\n 'split2_train_score',\n 'std_test_score', 'std_train_score',\n 'mean_fit_time', 'std_fit_time',\n 'mean_score_time', 'std_score_time')\n n_candidates = n_grid_points\n\n for iid in (False, True):\n search = GridSearchCV(SVC(), cv=n_splits, iid=iid,\n param_grid=params, return_train_score=True)\n search.fit(X, y)\n assert iid == search.iid\n cv_results = search.cv_results_\n # Check if score and timing are reasonable\n assert all(cv_results['rank_test_score'] >= 1)\n assert (all(cv_results[k] >= 0) for k in score_keys\n if k != 'rank_test_score')\n assert (all(cv_results[k] <= 1) for k in score_keys\n if 'time' not in k and\n k != 'rank_test_score')\n # Check cv_results structure\n check_cv_results_array_types(search, param_keys, score_keys)\n check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)\n # Check masking\n cv_results = search.cv_results_\n n_candidates = len(search.cv_results_['params'])\n assert all((cv_results['param_C'].mask[i] and\n cv_results['param_gamma'].mask[i] and\n not cv_results['param_degree'].mask[i])\n for i in range(n_candidates)\n if cv_results['param_kernel'][i] == 'linear')\n assert all((not cv_results['param_C'].mask[i] and\n not cv_results['param_gamma'].mask[i] and\n cv_results['param_degree'].mask[i])\n for i in range(n_candidates)\n if cv_results['param_kernel'][i] == 'rbf')\n\n\n@pytest.mark.filterwarnings(\"ignore:The parameter 'iid' is deprecated\") # 0.24\ndef test_random_search_cv_results():\n X, y = make_classification(n_samples=50, n_features=4, random_state=42)\n\n n_splits = 3\n n_search_iter = 30\n\n params = [{'kernel': ['rbf'], 'C': expon(scale=10),\n 'gamma': expon(scale=0.1)},\n {'kernel': ['poly'], 'degree': [2, 3]}]\n param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')\n score_keys = ('mean_test_score', 'mean_train_score',\n 'rank_test_score',\n 'split0_test_score', 'split1_test_score',\n 'split2_test_score',\n 'split0_train_score', 'split1_train_score',\n 'split2_train_score',\n 'std_test_score', 'std_train_score',\n 'mean_fit_time', 'std_fit_time',\n 'mean_score_time', 'std_score_time')\n n_cand = n_search_iter\n\n for iid in (False, True):\n search = RandomizedSearchCV(SVC(), 
n_iter=n_search_iter,\n cv=n_splits, iid=iid,\n param_distributions=params,\n return_train_score=True)\n search.fit(X, y)\n assert iid == search.iid\n cv_results = search.cv_results_\n # Check results structure\n check_cv_results_array_types(search, param_keys, score_keys)\n check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)\n n_candidates = len(search.cv_results_['params'])\n assert all((cv_results['param_C'].mask[i] and\n cv_results['param_gamma'].mask[i] and\n not cv_results['param_degree'].mask[i])\n for i in range(n_candidates)\n if cv_results['param_kernel'][i] == 'linear')\n assert all((not cv_results['param_C'].mask[i] and\n not cv_results['param_gamma'].mask[i] and\n cv_results['param_degree'].mask[i])\n for i in range(n_candidates)\n if cv_results['param_kernel'][i] == 'rbf')\n\n\n@pytest.mark.parametrize(\n \"SearchCV, specialized_params\",\n [(GridSearchCV, {'param_grid': {'C': [1, 10]}}),\n (RandomizedSearchCV,\n {'param_distributions': {'C': [1, 10]}, 'n_iter': 2})]\n)\ndef test_search_default_iid(SearchCV, specialized_params):\n # Test the IID parameter\n # noise-free simple 2d-data\n X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,\n cluster_std=0.1, shuffle=False, n_samples=80)\n # split dataset into two folds that are not iid\n # first one contains data of all 4 blobs, second only from two.\n mask = np.ones(X.shape[0], dtype=np.bool)\n mask[np.where(y == 1)[0][::2]] = 0\n mask[np.where(y == 2)[0][::2]] = 0\n # this leads to perfect classification on one fold and a score of 1/3 on\n # the other\n # create \"cv\" for splits\n cv = [[mask, ~mask], [~mask, mask]]\n\n common_params = {'estimator': SVC(), 'cv': cv,\n 'return_train_score': True}\n search = SearchCV(**common_params, **specialized_params)\n search.fit(X, y)\n\n test_cv_scores = np.array(\n [search.cv_results_['split%d_test_score' % s][0]\n for s in range(search.n_splits_)]\n )\n test_mean = search.cv_results_['mean_test_score'][0]\n test_std = search.cv_results_['std_test_score'][0]\n\n train_cv_scores = np.array(\n [search.cv_results_['split%d_train_score' % s][0]\n for s in range(search.n_splits_)]\n )\n train_mean = search.cv_results_['mean_train_score'][0]\n train_std = search.cv_results_['std_train_score'][0]\n\n assert search.cv_results_['param_C'][0] == 1\n # scores are the same as above\n assert_allclose(test_cv_scores, [1, 1. / 3.])\n assert_allclose(train_cv_scores, [1, 1])\n # Unweighted mean/std is used\n assert test_mean == pytest.approx(np.mean(test_cv_scores))\n assert test_std == pytest.approx(np.std(test_cv_scores))\n\n # For the train scores, we do not take a weighted mean irrespective of\n # i.i.d. 
or not\n assert train_mean == pytest.approx(1)\n assert train_std == pytest.approx(0)\n\n\n@pytest.mark.filterwarnings(\"ignore:The parameter 'iid' is deprecated\") # 0.24\ndef test_search_iid_param():\n # Test the IID parameter\n # noise-free simple 2d-data\n X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,\n cluster_std=0.1, shuffle=False, n_samples=80)\n # split dataset into two folds that are not iid\n # first one contains data of all 4 blobs, second only from two.\n mask = np.ones(X.shape[0], dtype=np.bool)\n mask[np.where(y == 1)[0][::2]] = 0\n mask[np.where(y == 2)[0][::2]] = 0\n # this leads to perfect classification on one fold and a score of 1/3 on\n # the other\n # create \"cv\" for splits\n cv = [[mask, ~mask], [~mask, mask]]\n # once with iid=True (default)\n grid_search = GridSearchCV(SVC(gamma='auto'), param_grid={'C': [1, 10]},\n cv=cv, return_train_score=True, iid=True)\n random_search = RandomizedSearchCV(SVC(gamma='auto'), n_iter=2,\n param_distributions={'C': [1, 10]},\n cv=cv, iid=True,\n return_train_score=True)\n for search in (grid_search, random_search):\n search.fit(X, y)\n assert search.iid or search.iid is None\n\n test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'\n % s_i][0]\n for s_i in range(search.n_splits_)))\n test_mean = search.cv_results_['mean_test_score'][0]\n test_std = search.cv_results_['std_test_score'][0]\n\n train_cv_scores = np.array(list(search.cv_results_['split%d_train_'\n 'score' % s_i][0]\n for s_i in range(search.n_splits_)))\n train_mean = search.cv_results_['mean_train_score'][0]\n train_std = search.cv_results_['std_train_score'][0]\n\n # Test the first candidate\n assert search.cv_results_['param_C'][0] == 1\n assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])\n assert_array_almost_equal(train_cv_scores, [1, 1])\n\n # for first split, 1/4 of dataset is in test, for second 3/4.\n # take weighted average and weighted std\n expected_test_mean = 1 * 1. / 4. + 1. / 3. * 3. / 4.\n expected_test_std = np.sqrt(1. / 4 * (expected_test_mean - 1) ** 2 +\n 3. / 4 * (expected_test_mean - 1. / 3.) **\n 2)\n assert_almost_equal(test_mean, expected_test_mean)\n assert_almost_equal(test_std, expected_test_std)\n assert_array_almost_equal(test_cv_scores,\n cross_val_score(SVC(C=1, gamma='auto'), X,\n y, cv=cv))\n\n # For the train scores, we do not take a weighted mean irrespective of\n # i.i.d. or not\n assert_almost_equal(train_mean, 1)\n assert_almost_equal(train_std, 0)\n\n # once with iid=False\n grid_search = GridSearchCV(SVC(gamma='auto'),\n param_grid={'C': [1, 10]},\n cv=cv, iid=False, return_train_score=True)\n random_search = RandomizedSearchCV(SVC(gamma='auto'), n_iter=2,\n param_distributions={'C': [1, 10]},\n cv=cv, iid=False,\n return_train_score=True)\n\n for search in (grid_search, random_search):\n search.fit(X, y)\n assert not search.iid\n\n test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'\n % s][0]\n for s in range(search.n_splits_)))\n test_mean = search.cv_results_['mean_test_score'][0]\n test_std = search.cv_results_['std_test_score'][0]\n\n train_cv_scores = np.array(list(search.cv_results_['split%d_train_'\n 'score' % s][0]\n for s in range(search.n_splits_)))\n train_mean = search.cv_results_['mean_train_score'][0]\n train_std = search.cv_results_['std_train_score'][0]\n\n assert search.cv_results_['param_C'][0] == 1\n # scores are the same as above\n assert_array_almost_equal(test_cv_scores, [1, 1. 
/ 3.])\n # Unweighted mean/std is used\n assert_almost_equal(test_mean, np.mean(test_cv_scores))\n assert_almost_equal(test_std, np.std(test_cv_scores))\n\n # For the train scores, we do not take a weighted mean irrespective of\n # i.i.d. or not\n assert_almost_equal(train_mean, 1)\n assert_almost_equal(train_std, 0)\n\n\n@pytest.mark.filterwarnings(\"ignore:The parameter 'iid' is deprecated\") # 0.24\ndef test_grid_search_cv_results_multimetric():\n X, y = make_classification(n_samples=50, n_features=4, random_state=42)\n\n n_splits = 3\n params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),\n dict(kernel=['poly', ], degree=[1, 2])]\n\n for iid in (False, True):\n grid_searches = []\n for scoring in ({'accuracy': make_scorer(accuracy_score),\n 'recall': make_scorer(recall_score)},\n 'accuracy', 'recall'):\n grid_search = GridSearchCV(SVC(), cv=n_splits,\n iid=iid, param_grid=params,\n scoring=scoring, refit=False)\n grid_search.fit(X, y)\n assert grid_search.iid == iid\n grid_searches.append(grid_search)\n\n compare_cv_results_multimetric_with_single(*grid_searches, iid=iid)\n\n\n@pytest.mark.filterwarnings(\"ignore:The parameter 'iid' is deprecated\") # 0.24\ndef test_random_search_cv_results_multimetric():\n X, y = make_classification(n_samples=50, n_features=4, random_state=42)\n\n n_splits = 3\n n_search_iter = 30\n scoring = ('accuracy', 'recall')\n\n # Scipy 0.12's stats dists do not accept seed, hence we use param grid\n params = dict(C=np.logspace(-4, 1, 3),\n gamma=np.logspace(-5, 0, 3, base=0.1))\n for iid in (True, False):\n for refit in (True, False):\n random_searches = []\n for scoring in (('accuracy', 'recall'), 'accuracy', 'recall'):\n # If True, for multi-metric pass refit='accuracy'\n if refit:\n probability = True\n refit = 'accuracy' if isinstance(scoring, tuple) else refit\n else:\n probability = False\n clf = SVC(probability=probability, random_state=42)\n random_search = RandomizedSearchCV(clf, n_iter=n_search_iter,\n cv=n_splits, iid=iid,\n param_distributions=params,\n scoring=scoring,\n refit=refit, random_state=0)\n random_search.fit(X, y)\n random_searches.append(random_search)\n\n compare_cv_results_multimetric_with_single(*random_searches,\n iid=iid)\n if refit:\n compare_refit_methods_when_refit_with_acc(\n random_searches[0], random_searches[1], refit)\n\n\n@pytest.mark.filterwarnings(\"ignore:The parameter 'iid' is deprecated\") # 0.24\ndef compare_cv_results_multimetric_with_single(\n search_multi, search_acc, search_rec, iid):\n \"\"\"Compare multi-metric cv_results with the ensemble of multiple\n single metric cv_results from single metric grid/random search\"\"\"\n\n assert search_multi.iid == iid\n assert search_multi.multimetric_\n assert_array_equal(sorted(search_multi.scorer_),\n ('accuracy', 'recall'))\n\n cv_results_multi = search_multi.cv_results_\n cv_results_acc_rec = {re.sub('_score$', '_accuracy', k): v\n for k, v in search_acc.cv_results_.items()}\n cv_results_acc_rec.update({re.sub('_score$', '_recall', k): v\n for k, v in search_rec.cv_results_.items()})\n\n # Check if score and timing are reasonable, also checks if the keys\n # are present\n assert all((np.all(cv_results_multi[k] <= 1) for k in (\n 'mean_score_time', 'std_score_time', 'mean_fit_time',\n 'std_fit_time')))\n\n # Compare the keys, other than time keys, among multi-metric and\n # single metric grid search results. 
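Time-related entries vary from run to run, so they are excluded here. 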
np.testing.assert_equal performs a\n # deep nested comparison of the two cv_results dicts\n np.testing.assert_equal({k: v for k, v in cv_results_multi.items()\n if not k.endswith('_time')},\n {k: v for k, v in cv_results_acc_rec.items()\n if not k.endswith('_time')})\n\n\ndef compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit):\n \"\"\"Compare refit multi-metric search methods with single metric methods\"\"\"\n if refit:\n assert search_multi.refit == 'accuracy'\n else:\n assert not search_multi.refit\n assert search_acc.refit == refit\n\n X, y = make_blobs(n_samples=100, n_features=4, random_state=42)\n for method in ('predict', 'predict_proba', 'predict_log_proba'):\n assert_almost_equal(getattr(search_multi, method)(X),\n getattr(search_acc, method)(X))\n assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y))\n for key in ('best_index_', 'best_score_', 'best_params_'):\n assert getattr(search_multi, key) == getattr(search_acc, key)\n\n\ndef test_search_cv_results_rank_tie_breaking():\n X, y = make_blobs(n_samples=50, random_state=42)\n\n # The two C values are close enough to give similar models\n # which would result in a tie of their mean cv-scores\n param_grid = {'C': [1, 1.001, 0.001]}\n\n grid_search = GridSearchCV(SVC(), param_grid=param_grid,\n return_train_score=True)\n random_search = RandomizedSearchCV(SVC(), n_iter=3,\n param_distributions=param_grid,\n return_train_score=True)\n\n for search in (grid_search, random_search):\n search.fit(X, y)\n cv_results = search.cv_results_\n # Check tie breaking strategy -\n # Check that there is a tie in the mean scores between\n # candidates 1 and 2 alone\n assert_almost_equal(cv_results['mean_test_score'][0],\n cv_results['mean_test_score'][1])\n assert_almost_equal(cv_results['mean_train_score'][0],\n cv_results['mean_train_score'][1])\n assert not np.allclose(cv_results['mean_test_score'][1],\n cv_results['mean_test_score'][2])\n assert not np.allclose(cv_results['mean_train_score'][1],\n cv_results['mean_train_score'][2])\n # 'min' rank should be assigned to the tied candidates\n assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])\n\n\ndef test_search_cv_results_none_param():\n X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]\n estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())\n est_parameters = {\"random_state\": [0, None]}\n cv = KFold()\n\n for est in estimators:\n grid_search = GridSearchCV(est, est_parameters, cv=cv,\n ).fit(X, y)\n assert_array_equal(grid_search.cv_results_['param_random_state'],\n [0, None])\n\n\n@ignore_warnings()\ndef test_search_cv_timing():\n svc = LinearSVC(random_state=0)\n\n X = [[1, ], [2, ], [3, ], [4, ]]\n y = [0, 1, 1, 0]\n\n gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)\n rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)\n\n for search in (gs, rs):\n search.fit(X, y)\n for key in ['mean_fit_time', 'std_fit_time']:\n # NOTE The precision of time.time in windows is not high\n # enough for the fit/score times to be non-zero for trivial X and y\n assert np.all(search.cv_results_[key] >= 0)\n assert np.all(search.cv_results_[key] < 1)\n\n for key in ['mean_score_time', 'std_score_time']:\n assert search.cv_results_[key][1] >= 0\n assert search.cv_results_[key][0] == 0.0\n assert np.all(search.cv_results_[key] < 1)\n\n assert hasattr(search, \"refit_time_\")\n assert isinstance(search.refit_time_, float)\n assert search.refit_time_ >= 0\n\n\ndef test_grid_search_correct_score_results():\n # test 
that correct scores are used\n n_splits = 3\n clf = LinearSVC(random_state=0)\n X, y = make_blobs(random_state=0, centers=2)\n Cs = [.1, 1, 10]\n for score in ['f1', 'roc_auc']:\n grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)\n cv_results = grid_search.fit(X, y).cv_results_\n\n # Test scorer names\n result_keys = list(cv_results.keys())\n expected_keys = ((\"mean_test_score\", \"rank_test_score\") +\n tuple(\"split%d_test_score\" % cv_i\n for cv_i in range(n_splits)))\n assert all(np.in1d(expected_keys, result_keys))\n\n cv = StratifiedKFold(n_splits=n_splits)\n n_splits = grid_search.n_splits_\n for candidate_i, C in enumerate(Cs):\n clf.set_params(C=C)\n cv_scores = np.array(\n list(grid_search.cv_results_['split%d_test_score'\n % s][candidate_i]\n for s in range(n_splits)))\n for i, (train, test) in enumerate(cv.split(X, y)):\n clf.fit(X[train], y[train])\n if score == \"f1\":\n correct_score = f1_score(y[test], clf.predict(X[test]))\n elif score == \"roc_auc\":\n dec = clf.decision_function(X[test])\n correct_score = roc_auc_score(y[test], dec)\n assert_almost_equal(correct_score, cv_scores[i])\n\n\ndef test_fit_grid_point():\n X, y = make_classification(random_state=0)\n cv = StratifiedKFold()\n svc = LinearSVC(random_state=0)\n scorer = make_scorer(accuracy_score)\n\n for params in ({'C': 0.1}, {'C': 0.01}, {'C': 0.001}):\n for train, test in cv.split(X, y):\n this_scores, this_params, n_test_samples = fit_grid_point(\n X, y, clone(svc), params, train, test,\n scorer, verbose=False)\n\n est = clone(svc).set_params(**params)\n est.fit(X[train], y[train])\n expected_score = scorer(est, X[test], y[test])\n\n # Test the return values of fit_grid_point\n assert_almost_equal(this_scores, expected_score)\n assert params == this_params\n assert n_test_samples == test.size\n\n # Should raise an error upon multimetric scorer\n assert_raise_message(ValueError, \"For evaluating multiple scores, use \"\n \"sklearn.model_selection.cross_validate instead.\",\n fit_grid_point, X, y, svc, params, train, test,\n {'score': scorer}, verbose=True)\n\n\ndef test_pickle():\n # Test that a fit search can be pickled\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True, cv=3)\n grid_search.fit(X, y)\n grid_search_pickled = pickle.loads(pickle.dumps(grid_search))\n assert_array_almost_equal(grid_search.predict(X),\n grid_search_pickled.predict(X))\n\n random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},\n refit=True, n_iter=3, cv=3)\n random_search.fit(X, y)\n random_search_pickled = pickle.loads(pickle.dumps(random_search))\n assert_array_almost_equal(random_search.predict(X),\n random_search_pickled.predict(X))\n\n\n@pytest.mark.filterwarnings('ignore: The default value of multioutput') # 0.23\ndef test_grid_search_with_multioutput_data():\n # Test search with multi-output estimator\n\n X, y = make_multilabel_classification(return_indicator=True,\n random_state=0)\n\n est_parameters = {\"max_depth\": [1, 2, 3, 4]}\n cv = KFold()\n\n estimators = [DecisionTreeRegressor(random_state=0),\n DecisionTreeClassifier(random_state=0)]\n\n # Test with grid search cv\n for est in estimators:\n grid_search = GridSearchCV(est, est_parameters, cv=cv)\n grid_search.fit(X, y)\n res_params = grid_search.cv_results_['params']\n for cand_i in range(len(res_params)):\n est.set_params(**res_params[cand_i])\n\n for i, (train, test) in enumerate(cv.split(X, y)):\n est.fit(X[train], y[train])\n correct_score = est.score(X[test], y[test])\n 
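# The score recomputed here by refitting this candidate on the fold must\n                # match the per-split value stored in cv_results_.\n                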
assert_almost_equal(\n correct_score,\n grid_search.cv_results_['split%d_test_score' % i][cand_i])\n\n # Test with a randomized search\n for est in estimators:\n random_search = RandomizedSearchCV(est, est_parameters,\n cv=cv, n_iter=3)\n random_search.fit(X, y)\n res_params = random_search.cv_results_['params']\n for cand_i in range(len(res_params)):\n est.set_params(**res_params[cand_i])\n\n for i, (train, test) in enumerate(cv.split(X, y)):\n est.fit(X[train], y[train])\n correct_score = est.score(X[test], y[test])\n assert_almost_equal(\n correct_score,\n random_search.cv_results_['split%d_test_score'\n % i][cand_i])\n\n\ndef test_predict_proba_disabled():\n # Test predict_proba when disabled on estimator.\n X = np.arange(20).reshape(5, -1)\n y = [0, 0, 1, 1, 1]\n clf = SVC(probability=False)\n gs = GridSearchCV(clf, {}, cv=2).fit(X, y)\n assert not hasattr(gs, \"predict_proba\")\n\n\ndef test_grid_search_allows_nans():\n # Test GridSearchCV with SimpleImputer\n X = np.arange(20, dtype=np.float64).reshape(5, -1)\n X[2, :] = np.nan\n y = [0, 0, 1, 1, 1]\n p = Pipeline([\n ('imputer', SimpleImputer(strategy='mean', missing_values=np.nan)),\n ('classifier', MockClassifier()),\n ])\n GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)\n\n\nclass FailingClassifier(BaseEstimator):\n \"\"\"Classifier that raises a ValueError on fit()\"\"\"\n\n FAILING_PARAMETER = 2\n\n def __init__(self, parameter=None):\n self.parameter = parameter\n\n def fit(self, X, y=None):\n if self.parameter == FailingClassifier.FAILING_PARAMETER:\n raise ValueError(\"Failing classifier failed as required\")\n\n def predict(self, X):\n return np.zeros(X.shape[0])\n\n def score(self, X=None, Y=None):\n return 0.\n\n\ndef test_grid_search_failing_classifier():\n # GridSearchCV with on_error != 'raise'\n # Ensures that a warning is raised and score reset where appropriate.\n\n X, y = make_classification(n_samples=20, n_features=10, random_state=0)\n\n clf = FailingClassifier()\n\n # refit=False because we only want to check that errors caused by fits\n # to individual folds will be caught and warnings raised instead. 
If\n # refit was done, then an exception would be raised on refit and not\n # caught by grid_search (expected behavior), and this would cause an\n # error in this test.\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score=0.0)\n assert_warns(FitFailedWarning, gs.fit, X, y)\n n_candidates = len(gs.cv_results_['params'])\n\n # Ensure that grid scores were set to zero as required for those fits\n # that are expected to fail.\n def get_cand_scores(i):\n return np.array(list(gs.cv_results_['split%d_test_score' % s][i]\n for s in range(gs.n_splits_)))\n\n assert all((np.all(get_cand_scores(cand_i) == 0.0)\n for cand_i in range(n_candidates)\n if gs.cv_results_['param_parameter'][cand_i] ==\n FailingClassifier.FAILING_PARAMETER))\n\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score=float('nan'))\n assert_warns(FitFailedWarning, gs.fit, X, y)\n n_candidates = len(gs.cv_results_['params'])\n assert all(np.all(np.isnan(get_cand_scores(cand_i)))\n for cand_i in range(n_candidates)\n if gs.cv_results_['param_parameter'][cand_i] ==\n FailingClassifier.FAILING_PARAMETER)\n\n ranks = gs.cv_results_['rank_test_score']\n\n # Check that succeeded estimators have lower ranks\n assert ranks[0] <= 2 and ranks[1] <= 2\n # Check that failed estimator has the highest rank\n assert ranks[clf.FAILING_PARAMETER] == 3\n assert gs.best_index_ != clf.FAILING_PARAMETER\n\n\ndef test_grid_search_failing_classifier_raise():\n # GridSearchCV with on_error == 'raise' raises the error\n\n X, y = make_classification(n_samples=20, n_features=10, random_state=0)\n\n clf = FailingClassifier()\n\n # refit=False because we want to test the behaviour of the grid search part\n gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',\n refit=False, error_score='raise')\n\n # FailingClassifier issues a ValueError so this is what we look for.\n assert_raises(ValueError, gs.fit, X, y)\n\n\ndef test_parameters_sampler_replacement():\n # raise warning if n_iter is bigger than total parameter space\n params = [{'first': [0, 1], 'second': ['a', 'b', 'c']},\n {'third': ['two', 'values']}]\n sampler = ParameterSampler(params, n_iter=9)\n n_iter = 9\n grid_size = 8\n expected_warning = ('The total space of parameters %d is smaller '\n 'than n_iter=%d. Running %d iterations. 
For '\n 'exhaustive searches, use GridSearchCV.'\n % (grid_size, n_iter, grid_size))\n assert_warns_message(UserWarning, expected_warning,\n list, sampler)\n\n # degenerates to GridSearchCV if n_iter the same as grid_size\n sampler = ParameterSampler(params, n_iter=8)\n samples = list(sampler)\n assert len(samples) == 8\n for values in ParameterGrid(params):\n assert values in samples\n\n # test sampling without replacement in a large grid\n params = {'a': range(10), 'b': range(10), 'c': range(10)}\n sampler = ParameterSampler(params, n_iter=99, random_state=42)\n samples = list(sampler)\n assert len(samples) == 99\n hashable_samples = [\"a%db%dc%d\" % (p['a'], p['b'], p['c'])\n for p in samples]\n assert len(set(hashable_samples)) == 99\n\n # doesn't go into infinite loops\n params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}\n sampler = ParameterSampler(params_distribution, n_iter=7)\n samples = list(sampler)\n assert len(samples) == 7\n\n\ndef test_stochastic_gradient_loss_param():\n # Make sure the predict_proba works when loss is specified\n # as one of the parameters in the param_grid.\n param_grid = {\n 'loss': ['log'],\n }\n X = np.arange(24).reshape(6, -1)\n y = [0, 0, 0, 1, 1, 1]\n clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),\n param_grid=param_grid, cv=3)\n\n # When the estimator is not fitted, `predict_proba` is not available as the\n # loss is 'hinge'.\n assert not hasattr(clf, \"predict_proba\")\n clf.fit(X, y)\n clf.predict_proba(X)\n clf.predict_log_proba(X)\n\n # Make sure `predict_proba` is not available when setting loss=['hinge']\n # in param_grid\n param_grid = {\n 'loss': ['hinge'],\n }\n clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),\n param_grid=param_grid, cv=3)\n assert not hasattr(clf, \"predict_proba\")\n clf.fit(X, y)\n assert not hasattr(clf, \"predict_proba\")\n\n\ndef test_search_train_scores_set_to_false():\n X = np.arange(6).reshape(6, -1)\n y = [0, 0, 0, 1, 1, 1]\n clf = LinearSVC(random_state=0)\n\n gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]}, cv=3)\n gs.fit(X, y)\n\n\ndef test_grid_search_cv_splits_consistency():\n # Check if a one time iterable is accepted as a cv parameter.\n n_samples = 100\n n_splits = 5\n X, y = make_classification(n_samples=n_samples, random_state=0)\n\n gs = GridSearchCV(LinearSVC(random_state=0),\n param_grid={'C': [0.1, 0.2, 0.3]},\n cv=OneTimeSplitter(n_splits=n_splits,\n n_samples=n_samples),\n return_train_score=True)\n gs.fit(X, y)\n\n gs2 = GridSearchCV(LinearSVC(random_state=0),\n param_grid={'C': [0.1, 0.2, 0.3]},\n cv=KFold(n_splits=n_splits), return_train_score=True)\n gs2.fit(X, y)\n\n # Give generator as a cv parameter\n assert isinstance(KFold(n_splits=n_splits,\n shuffle=True, random_state=0).split(X, y),\n GeneratorType)\n gs3 = GridSearchCV(LinearSVC(random_state=0),\n param_grid={'C': [0.1, 0.2, 0.3]},\n cv=KFold(n_splits=n_splits, shuffle=True,\n random_state=0).split(X, y),\n return_train_score=True)\n gs3.fit(X, y)\n\n gs4 = GridSearchCV(LinearSVC(random_state=0),\n param_grid={'C': [0.1, 0.2, 0.3]},\n cv=KFold(n_splits=n_splits, shuffle=True,\n random_state=0), return_train_score=True)\n gs4.fit(X, y)\n\n def _pop_time_keys(cv_results):\n for key in ('mean_fit_time', 'std_fit_time',\n 'mean_score_time', 'std_score_time'):\n cv_results.pop(key)\n return cv_results\n\n # Check if generators are supported as cv and\n # that the splits are consistent\n np.testing.assert_equal(_pop_time_keys(gs3.cv_results_),\n _pop_time_keys(gs4.cv_results_))\n\n # 
OneTimeSplitter is a non-re-entrant cv where split can be called only\n # once if ``cv.split`` is called once per param setting in GridSearchCV.fit\n # the 2nd and 3rd parameter will not be evaluated as no train/test indices\n # will be generated for the 2nd and subsequent cv.split calls.\n # This is a check to make sure cv.split is not called once per param\n # setting.\n np.testing.assert_equal({k: v for k, v in gs.cv_results_.items()\n if not k.endswith('_time')},\n {k: v for k, v in gs2.cv_results_.items()\n if not k.endswith('_time')})\n\n # Check consistency of folds across the parameters\n gs = GridSearchCV(LinearSVC(random_state=0),\n param_grid={'C': [0.1, 0.1, 0.2, 0.2]},\n cv=KFold(n_splits=n_splits, shuffle=True),\n return_train_score=True)\n gs.fit(X, y)\n\n # As the first two param settings (C=0.1) and the next two param\n # settings (C=0.2) are same, the test and train scores must also be\n # same as long as the same train/test indices are generated for all\n # the cv splits, for both param setting\n for score_type in ('train', 'test'):\n per_param_scores = {}\n for param_i in range(4):\n per_param_scores[param_i] = list(\n gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]\n for s in range(5))\n\n assert_array_almost_equal(per_param_scores[0],\n per_param_scores[1])\n assert_array_almost_equal(per_param_scores[2],\n per_param_scores[3])\n\n\ndef test_transform_inverse_transform_round_trip():\n clf = MockClassifier()\n grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3)\n\n grid_search.fit(X, y)\n X_round_trip = grid_search.inverse_transform(grid_search.transform(X))\n assert_array_equal(X, X_round_trip)\n\n\ndef test_custom_run_search():\n def check_results(results, gscv):\n exp_results = gscv.cv_results_\n assert sorted(results.keys()) == sorted(exp_results)\n for k in results:\n if not k.endswith('_time'):\n # XXX: results['params'] is a list :|\n results[k] = np.asanyarray(results[k])\n if results[k].dtype.kind == 'O':\n assert_array_equal(exp_results[k], results[k],\n err_msg='Checking ' + k)\n else:\n assert_allclose(exp_results[k], results[k],\n err_msg='Checking ' + k)\n\n def fit_grid(param_grid):\n return GridSearchCV(clf, param_grid,\n return_train_score=True).fit(X, y)\n\n class CustomSearchCV(BaseSearchCV):\n def __init__(self, estimator, **kwargs):\n super().__init__(estimator, **kwargs)\n\n def _run_search(self, evaluate):\n results = evaluate([{'max_depth': 1}, {'max_depth': 2}])\n check_results(results, fit_grid({'max_depth': [1, 2]}))\n results = evaluate([{'min_samples_split': 5},\n {'min_samples_split': 10}])\n check_results(results, fit_grid([{'max_depth': [1, 2]},\n {'min_samples_split': [5, 10]}]))\n\n # Using regressor to make sure each score differs\n clf = DecisionTreeRegressor(random_state=0)\n X, y = make_classification(n_samples=100, n_informative=4,\n random_state=0)\n mycv = CustomSearchCV(clf, return_train_score=True).fit(X, y)\n gscv = fit_grid([{'max_depth': [1, 2]},\n {'min_samples_split': [5, 10]}])\n\n results = mycv.cv_results_\n check_results(results, gscv)\n # TODO: remove in v0.24, the deprecation goes away then.\n with pytest.warns(FutureWarning,\n match=\"attribute is to be deprecated from version 0.22\"):\n for attr in dir(gscv):\n if (attr[0].islower() and attr[-1:] == '_' and\n attr not in {'cv_results_', 'best_estimator_',\n 'refit_time_',\n }):\n assert getattr(gscv, attr) == getattr(mycv, attr), \\\n \"Attribute %s not equal\" % attr\n\n\ndef test__custom_fit_no_run_search():\n class 
NoRunSearchSearchCV(BaseSearchCV):\n def __init__(self, estimator, **kwargs):\n super().__init__(estimator, **kwargs)\n\n def fit(self, X, y=None, groups=None, **fit_params):\n return self\n\n # this should not raise any exceptions\n NoRunSearchSearchCV(SVC()).fit(X, y)\n\n class BadSearchCV(BaseSearchCV):\n def __init__(self, estimator, **kwargs):\n super().__init__(estimator, **kwargs)\n\n with pytest.raises(NotImplementedError,\n match=\"_run_search not implemented.\"):\n # this should raise a NotImplementedError\n BadSearchCV(SVC()).fit(X, y)\n\n\n@pytest.mark.parametrize(\"iid\", [False, True])\ndef test_deprecated_grid_search_iid(iid):\n # FIXME: remove in 0.24\n depr_msg = \"The parameter 'iid' is deprecated in 0.22 and will be removed\"\n X, y = make_blobs(n_samples=54, random_state=0, centers=2)\n grid = GridSearchCV(\n SVC(random_state=0), param_grid={'C': [10]}, cv=3, iid=iid\n )\n with pytest.warns(FutureWarning, match=depr_msg):\n grid.fit(X, y)\n\n\ndef test_empty_cv_iterator_error():\n # Use global X, y\n\n # create cv\n cv = KFold(n_splits=3).split(X)\n\n # pop all of it, this should cause the expected ValueError\n [u for u in cv]\n # cv is empty now\n\n train_size = 100\n ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},\n cv=cv, n_jobs=4)\n\n # assert that this raises an error\n with pytest.raises(ValueError,\n match='No fits were performed. '\n 'Was the CV iterator empty\\\\? '\n 'Were there no candidates\\\\?'):\n ridge.fit(X[:train_size], y[:train_size])\n\n\ndef test_random_search_bad_cv():\n # Use global X, y\n\n class BrokenKFold(KFold):\n def get_n_splits(self, *args, **kw):\n return 1\n\n # create bad cv\n cv = BrokenKFold(n_splits=3)\n\n train_size = 100\n ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},\n cv=cv, n_jobs=4)\n\n # assert that this raises an error\n with pytest.raises(ValueError,\n match='cv.split and cv.get_n_splits returned '\n 'inconsistent results. 
Expected \\\\d+ '\n 'splits, got \\\\d+'):\n ridge.fit(X[:train_size], y[:train_size])\n\n\ndef test_search_cv__pairwise_property_delegated_to_base_estimator():\n \"\"\"\n Test implementation of BaseSearchCV has the _pairwise property\n which matches the _pairwise property of its estimator.\n This test make sure _pairwise is delegated to the base estimator.\n\n Non-regression test for issue #13920.\n \"\"\"\n est = BaseEstimator()\n attr_message = \"BaseSearchCV _pairwise property must match estimator\"\n\n for _pairwise_setting in [True, False]:\n setattr(est, '_pairwise', _pairwise_setting)\n cv = GridSearchCV(est, {'n_neighbors': [10]})\n assert _pairwise_setting == cv._pairwise, attr_message\n\n\ndef test_search_cv__pairwise_property_equivalence_of_precomputed():\n \"\"\"\n Test implementation of BaseSearchCV has the _pairwise property\n which matches the _pairwise property of its estimator.\n This test ensures the equivalence of 'precomputed'.\n\n Non-regression test for issue #13920.\n \"\"\"\n n_samples = 50\n n_splits = 2\n X, y = make_classification(n_samples=n_samples, random_state=0)\n grid_params = {'n_neighbors': [10]}\n\n # defaults to euclidean metric (minkowski p = 2)\n clf = KNeighborsClassifier()\n cv = GridSearchCV(clf, grid_params, cv=n_splits)\n cv.fit(X, y)\n preds_original = cv.predict(X)\n\n # precompute euclidean metric to validate _pairwise is working\n X_precomputed = euclidean_distances(X)\n clf = KNeighborsClassifier(metric='precomputed')\n cv = GridSearchCV(clf, grid_params, cv=n_splits)\n cv.fit(X_precomputed, y)\n preds_precomputed = cv.predict(X_precomputed)\n\n attr_message = \"GridSearchCV not identical with precomputed metric\"\n assert (preds_original == preds_precomputed).all(), attr_message\n\n\n@pytest.mark.parametrize(\n \"SearchCV, param_search\",\n [(GridSearchCV, {'a': [0.1, 0.01]}),\n (RandomizedSearchCV, {'a': uniform(1, 3)})]\n)\ndef test_scalar_fit_param(SearchCV, param_search):\n # unofficially sanctioned tolerance for scalar values in fit_params\n # non-regression test for:\n # https://github.com/scikit-learn/scikit-learn/issues/15805\n class TestEstimator(BaseEstimator, ClassifierMixin):\n def __init__(self, a=None):\n self.a = a\n\n def fit(self, X, y, r=None):\n self.r_ = r\n\n def predict(self, X):\n return np.zeros(shape=(len(X)))\n\n model = SearchCV(TestEstimator(), param_search)\n X, y = make_classification(random_state=42)\n model.fit(X, y, r=42)\n assert model.best_estimator_.r_ == 42\n\n\n@pytest.mark.parametrize(\n \"SearchCV, param_search\",\n [(GridSearchCV, {'alpha': [0.1, 0.01]}),\n (RandomizedSearchCV, {'alpha': uniform(0.01, 0.1)})]\n)\ndef test_scalar_fit_param_compat(SearchCV, param_search):\n # check support for scalar values in fit_params, for instance in LightGBM\n # that do not exactly respect the scikit-learn API contract but that we do\n # not want to break without an explicit deprecation cycle and API\n # recommendations for implementing early stopping with a user provided\n # validation set. 
non-regression test for:\n # https://github.com/scikit-learn/scikit-learn/issues/15805\n X_train, X_valid, y_train, y_valid = train_test_split(\n *make_classification(random_state=42), random_state=42\n )\n\n class _FitParamClassifier(SGDClassifier):\n\n def fit(self, X, y, sample_weight=None, tuple_of_arrays=None,\n scalar_param=None, callable_param=None):\n super().fit(X, y, sample_weight=sample_weight)\n assert scalar_param > 0\n assert callable(callable_param)\n\n # The tuple of arrays should be preserved as tuple.\n assert isinstance(tuple_of_arrays, tuple)\n assert tuple_of_arrays[0].ndim == 2\n assert tuple_of_arrays[1].ndim == 1\n return self\n\n def _fit_param_callable():\n pass\n\n model = SearchCV(\n _FitParamClassifier(), param_search\n )\n\n # NOTE: `fit_params` should be data dependent (e.g. `sample_weight`) which\n # is not the case for the following parameters. But this abuse is common in\n # popular third-party libraries and we should tolerate this behavior for\n # now and be careful not to break support for those without following\n # proper deprecation cycle.\n fit_params = {\n 'tuple_of_arrays': (X_valid, y_valid),\n 'callable_param': _fit_param_callable,\n 'scalar_param': 42,\n }\n model.fit(X_train, y_train, **fit_params)\n", "\"\"\" Testing\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport zlib\n\nfrom io import BytesIO\n\nif sys.version_info[0] >= 3:\n cStringIO = BytesIO\nelse:\n from cStringIO import StringIO as cStringIO\n\nfrom tempfile import mkstemp\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nfrom numpy.testing import assert_, assert_equal\nfrom pytest import raises as assert_raises\n\nfrom scipy.io.matlab.streams import (make_stream,\n GenericStream, cStringStream, FileStream, ZlibInputStream,\n _read_into, _read_string, BLOCK_SIZE)\n\nIS_PYPY = ('__pypy__' in sys.modules)\n\n\n@contextmanager\ndef setup_test_file():\n val = b'a\\x00string'\n fd, fname = mkstemp()\n\n with os.fdopen(fd, 'wb') as fs:\n fs.write(val)\n with open(fname, 'rb') as fs:\n gs = BytesIO(val)\n cs = cStringIO(val)\n yield fs, gs, cs\n os.unlink(fname)\n\n\ndef test_make_stream():\n with setup_test_file() as (fs, gs, cs):\n # test stream initialization\n assert_(isinstance(make_stream(gs), GenericStream))\n if sys.version_info[0] < 3 and not IS_PYPY:\n assert_(isinstance(make_stream(cs), cStringStream))\n assert_(isinstance(make_stream(fs), FileStream))\n\n\ndef test_tell_seek():\n with setup_test_file() as (fs, gs, cs):\n for s in (fs, gs, cs):\n st = make_stream(s)\n res = st.seek(0)\n assert_equal(res, 0)\n assert_equal(st.tell(), 0)\n res = st.seek(5)\n assert_equal(res, 0)\n assert_equal(st.tell(), 5)\n res = st.seek(2, 1)\n assert_equal(res, 0)\n assert_equal(st.tell(), 7)\n res = st.seek(-2, 2)\n assert_equal(res, 0)\n assert_equal(st.tell(), 6)\n\n\ndef test_read():\n with setup_test_file() as (fs, gs, cs):\n for s in (fs, gs, cs):\n st = make_stream(s)\n st.seek(0)\n res = st.read(-1)\n assert_equal(res, b'a\\x00string')\n st.seek(0)\n res = st.read(4)\n assert_equal(res, b'a\\x00st')\n # read into\n st.seek(0)\n res = _read_into(st, 4)\n assert_equal(res, b'a\\x00st')\n res = _read_into(st, 4)\n assert_equal(res, b'ring')\n assert_raises(IOError, _read_into, st, 2)\n # read alloc\n st.seek(0)\n res = _read_string(st, 4)\n assert_equal(res, b'a\\x00st')\n res = _read_string(st, 4)\n assert_equal(res, b'ring')\n assert_raises(IOError, _read_string, st, 2)\n\n\nclass TestZlibInputStream(object):\n 
def _get_data(self, size):\n data = np.random.randint(0, 256, size).astype(np.uint8).tostring()\n compressed_data = zlib.compress(data)\n stream = BytesIO(compressed_data)\n return stream, len(compressed_data), data\n\n def test_read(self):\n SIZES = [0, 1, 10, BLOCK_SIZE//2, BLOCK_SIZE-1,\n BLOCK_SIZE, BLOCK_SIZE+1, 2*BLOCK_SIZE-1]\n\n READ_SIZES = [BLOCK_SIZE//2, BLOCK_SIZE-1,\n BLOCK_SIZE, BLOCK_SIZE+1]\n\n def check(size, read_size):\n compressed_stream, compressed_data_len, data = self._get_data(size)\n stream = ZlibInputStream(compressed_stream, compressed_data_len)\n data2 = b''\n so_far = 0\n while True:\n block = stream.read(min(read_size,\n size - so_far))\n if not block:\n break\n so_far += len(block)\n data2 += block\n assert_equal(data, data2)\n\n for size in SIZES:\n for read_size in READ_SIZES:\n check(size, read_size)\n\n def test_read_max_length(self):\n size = 1234\n data = np.random.randint(0, 256, size).astype(np.uint8).tostring()\n compressed_data = zlib.compress(data)\n compressed_stream = BytesIO(compressed_data + b\"abbacaca\")\n stream = ZlibInputStream(compressed_stream, len(compressed_data))\n\n stream.read(len(data))\n assert_equal(compressed_stream.tell(), len(compressed_data))\n\n assert_raises(IOError, stream.read, 1)\n\n def test_read_bad_checksum(self):\n data = np.random.randint(0, 256, 10).astype(np.uint8).tostring()\n compressed_data = zlib.compress(data)\n\n # break checksum\n compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])\n\n compressed_stream = BytesIO(compressed_data)\n stream = ZlibInputStream(compressed_stream, len(compressed_data))\n\n assert_raises(zlib.error, stream.read, len(data))\n\n def test_seek(self):\n compressed_stream, compressed_data_len, data = self._get_data(1024)\n\n stream = ZlibInputStream(compressed_stream, compressed_data_len)\n\n stream.seek(123)\n p = 123\n assert_equal(stream.tell(), p)\n d1 = stream.read(11)\n assert_equal(d1, data[p:p+11])\n\n stream.seek(321, 1)\n p = 123+11+321\n assert_equal(stream.tell(), p)\n d2 = stream.read(21)\n assert_equal(d2, data[p:p+21])\n\n stream.seek(641, 0)\n p = 641\n assert_equal(stream.tell(), p)\n d3 = stream.read(11)\n assert_equal(d3, data[p:p+11])\n\n assert_raises(IOError, stream.seek, 10, 2)\n assert_raises(IOError, stream.seek, -1, 1)\n assert_raises(ValueError, stream.seek, 1, 123)\n\n stream.seek(10000, 1)\n assert_raises(IOError, stream.read, 12)\n\n def test_seek_bad_checksum(self):\n data = np.random.randint(0, 256, 10).astype(np.uint8).tostring()\n compressed_data = zlib.compress(data)\n\n # break checksum\n compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])\n\n compressed_stream = BytesIO(compressed_data)\n stream = ZlibInputStream(compressed_stream, len(compressed_data))\n\n assert_raises(zlib.error, stream.seek, len(data))\n\n def test_all_data_read(self):\n compressed_stream, compressed_data_len, data = self._get_data(1024)\n stream = ZlibInputStream(compressed_stream, compressed_data_len)\n assert_(not stream.all_data_read())\n stream.seek(512)\n assert_(not stream.all_data_read())\n stream.seek(1024)\n assert_(stream.all_data_read())\n\n def test_all_data_read_overlap(self):\n COMPRESSION_LEVEL = 6\n\n data = np.arange(33707000).astype(np.uint8).tostring()\n compressed_data = zlib.compress(data, COMPRESSION_LEVEL)\n compressed_data_len = len(compressed_data)\n\n # check that part of the checksum overlaps\n assert_(compressed_data_len == BLOCK_SIZE + 2)\n\n compressed_stream = BytesIO(compressed_data)\n 
stream = ZlibInputStream(compressed_stream, compressed_data_len)\n assert_(not stream.all_data_read())\n stream.seek(len(data))\n assert_(stream.all_data_read())\n\n def test_all_data_read_bad_checksum(self):\n COMPRESSION_LEVEL = 6\n\n data = np.arange(33707000).astype(np.uint8).tostring()\n compressed_data = zlib.compress(data, COMPRESSION_LEVEL)\n compressed_data_len = len(compressed_data)\n\n # check that part of the checksum overlaps\n assert_(compressed_data_len == BLOCK_SIZE + 2)\n\n # break checksum\n compressed_data = compressed_data[:-1] + bytes([(compressed_data[-1] + 1) & 255])\n\n compressed_stream = BytesIO(compressed_data)\n stream = ZlibInputStream(compressed_stream, compressed_data_len)\n assert_(not stream.all_data_read())\n stream.seek(len(data))\n\n assert_raises(zlib.error, stream.all_data_read)\n" ]
[ [ "numpy.savez" ], [ "numpy.sum", "numpy.min", "numpy.zeros" ], [ "sklearn.preprocessing.MaxAbsScaler", "numpy.isnan", "numpy.random.RandomState", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.PowerTransformer", "sklearn.preprocessing.QuantileTransformer", "numpy.nanmin", "sklearn.preprocessing.RobustScaler", "sklearn.utils._testing.assert_allclose", "sklearn.preprocessing.MinMaxScaler", "sklearn.model_selection.train_test_split", "sklearn.base.clone", "sklearn.datasets.load_iris" ], [ "numpy.concatenate", "numpy.isnan", "numpy.linalg.norm", "numpy.fill_diagonal", "numpy.zeros", "numpy.errstate", "numpy.sum", "scipy.stats.truncnorm", "numpy.shape", "numpy.arange", "numpy.abs", "numpy.clip", "numpy.all", "numpy.corrcoef", "numpy.argsort", "numpy.flatnonzero" ], [ "scipy._lib._testutils.PytestTester" ], [ "sklearn.utils._testing.assert_raise_message", "sklearn.model_selection.LeaveOneGroupOut", "numpy.dot", "sklearn.linear_model.LinearRegression", "scipy.stats.bernoulli", "sklearn.model_selection.RandomizedSearchCV", "numpy.mean", "sklearn.model_selection.ParameterGrid", "numpy.where", "sklearn.utils._testing.ignore_warnings", "sklearn.tree.DecisionTreeRegressor", "sklearn.metrics.pairwise.euclidean_distances", "sklearn.metrics.f1_score", "numpy.logspace", "sklearn.svm.LinearSVC", "sklearn.base.BaseEstimator", "sklearn.impute.SimpleImputer", "sklearn.model_selection.StratifiedKFold", "sklearn.utils._testing.assert_array_almost_equal", "sklearn.utils._testing.assert_warns_message", "sklearn.svm.SVC", "sklearn.model_selection.ParameterSampler", "sklearn.linear_model.SGDClassifier", "numpy.arange", "numpy.sqrt", "sklearn.metrics.make_scorer", "sklearn.model_selection.StratifiedShuffleSplit", "numpy.in1d", "scipy.sparse.csr_matrix", "sklearn.utils._testing.assert_almost_equal", "sklearn.utils._testing.assert_warns", "sklearn.base.clone", "sklearn.datasets.make_classification", "numpy.array", "sklearn.utils._testing.assert_raises", "numpy.zeros", "sklearn.model_selection.GroupShuffleSplit", "sklearn.neighbors.KNeighborsClassifier", "sklearn.model_selection.GroupKFold", "sklearn.datasets.make_multilabel_classification", "numpy.std", "numpy.allclose", "numpy.all", "sklearn.neighbors.KernelDensity", "sklearn.tree.DecisionTreeClassifier", "sklearn.model_selection.KFold", "sklearn.utils._mocking.CheckingClassifier", "sklearn.metrics.roc_auc_score", "sklearn.model_selection.tests.common.OneTimeSplitter", "sklearn.model_selection.LeavePGroupsOut", "scipy.stats.expon", "sklearn.datasets.make_blobs", "scipy.stats.uniform", "numpy.random.RandomState", "sklearn.utils._testing.assert_array_equal", "numpy.ones", "sklearn.cluster.KMeans", "sklearn.linear_model.Ridge", "sklearn.utils._testing.assert_allclose", "sklearn.model_selection.GridSearchCV", "numpy.asanyarray", "numpy.unique" ], [ "scipy.io.matlab.streams.make_stream", "numpy.testing.assert_equal", "numpy.testing.assert_", "scipy.io.matlab.streams._read_string", "numpy.random.randint", "numpy.arange", "scipy.io.matlab.streams.ZlibInputStream", "scipy.io.matlab.streams._read_into" ] ]
larsmans/astroML
[ "01ee67ea6e1c5a8dedc2498ec7397653d65b2c8d", "01ee67ea6e1c5a8dedc2498ec7397653d65b2c8d", "01ee67ea6e1c5a8dedc2498ec7397653d65b2c8d", "01ee67ea6e1c5a8dedc2498ec7397653d65b2c8d" ]
[ "book_figures/chapter3/fig_cauchy_median_mean.py", "book_figures/chapter5/fig_likelihood_gaussian.py", "book_figures/chapter5/fig_odds_ratio_coin.py", "book_figures/chapter5/fig_cauchy_mcmc.py" ]
[ "\"\"\"\nMedian and Mean for Cauchy distribution\n---------------------------------------\n\nThis plot shows graphically that mean-based statistics are not robust for\nthe Cauchy distribution. Median-based statistics should be used instead.\n\"\"\"\n# Author: Jake VanderPlas\n# License: BSD\n# The figure produced by this code is published in the textbook\n# \"Statistics, Data Mining, and Machine Learning in Astronomy\" (2013)\n# For more information, see http://astroML.github.com\n# To report a bug or issue, use the following forum:\n# https://groups.google.com/forum/#!forum/astroml-general\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy import optimize\nfrom scipy.stats import cauchy, norm\n\n#----------------------------------------------------------------------\n# This function adjusts matplotlib settings for a uniform feel in the textbook.\n# Note that with usetex=True, fonts are rendered with LaTeX. This may\n# result in an error if LaTeX is not installed on your system. In that case,\n# you can set usetex to False.\nfrom astroML.plotting import setup_text_plots\nsetup_text_plots(fontsize=8, usetex=True)\n\n\ndef robust_mean_mixture(x):\n \"\"\"Compute the mean via a mixture of two Gaussians\n\n One Gaussian accounts for outliers, and one Gaussian accounts for\n the true distribution. This cannot be computed analytically, so\n it uses scipy's function optimization\n \"\"\"\n if len(x) == 1:\n return x\n\n x = x.ravel()\n\n mu_bg = np.mean(x)\n sig_bg = 3 * np.std(x)\n\n likelihood = lambda v: -np.sum(np.log(norm.pdf(x, v[0], v[1])\n + norm.pdf(x, mu_bg, sig_bg)))\n\n v0 = np.array([0, 30])\n\n v_best = optimize.fmin(likelihood, v0, disp=False)\n\n return v_best[0]\n\n\ndef robust_mean_iterated(x, sigma_cut=3):\n \"\"\"Compute the robust mean iteratively\n\n After computing the mean, points further than 3 sigma from the mean\n are removed and the result is repeated until convergence.\n \"\"\"\n flag = np.ones(x.shape, dtype=bool)\n n_to_keep = x.size\n\n while True:\n xf = x[flag]\n mu = xf.mean()\n sig = xf.std()\n if len(xf) == 1:\n break\n\n x_sig = abs((x - mu) / sig)\n\n too_far = (x_sig > sigma_cut)\n flag[too_far] = False\n n_flag = flag.sum()\n\n if n_flag == n_to_keep:\n break\n else:\n n_to_keep = n_flag\n\n return mu\n\n#------------------------------------------------------------\n# Create the distribution and compute means and medians\nnp.random.seed(6)\nmu = 0\ngamma = 2\n\nxi = cauchy(mu, gamma).rvs(100)\nNrange = np.arange(1, len(xi) + 1)\n\nmean = [np.mean(xi[:N]) for N in Nrange]\nmedian = [np.median(xi[:N]) for N in Nrange]\nmean_mixture = [robust_mean_mixture(xi[:N]) for N in Nrange]\nmean_iter = [robust_mean_iterated(xi[:N]) for N in Nrange]\n\n#------------------------------------------------------------\n# Plot the results as a function of number of points\nfig = plt.figure(figsize=(5, 3.75))\nfig.subplots_adjust(hspace=0.05)\n\n# first plot the mean\nax = fig.add_subplot(211)\nax.plot(Nrange, mean, '-.b', label='mean')\nax.plot(Nrange, median, '-k', label='median')\nax.plot(Nrange, mean_mixture, ':r', label='robust mean (mixture)')\nax.plot(Nrange, mean_iter, '--g', label='robust mean (sigma-clip)')\nax.plot(Nrange, 0 * Nrange, '-', c='gray', lw=0.5)\n\nax.set_xlim(0, 100)\nax.set_ylim(-7, 7)\nax.legend(loc=4, ncol=2, frameon=False)\nax.set_ylabel('Value')\nax.xaxis.set_major_formatter(plt.NullFormatter())\n\n# now plot the median\nax = fig.add_subplot(212)\nax.scatter(Nrange, xi, lw=0, s=10, c='k')\nax.plot(Nrange, 0 * Nrange, '-', 
c='gray')\nax.set_xlim(0, 100)\nax.set_ylim(-75, 75)\nax.set_xlabel('Sample Size')\nax.set_ylabel('Value')\n\nplt.show()\n", "\"\"\"\nLog-likelihood for Gaussian Distribution\n----------------------------------------\n\nThis plot shows the Likelihood as a function of the mean :math:`\\mu` and the\nerror :math:`\\sigma` when the posterior is assumed to be gaussian.\n\"\"\"\n# Author: Jake VanderPlas\n# License: BSD\n# The figure produced by this code is published in the textbook\n# \"Statistics, Data Mining, and Machine Learning in Astronomy\" (2013)\n# For more information, see http://astroML.github.com\n# To report a bug or issue, use the following forum:\n# https://groups.google.com/forum/#!forum/astroml-general\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom astroML.plotting.mcmc import convert_to_stdev\n\n#----------------------------------------------------------------------\n# This function adjusts matplotlib settings for a uniform feel in the textbook.\n# Note that with usetex=True, fonts are rendered with LaTeX. This may\n# result in an error if LaTeX is not installed on your system. In that case,\n# you can set usetex to False.\nfrom astroML.plotting import setup_text_plots\nsetup_text_plots(fontsize=8, usetex=True)\n\n\ndef gauss_logL(xbar, V, n, sigma, mu):\n \"\"\"Equation 5.57: gaussian likelihood\"\"\"\n return (-(n + 1) * np.log(sigma)\n - 0.5 * n * ((xbar - mu) ** 2 + V) / sigma ** 2)\n\n#------------------------------------------------------------\n# Define the grid and compute logL\nsigma = np.linspace(1, 5, 70)\nmu = np.linspace(-3, 5, 70)\nxbar = 1\nV = 4\nn = 10\n\nlogL = gauss_logL(xbar, V, n, sigma[:, np.newaxis], mu)\nlogL -= logL.max()\n\n#------------------------------------------------------------\n# Plot the results\nfig = plt.figure(figsize=(5, 3.75))\nplt.imshow(logL, origin='lower',\n extent=(mu[0], mu[-1], sigma[0], sigma[-1]),\n cmap=plt.cm.binary,\n aspect='auto')\nplt.colorbar().set_label(r'$\\log(L)$')\nplt.clim(-5, 0)\n\nplt.contour(mu, sigma, convert_to_stdev(logL),\n levels=(0.683, 0.955, 0.997),\n colors='k')\n\nplt.text(0.5, 0.93, r'$L(\\mu,\\sigma)\\ \\mathrm{for}\\ \\bar{x}=1,\\ V=4,\\ n=10$',\n bbox=dict(ec='k', fc='w', alpha=0.9),\n ha='center', va='center', transform=plt.gca().transAxes)\n\nplt.xlabel(r'$\\mu$')\nplt.ylabel(r'$\\sigma$')\n\nplt.show()\n", "\"\"\"\nCoin Odds Ratio\n---------------\n\nThis figure shows the odds ratio for a coin flip. The curves show the\nodds ratio between model :math:`M_1`, in which the probability of landing\nheads is known to be :math:`b^*`, and model :math:`M_2`, where the probability\nof landing heads is unknown.\n\nHere we plot the odds ratio between the models, :math:`O_{21}`, as a function\nof :math:`k` heads observed :math:`n` coin tosses. 
Comparing the panels, it\nis clear that, as expected, observing more tosses gives better constraints\non models via the odds ratio.\n\"\"\"\n# Author: Jake VanderPlas\n# License: BSD\n# The figure produced by this code is published in the textbook\n# \"Statistics, Data Mining, and Machine Learning in Astronomy\" (2013)\n# For more information, see http://astroML.github.com\n# To report a bug or issue, use the following forum:\n# https://groups.google.com/forum/#!forum/astroml-general\nimport numpy as np\nfrom scipy import integrate\nfrom matplotlib import pyplot as plt\n\n#----------------------------------------------------------------------\n# This function adjusts matplotlib settings for a uniform feel in the textbook.\n# Note that with usetex=True, fonts are rendered with LaTeX. This may\n# result in an error if LaTeX is not installed on your system. In that case,\n# you can set usetex to False.\nfrom astroML.plotting import setup_text_plots\nsetup_text_plots(fontsize=8, usetex=True)\n\n\n@np.vectorize\ndef odds_ratio(n, k, bstar):\n \"\"\"Odds ratio between M_2, where the heads probability is unknown,\n and M_1, where the heads probability is known to be `bstar`, evaluated\n in the case of `k` heads observed in `n` tosses.\n\n Eqn. 5.25 in the text\n \"\"\"\n factor = 1. / (bstar ** k * (1 - bstar) ** (n - k))\n f = lambda b: b ** k * (1 - b) ** (n - k)\n\n return factor * integrate.quad(f, 0, 1)[0]\n\n#------------------------------------------------------------\n# Plot the results\nfig = plt.figure(figsize=(5, 2.5))\nfig.subplots_adjust(left=0.13, right=0.95, wspace=0.05, bottom=0.15)\n\nsubplots = [121, 122]\nn_array = [10, 20]\n\nlinestyles = ['-k', '--b']\nbstar_array = [0.5, 0.1]\n\nfor subplot, n in zip(subplots, n_array):\n ax = fig.add_subplot(subplot, yscale='log')\n k = np.arange(n + 1)\n\n # plot curves for two values of bstar\n for ls, bstar in zip(linestyles, bstar_array):\n ax.plot(k, odds_ratio(n, k, bstar), ls,\n label=r'$b^* = %.1f$' % bstar)\n\n if subplot == 121:\n ax.set_xlim(0, n - 0.01)\n ax.set_ylabel(r'$O_{21}$')\n ax.legend(loc=2)\n else:\n ax.set_xlim(0, n)\n ax.yaxis.set_major_formatter(plt.NullFormatter())\n\n ax.set_xlabel('$k$')\n ax.set_title('$n = %i$' % n)\n ax.set_ylim(8E-2, 1E3)\n ax.xaxis.set_major_locator(plt.MultipleLocator(n / 5))\n ax.grid()\n\n\nplt.show()\n", "\"\"\"\nMCMC for the Cauchy distribution\n--------------------------------\n\nThis example shows how to use pyMC to sample the likelihood for the\nparameters of a Cauchy distribution\n\"\"\"\n# Author: Jake VanderPlas\n# License: BSD\n# The figure produced by this code is published in the textbook\n# \"Statistics, Data Mining, and Machine Learning in Astronomy\" (2013)\n# For more information, see http://astroML.github.com\n# To report a bug or issue, use the following forum:\n# https://groups.google.com/forum/#!forum/astroml-general\nimport numpy as np\nfrom scipy.stats import cauchy\nfrom matplotlib import pyplot as plt\nfrom astroML.plotting.mcmc import convert_to_stdev\n\n# this fixes a problem when using older versions of pymc with newer\n# versions of scipy\nimport scipy\nscipy.derivative = scipy.misc.derivative\nimport pymc\n\n#----------------------------------------------------------------------\n# This function adjusts matplotlib settings for a uniform feel in the textbook.\n# Note that with usetex=True, fonts are rendered with LaTeX. This may\n# result in an error if LaTeX is not installed on your system. 
In that case,\n# you can set usetex to False.\nfrom astroML.plotting import setup_text_plots\nsetup_text_plots(fontsize=8, usetex=True)\n\n\ndef cauchy_logL(xi, sigma, mu):\n \"\"\"Equation 5.74: cauchy likelihood\"\"\"\n xi = np.asarray(xi)\n n = xi.size\n shape = np.broadcast(sigma, mu).shape\n\n xi = xi.reshape(xi.shape + tuple([1 for s in shape]))\n\n return ((n - 1) * np.log(sigma)\n - np.sum(np.log(sigma ** 2 + (xi - mu) ** 2), 0))\n\n\n#----------------------------------------------------------------------\n# Draw the sample from a Cauchy distribution\nnp.random.seed(44)\nmu_0 = 0\ngamma_0 = 2\nxi = cauchy(mu_0, gamma_0).rvs(10)\n\n#----------------------------------------------------------------------\n# Perform MCMC:\n\n# set up our Stochastic variables, mu and gamma\nmu = pymc.Uniform('mu', -5, 5)\nlog_gamma = pymc.Uniform('log_gamma', -10, 10, value=0)\n\n\n@pymc.deterministic\ndef gamma(log_gamma=log_gamma):\n return np.exp(log_gamma)\n\n# set up our observed variable x\nx = pymc.Cauchy('x', mu, gamma, observed=True, value=xi)\n\n# set up our model dictionary\nmodel = dict(mu=mu, log_gamma=log_gamma, gamma=gamma, x=x)\n\n# perform the MCMC\nS = pymc.MCMC(model)\nS.sample(iter=50000, burn=5000)\n\n# extract the traces we're interested in\ntrace_mu = S.trace('mu')[:]\ntrace_gamma = S.trace('gamma')[:]\n\n# compute histogram of results to plot below\nL_MCMC, mu_bins, gamma_bins = np.histogram2d(trace_mu, trace_gamma,\n bins=(np.linspace(-5, 5, 41),\n np.linspace(0, 5, 41)))\nL_MCMC[L_MCMC == 0] = 1E-16 # prevents zero-division errors\n\n#----------------------------------------------------------------------\n# Compute likelihood analytically for comparison\nmu = np.linspace(-5, 5, 70)\ngamma = np.linspace(0.1, 5, 70)\nlogL = cauchy_logL(xi, gamma[:, np.newaxis], mu)\nlogL -= logL.max()\n\np_mu = np.exp(logL).sum(0)\np_mu /= p_mu.sum() * (mu[1] - mu[0])\n\np_gamma = np.exp(logL).sum(1)\np_gamma /= p_gamma.sum() * (gamma[1] - gamma[0])\n\nhist_mu, bins_mu = np.histogram(trace_mu, bins=mu_bins, normed=True)\nhist_gamma, bins_gamma = np.histogram(trace_gamma, bins=gamma_bins,\n normed=True)\n\n\n#----------------------------------------------------------------------\n# plot the results\nfig = plt.figure(figsize=(5, 5))\n\n# first axis: likelihood contours\nax1 = fig.add_axes((0.4, 0.4, 0.55, 0.55))\nax1.xaxis.set_major_formatter(plt.NullFormatter())\nax1.yaxis.set_major_formatter(plt.NullFormatter())\n\nax1.contour(mu, gamma, convert_to_stdev(logL),\n levels=(0.683, 0.955, 0.997),\n colors='b', linestyles='dashed')\n\nax1.contour(0.5 * (mu_bins[:-1] + mu_bins[1:]),\n 0.5 * (gamma_bins[:-1] + gamma_bins[1:]),\n convert_to_stdev(np.log(L_MCMC.T)),\n levels=(0.683, 0.955, 0.997),\n colors='k')\n\n# second axis: marginalized over mu\nax2 = fig.add_axes((0.1, 0.4, 0.29, 0.55))\nax2.xaxis.set_major_formatter(plt.NullFormatter())\nax2.plot(hist_gamma, 0.5 * (bins_gamma[1:] + bins_gamma[:-1]\n - bins_gamma[1] + bins_gamma[0]),\n '-k', drawstyle='steps')\nax2.plot(p_gamma, gamma, '--b')\nax2.set_ylabel(r'$\\gamma$')\nax2.set_ylim(0, 5)\n\n# third axis: marginalized over gamma\nax3 = fig.add_axes((0.4, 0.1, 0.55, 0.29))\nax3.yaxis.set_major_formatter(plt.NullFormatter())\nax3.plot(0.5 * (bins_mu[1:] + bins_mu[:-1]), hist_mu,\n '-k', drawstyle='steps-mid')\nax3.plot(mu, p_mu, '--b')\nax3.set_xlabel(r'$\\mu$')\nplt.xlim(-5, 5)\n\nplt.show()\n" ]
[ [ "scipy.stats.norm.pdf", "numpy.array", "numpy.random.seed", "numpy.median", "numpy.ones", "numpy.mean", "matplotlib.pyplot.figure", "numpy.std", "scipy.optimize.fmin", "scipy.stats.cauchy", "matplotlib.pyplot.show", "matplotlib.pyplot.NullFormatter" ], [ "matplotlib.pyplot.colorbar", "matplotlib.pyplot.clim", "numpy.log", "matplotlib.pyplot.gca", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "numpy.linspace", "matplotlib.pyplot.imshow" ], [ "matplotlib.pyplot.figure", "matplotlib.pyplot.MultipleLocator", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.NullFormatter", "scipy.integrate.quad" ], [ "numpy.histogram", "matplotlib.pyplot.xlim", "numpy.asarray", "numpy.broadcast", "numpy.log", "numpy.random.seed", "numpy.exp", "matplotlib.pyplot.figure", "scipy.stats.cauchy", "matplotlib.pyplot.show", "numpy.linspace", "matplotlib.pyplot.NullFormatter" ] ]
aotuai/capsule-zoo
[ "bb0093799cf035a88153a9be6ed1e58df9923a8e" ]
[ "capsules/detector_text_openvino/backend.py" ]
[ "from typing import Dict, List, Any\n\nimport numpy as np\nimport cv2\n\nfrom vcap import (\n DetectionNode,\n DETECTION_NODE_TYPE,\n OPTION_TYPE,\n BaseStreamState,\n BaseBackend,\n rect_to_coords)\nfrom vcap_utils import (\n BaseOpenVINOBackend,\n)\n\nSOS_INDEX = 0\nEOS_INDEX = 1\nMAX_SEQ_LEN = 28\nALPHABET = ' 0123456789abcdefghijklmnopqrstuvwxyz'\n\n\n# We have to do this because we need there to be a process_frame to use it\nclass OpenVINOModel(BaseOpenVINOBackend):\n def process_frame(self,\n frame: np.ndarray,\n detection_node: DETECTION_NODE_TYPE,\n options: Dict[str, OPTION_TYPE],\n state: BaseStreamState) -> DETECTION_NODE_TYPE:\n raise NotImplemented('This backend is not for processing frames. '\n 'It is only used for storing a model.')\n\n\nclass Backend(BaseBackend):\n label_map: Dict[int, str] = {1: \"text\"}\n\n def __init__(self, detector: OpenVINOModel,\n recognizer_encoder: OpenVINOModel,\n recognizer_decoder: OpenVINOModel):\n super().__init__()\n self.detector = detector\n self.recognizer_encoder = recognizer_encoder\n self.recognizer_decoder = recognizer_decoder\n\n @property\n def workload(self) -> float:\n return (self.detector.workload +\n self.recognizer_encoder.workload +\n self.recognizer_decoder.workload)\n\n def process_frame(self, frame: np.ndarray,\n detection_node: DETECTION_NODE_TYPE,\n options: Dict[str, OPTION_TYPE],\n state: BaseStreamState) -> DETECTION_NODE_TYPE:\n n, c, h, w = self.detector.net.inputs['im_data'].shape\n hidden_shape = self.recognizer_decoder.net.inputs['prev_hidden'].shape\n\n input_dict, resize = self.detector.prepare_inputs(\n frame,\n frame_input_name=\"im_data\"\n )\n input_dict[\"im_data\"] = (input_dict[\"im_data\"]\n .reshape((n, c, h, w)).astype(np.float32))\n\n input_image_size = self.detector.net.inputs['im_data'].shape[-2:]\n input_image_info = np.asarray(\n [[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)\n input_dict[\"im_info\"] = input_image_info\n prediction = self.detector.send_to_batch(input_dict).result()\n\n scores = prediction[\"scores\"]\n detections_filter = scores > options[\"threshold\"]\n scores = scores[detections_filter]\n rects = prediction[\"boxes\"][detections_filter]\n text_features = prediction[\"text_features\"][detections_filter]\n\n feature_queues = []\n for text_feature in text_features:\n feature_queues.append(\n self.recognizer_encoder.send_to_batch({'input': text_feature}))\n\n detections = []\n for score, rect, feature_queue in zip(scores, rects, feature_queues):\n feature = feature_queue.result()['output']\n feature = np.reshape(feature,\n (feature.shape[0], feature.shape[1], -1))\n feature = np.transpose(feature, (0, 2, 1))\n\n hidden = np.zeros(hidden_shape)\n prev_symbol_index = np.ones((1,)) * SOS_INDEX\n\n text = ''\n for _ in range(MAX_SEQ_LEN):\n decoder_output = self.recognizer_decoder.send_to_batch({\n 'prev_symbol': prev_symbol_index,\n 'prev_hidden': hidden,\n 'encoder_outputs': feature\n }).result()\n symbols_distr = decoder_output['output']\n prev_symbol_index = int(np.argmax(symbols_distr, axis=1))\n if prev_symbol_index == EOS_INDEX:\n break\n text += ALPHABET[prev_symbol_index]\n hidden = decoder_output['hidden']\n\n detections.append(DetectionNode(\n name=\"text\",\n coords=rect_to_coords(rect.tolist()),\n extra_data={\n \"detection_confidence\": float(score),\n \"text\": text\n },\n ))\n return resize.scale_and_offset_detection_nodes(detections)\n" ]
[ [ "numpy.reshape", "numpy.asarray", "numpy.zeros", "numpy.ones", "numpy.transpose", "numpy.argmax" ] ]
xupingxie/deep-learning-models
[ "cc76aedf9631317452f9cd7df38998e2de727816" ]
[ "NN_buildingblock/SingleNN.py" ]
[ "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nSingle NN\n\n@author: xuping\n\"\"\"\nimport numpy as np\nimport scipy.io\n#from threeNN import sigmoid\n\ndef layer_sizes(X, Y):\n n_in = X.shape[0]\n n_out = Y.shape[0]\n return(n_in, n_out)\n\ndef initialize_parameters(dim):\n np.random.seed(3)\n \n W = np.random.randn(dim, dim)*0.01\n b = np.zeros((dim, 1))\n return W,b\n\ndef prop(W,b,X,Y,lambd):\n m = X.shape[1]\n #forward\n A = sigmoid(np.dot(W, X) + b)\n cost = 1./m*np.sum(np.sum(np.square(A-Y),axis=0,keepdims=True)) + lambd/(2*m)*np.sum(np.sum(W*W))\n #cost = 1./m*np.sum(np.sum(np.square(A-Y)))\n #backward\n Z = np.dot(W, X) + b\n dZ = 2*(A-Y)*sigmoid(Z)*(1-sigmoid(Z))\n dW = 1./m*np.dot(dZ, X.T) + lambd/m*W\n #dW = 1./m*np.dot(dZ, X.T)\n db = 1./m*np.sum(dZ,axis=1,keepdims=True)\n \n grads = {\"dW\":dW, \"db\":db}\n return grads, cost\n\ndef nn_model(X,Y,num_iterations, lambd, learning_rate, print_cost=True):\n #np.random.seed(3)\n costs = []\n \n W, b = initialize_parameters(X.shape[0])\n \n for i in range(num_iterations):\n \n grads, cost = prop(W,b,X,Y,lambd)\n dW = grads[\"dW\"]\n db = grads[\"db\"]\n \n W = W-learning_rate*dW\n b = b-learning_rate*db\n \n if print_cost and i%1000==0:\n print(\"cost after iteration %i: %f\" %(i, cost))\n costs.append(cost)\n \n parameters={\"W\":W, \"b\":b}\n grads={\"dW\":dW, \"db\":db}\n \n return parameters, costs\n\ndef predict(parameters, X):\n W=parameters[\"W\"]\n b=parameters[\"b\"]\n A = sigmoid(np.dot(W, X) + b)\n return A\n\ndef load_data():\n data=scipy.io.loadmat('U_Train.mat')\n X = data['ud']\n Y10 = data['tauR10']\n Y5 = data['tauR5']\n Y6 = data['tauR6']\n \n return X, Y5, Y6, Y10\n\nif __name__ == \"__main__\":\n \n #load data\n X, Y5, Y6, Y10 = load_data()\n X5 = X[:5, :]\n X6 = X[:6, :]\n X10 = X[:10, :]\n \n num_iterations = 30000\n lambd = 10\n learning_rate = 3\n \"\"\"\n X=X6\n Y=Y6\n np.random.seed(3)\n dim=X.shape[0]\n W = np.random.randn(dim, dim)*0.01\n b = np.zeros((dim, 1))\n Z = np.dot(W, X) + b\n A = sigmoid(Z)\n cost = A-Y\n #dZ = 2*(A-Y)*sigmoid(Z)*(1-sigmoid(Z))\n #dW = 1/m*np.dot(dZ, X.T)\n #db = 1/m*np.sum(dZ,axis=1,keepdims=True)\n \"\"\"\n #parameters5, cost5 = nn_model(X5, Y5, num_iterations, lambd, learning_rate, print_cost=True)\n parameters6, cost6 = nn_model(X6, Y6, num_iterations, lambd, learning_rate, print_cost=True)\n #parameters10, cost10 = nn_model(X10, Y10, num_iterations, lambd, learning_rate, print_cost=True)\n \n #W5=parameters5[\"W\"]\n #b5=parameters5[\"b\"]\n W6=parameters6[\"W\"]\n b6=parameters6[\"b\"]\n #W10=parameters10[\"W\"]\n #b10=parameters10[\"b\"]\n \n #scipy.io.savemat('weights6.mat',{'W6':W6})\n #scipy.io.savemat('bias.mat',{'b6':b6})\n \n \n" ]
[ [ "numpy.square", "numpy.dot", "numpy.zeros", "numpy.random.seed", "numpy.sum", "numpy.random.randn" ] ]
infected-mushroom/catboost
[ "77a1ae1d5cf997b55c52658585ce0dd0196a9e43" ]
[ "catboost/python-package/catboost/utils.py" ]
[ "from .core import Pool, CatboostError, get_catboost_bin_module, ARRAY_TYPES\nfrom collections import defaultdict\nimport numpy as np\n\n_catboost = get_catboost_bin_module()\n_eval_metric_util = _catboost._eval_metric_util\n_get_roc_curve = _catboost._get_roc_curve\n_select_threshold = _catboost._select_threshold\n\n\ndef create_cd(\n label=None,\n cat_features=None,\n weight=None,\n baseline=None,\n doc_id=None,\n group_id=None,\n subgroup_id=None,\n timestamp=None,\n auxiliary_columns=None,\n feature_names=None,\n output_path='train.cd'\n):\n _from_param_to_cd = {\n 'label': 'Label',\n 'weight': 'Weight',\n 'baseline': 'Baseline',\n 'doc_id': 'DocId',\n 'group_id': 'GroupId',\n 'subgroup_id': 'SubgroupId',\n 'timestamp': 'Timestamp'\n }\n _column_description = defaultdict(lambda: ['Num', ''])\n for key, value in locals().copy().items():\n if not (key.startswith('_') or value is None):\n if key in ('cat_features', 'auxiliary_columns'):\n if isinstance(value, int):\n value = [value]\n for index in value:\n if not isinstance(index, int):\n raise CatboostError('Unsupported index type. Expected int, got {}'.format(type(index)))\n if index in _column_description:\n raise CatboostError('The index {} occurs more than once'.format(index))\n _column_description[index] = ['Categ', ''] if key == 'cat_features' else ['Auxiliary', '']\n elif key not in ('feature_names', 'output_path'):\n if not isinstance(value, int):\n raise CatboostError('Unsupported index type. Expected int, got {}'.format(type(value)))\n if value in _column_description:\n raise CatboostError('The index {} occurs more than once'.format(value))\n _column_description[value] = [_from_param_to_cd[key], '']\n if feature_names is not None:\n for feature_index, name in feature_names.items():\n real_feature_index = feature_index\n for column_index, (title, _) in sorted(_column_description.items()):\n if column_index > real_feature_index:\n break\n if title not in ('Num', 'Categ'):\n real_feature_index += 1\n _column_description[real_feature_index][1] = name\n with open(output_path, 'w') as f:\n for index, (title, name) in sorted(_column_description.items()):\n f.write('{}\\t{}\\t{}\\n'.format(index, title, name))\n\n\ndef eval_metric(label, approx, metric, weight=None, group_id=None, thread_count=-1):\n \"\"\"\n Evaluate metrics with raw approxes and labels.\n\n Parameters\n ----------\n label : list or numpy.arrays or pandas.DataFrame or pandas.Series\n Object labels.\n\n approx : list or numpy.arrays or pandas.DataFrame or pandas.Series\n Object approxes.\n\n metrics : list of strings\n List of eval metrics.\n\n weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Object weights.\n\n group_id : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Object group ids.\n\n thread_count : int, optional (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n metric results : list with metric values.\n \"\"\"\n if len(approx) == 0:\n approx = [[]]\n if not isinstance(approx[0], ARRAY_TYPES):\n approx = [approx]\n return _eval_metric_util(label, approx, metric, weight, group_id, thread_count)\n\n\ndef get_gpu_device_count():\n return get_catboost_bin_module()._get_gpu_device_count()\n\n\ndef reset_trace_backend(filename):\n get_catboost_bin_module()._reset_trace_backend(filename)\n\n\ndef get_roc_curve(model, data, thread_count=-1):\n \"\"\"\n Build points of ROC curve.\n\n Parameters\n 
----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n A set of samples to build ROC curve with.\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n curve points : tuple of three arrays (fpr, tpr, thresholds)\n \"\"\"\n if type(data) == Pool:\n data = [data]\n if not isinstance(data, list):\n raise CatboostError('data must be a catboost.Pool or list of pools.')\n for pool in data:\n if not isinstance(pool, Pool):\n raise CatboostError('one of data pools is not catboost.Pool')\n\n return _get_roc_curve(model._object, data, thread_count)\n\n\ndef get_fpr_curve(model=None, data=None, curve=None, thread_count=-1):\n \"\"\"\n Build points of FPR curve.\n\n Parameters\n ----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n A set of samples to build ROC curve with.\n\n curve : tuple of three arrays (fpr, tpr, thresholds)\n ROC curve points in format of get_roc_curve returned value.\n If set, data parameter must not be set.\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n curve points : tuple of two arrays (thresholds, fpr)\n \"\"\"\n if curve is not None:\n if data is not None:\n raise CatboostError('Only one of the parameters data and curve should be set.')\n if not (isinstance(curve, list) or isinstance(curve, tuple)) or len(curve) != 3:\n raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')\n fpr, thresholds = curve[0][:], curve[2][:]\n else:\n if model is None or data is None:\n raise CatboostError('model and data parameters should be set when curve parameter is None.')\n fpr, _, thresholds = get_roc_curve(model, data, thread_count)\n return thresholds, fpr\n\n\ndef get_fnr_curve(model=None, data=None, curve=None, thread_count=-1):\n \"\"\"\n Build points of FNR curve.\n\n Parameters\n ----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n A set of samples to build ROC curve with.\n\n curve : tuple of three arrays (fpr, tpr, thresholds)\n ROC curve points in format of get_roc_curve returned value.\n If set, data parameter must not be set.\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n curve points : tuple of two arrays (thresholds, fnr)\n \"\"\"\n if curve is not None:\n if data is not None:\n raise CatboostError('Only one of the parameters data and curve should be set.')\n if not (isinstance(curve, list) or isinstance(curve, tuple)) or len(curve) != 3:\n raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')\n tpr, thresholds = curve[1], curve[2][:]\n else:\n if model is None or data is None:\n raise CatboostError('model and data parameters should be set when curve parameter is None.')\n _, tpr, thresholds = get_roc_curve(model, data, thread_count)\n fnr = np.array([1 - x for x in tpr])\n return thresholds, fnr\n\n\ndef select_threshold(model=None, data=None, curve=None, FPR=None, FNR=None, thread_count=-1):\n \"\"\"\n Selects a threshold for prediction.\n\n Parameters\n ----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n Set of samples to build ROC curve with.\n 
If set, curve parameter must not be set.\n\n curve : tuple of three arrays (fpr, tpr, thresholds)\n ROC curve points in format of get_roc_curve returned value.\n If set, data parameter must not be set.\n\n FPR : desired false-positive rate\n\n FNR : desired false-negative rate (only one of FPR and FNR should be chosen)\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n threshold : double\n \"\"\"\n if data is not None:\n if curve is not None:\n raise CatboostError('Only one of the parameters data and curve should be set.')\n if model is None:\n raise CatboostError('model and data parameters should be set when curve parameter is None.')\n if type(data) == Pool:\n data = [data]\n if not isinstance(data, list):\n raise CatboostError('data must be a catboost.Pool or list of pools.')\n for pool in data:\n if not isinstance(pool, Pool):\n raise CatboostError('one of data pools is not catboost.Pool')\n elif curve is not None:\n if not (isinstance(curve, list) or isinstance(curve, tuple)) or len(curve) != 3:\n raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')\n else:\n raise CatboostError('One of the parameters data and curve should be set.')\n\n return _select_threshold(model._object, data, curve, FPR, FNR, thread_count)\n" ]
[ [ "numpy.array" ] ]
pnposch/runpandas
[ "25388c18b52dfcc168e81922b8ba20ca93adad20" ]
[ "runpandas/tests/test_strava_parser.py" ]
[ "\"\"\"\nTest module for Strava API reader base module\n\"\"\"\n\nimport os\nimport json\nimport pytest\nfrom pandas import DataFrame, Timedelta, Timestamp\nfrom runpandas import read_strava\nfrom runpandas import types\nfrom stravalib.protocol import ApiV3\nfrom stravalib.client import Client\nfrom stravalib.model import Stream\n\npytestmark = pytest.mark.stable\n\n\nclass MockResponse:\n def __init__(self, json_file):\n with open(json_file) as json_handler:\n self.json_data = json.load(json_handler)\n\n def json(self):\n return self.json_data\n\n\ndef mock_get_activity_streams(streams_file):\n \"\"\"\n @TODO: I needed to mock the behavior the `stravalib.client.get_activity_streams`,\n it isn't the best alternative for mock the request from strava by passing a json file.\n \"\"\"\n\n stream_mock = MockResponse(streams_file).json()\n entities = {}\n for key, value in stream_mock.items():\n value[\"type\"] = key\n stream = Stream.deserialize(value)\n entities[stream.type] = stream\n return entities\n\n\n@pytest.fixture\ndef dirpath(datapath):\n return datapath(\"io\", \"data\")\n\n\n@pytest.fixture\ndef strava_activity(dirpath, mocker):\n activity_json = os.path.join(dirpath, \"strava\", \"activity.json\")\n streams_json = os.path.join(dirpath, \"strava\", \"streams.json\")\n\n mocker.patch.object(ApiV3, \"get\", return_value=MockResponse(activity_json).json())\n\n mocker.patch.object(\n Client,\n \"get_activity_streams\",\n return_value=mock_get_activity_streams(streams_json),\n )\n # we don't use access token here, since we will mock the stravalib json response\n activity = read_strava(\n activity_id=4437021783,\n access_token=None,\n refresh_token=None,\n to_df=False,\n )\n return activity\n\n\n@pytest.fixture\ndef strava_dataframe(dirpath, mocker):\n activity_json = os.path.join(dirpath, \"strava\", \"activity.json\")\n streams_json = os.path.join(dirpath, \"strava\", \"streams.json\")\n\n mocker.patch.object(ApiV3, \"get\", return_value=MockResponse(activity_json).json())\n\n mocker.patch.object(\n Client,\n \"get_activity_streams\",\n return_value=mock_get_activity_streams(streams_json),\n )\n # we don't use access token here, since we will mock the stravalib json response\n activity = read_strava(\n activity_id=4437021783,\n access_token=None,\n refresh_token=None,\n to_df=True,\n )\n return activity\n\n\ndef test_read_strava_basic_dataframe(dirpath, mocker):\n activity_json = os.path.join(dirpath, \"strava\", \"activity.json\")\n streams_json = os.path.join(dirpath, \"strava\", \"streams.json\")\n\n mocker.patch.object(ApiV3, \"get\", return_value=MockResponse(activity_json).json())\n\n mocker.patch.object(\n Client,\n \"get_activity_streams\",\n return_value=mock_get_activity_streams(streams_json),\n )\n # we don't use access token here, since we will mock the stravalib json response\n activity = read_strava(\n activity_id=4437021783,\n access_token=None,\n refresh_token=None,\n to_df=True,\n )\n assert isinstance(activity, DataFrame)\n included_data = set(\n [\n \"latitude\",\n \"longitude\",\n \"altitude\",\n \"distance\",\n \"velocity_smooth\",\n \"heartrate\",\n \"cadence\",\n \"moving\",\n \"grade_smooth\",\n ]\n )\n assert included_data <= set(activity.columns.to_list())\n assert activity.size == 15723\n\n\ndef test_read_strava_activity(dirpath, mocker):\n activity_json = os.path.join(dirpath, \"strava\", \"activity.json\")\n streams_json = os.path.join(dirpath, \"strava\", \"streams.json\")\n\n mocker.patch.object(ApiV3, \"get\", 
return_value=MockResponse(activity_json).json())\n\n mocker.patch.object(\n Client,\n \"get_activity_streams\",\n return_value=mock_get_activity_streams(streams_json),\n )\n\n # we don't use access token here, since we will mock the stravalib json response\n activity = read_strava(\n activity_id=4437021783,\n access_token=None,\n refresh_token=None,\n to_df=False,\n )\n assert isinstance(activity, types.Activity)\n included_data = set(\n [\n \"alt\",\n \"cad\",\n \"dist\",\n \"hr\",\n \"lon\",\n \"lat\",\n \"moving\",\n \"velocity_smooth\",\n \"grade_smooth\",\n ]\n )\n assert included_data <= set(activity.columns.to_list())\n assert activity.size == 15723\n\n\ntest_data = [\n (pytest.lazy_fixture(\"strava_activity\"), \"alt\", 0, 6.4),\n (pytest.lazy_fixture(\"strava_activity\"), \"alt\", -1, 6.6),\n (pytest.lazy_fixture(\"strava_activity\"), \"cad\", 0, 79),\n (pytest.lazy_fixture(\"strava_activity\"), \"cad\", -1, 86),\n (pytest.lazy_fixture(\"strava_activity\"), \"dist\", 0, 0.0),\n (pytest.lazy_fixture(\"strava_activity\"), \"dist\", -1, 12019.7),\n (pytest.lazy_fixture(\"strava_activity\"), \"hr\", 0, 111),\n (pytest.lazy_fixture(\"strava_activity\"), \"hr\", -1, 160),\n (pytest.lazy_fixture(\"strava_activity\"), \"lat\", 0, -8.016994),\n (pytest.lazy_fixture(\"strava_activity\"), \"lon\", 0, -34.847439),\n (pytest.lazy_fixture(\"strava_activity\"), \"lat\", -1, -8.016821),\n (pytest.lazy_fixture(\"strava_activity\"), \"lon\", -1, -34.84716),\n (pytest.lazy_fixture(\"strava_activity\"), \"moving\", 0, False),\n (pytest.lazy_fixture(\"strava_activity\"), \"moving\", -1, True),\n (pytest.lazy_fixture(\"strava_activity\"), \"velocity_smooth\", 0, 0.0),\n (pytest.lazy_fixture(\"strava_activity\"), \"velocity_smooth\", -1, 3.2),\n (pytest.lazy_fixture(\"strava_activity\"), \"grade_smooth\", 0, 1.1),\n (pytest.lazy_fixture(\"strava_activity\"), \"grade_smooth\", -1, -0.6),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"altitude\", 0, 6.4),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"altitude\", -1, 6.6),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"cadence\", 0, 79),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"cadence\", -1, 86),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"distance\", 0, 0.0),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"distance\", -1, 12019.7),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"heartrate\", 0, 111),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"heartrate\", -1, 160),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"latitude\", 0, -8.016994),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"longitude\", 0, -34.847439),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"latitude\", -1, -8.016821),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"longitude\", -1, -34.84716),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"moving\", 0, False),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"moving\", -1, True),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"velocity_smooth\", 0, 0.0),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"velocity_smooth\", -1, 3.2),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"grade_smooth\", 0, 1.1),\n (pytest.lazy_fixture(\"strava_dataframe\"), \"grade_smooth\", -1, -0.6),\n]\n\n\n@pytest.mark.parametrize(\"activity,column,index,expected\", test_data)\ndef test_strava_values(activity, column, index, expected):\n assert activity[column].iloc[index] == expected\n assert activity.index[-1] == Timedelta(\"0 days 01:25:45\")\n\n if isinstance(activity, types.Activity):\n assert activity.start == Timestamp(\"2020-12-06 
06:36:27\")\n" ]
[ [ "pandas.Timestamp", "pandas.Timedelta" ] ]
hklion/WarpX
[ "3c2d0ee2815ab1df21b9f78d899fe7b1a9651758" ]
[ "Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright 2019-2022 Luca Fedeli, Yinjian Zhao, Hannah Klion\n#\n# This file is part of WarpX.\n#\n# License: BSD-3-Clause-LBNL\n\n# This script tests the reduced particle diagnostics.\n# The setup is a uniform plasma with electrons, protons and photons.\n# Various particle and field quantities are written to file using the reduced diagnostics\n# and compared with the corresponding quantities computed from the data in the plotfiles.\n\nimport os\nimport sys\n\nimport numpy as np\nimport openpmd_api as io\nfrom scipy.constants import c\nfrom scipy.constants import epsilon_0 as eps0\nfrom scipy.constants import m_e, m_p\nfrom scipy.constants import mu_0 as mu0\nimport yt\n\nsys.path.insert(1, '../../../../warpx/Regression/Checksum/')\nimport checksumAPI\n\n\ndef do_analysis(single_precision = False):\n fn = sys.argv[1]\n\n ds = yt.load(fn)\n ad = ds.all_data()\n ad0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions)\n\n opmd = io.Series('diags/openpmd/openpmd_%T.h5', io.Access.read_only)\n opmd_i = opmd.iterations[200]\n\n #--------------------------------------------------------------------------------------------------\n # Part 1: get results from plotfiles (label '_yt')\n #--------------------------------------------------------------------------------------------------\n\n # Quantities computed from plotfiles\n values_yt = dict()\n\n domain_size = ds.domain_right_edge.value - ds.domain_left_edge.value\n dx = domain_size / ds.domain_dimensions\n\n # Electrons\n x = ad['electrons', 'particle_position_x'].to_ndarray()\n y = ad['electrons', 'particle_position_y'].to_ndarray()\n z = ad['electrons', 'particle_position_z'].to_ndarray()\n uz = ad['electrons', 'particle_momentum_z'].to_ndarray() / m_e / c\n w = ad['electrons', 'particle_weight'].to_ndarray()\n filt = uz < 0\n\n x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)\n y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)\n z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)\n\n zavg = np.zeros(ds.domain_dimensions)\n uzavg = np.zeros(ds.domain_dimensions)\n zuzavg = np.zeros(ds.domain_dimensions)\n wavg = np.zeros(ds.domain_dimensions)\n uzavg_filt = np.zeros(ds.domain_dimensions)\n wavg_filt = np.zeros(ds.domain_dimensions)\n\n for i_p in range(len(x)):\n zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]\n uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]\n zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]\n wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]\n uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]\n wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]\n\n wavg_adj = np.where(wavg == 0, 1, wavg)\n wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)\n values_yt['electrons: zavg'] = zavg / wavg_adj\n values_yt['electrons: uzavg'] = uzavg / wavg_adj\n values_yt['electrons: zuzavg'] = zuzavg / wavg_adj\n values_yt['electrons: uzavg_filt'] = uzavg_filt / wavg_filt_adj\n\n # protons\n x = ad['protons', 'particle_position_x'].to_ndarray()\n y = ad['protons', 'particle_position_y'].to_ndarray()\n z = ad['protons', 'particle_position_z'].to_ndarray()\n uz = ad['protons', 'particle_momentum_z'].to_ndarray() / m_p / c\n w = ad['protons', 'particle_weight'].to_ndarray()\n filt = uz < 0\n\n x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)\n y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)\n z_ind = ((z - 
ds.domain_left_edge[2].value) / dx[2]).astype(int)\n\n zavg = np.zeros(ds.domain_dimensions)\n uzavg = np.zeros(ds.domain_dimensions)\n zuzavg = np.zeros(ds.domain_dimensions)\n wavg = np.zeros(ds.domain_dimensions)\n uzavg_filt = np.zeros(ds.domain_dimensions)\n wavg_filt = np.zeros(ds.domain_dimensions)\n\n for i_p in range(len(x)):\n zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]\n uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]\n zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]\n wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]\n uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]\n wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]\n\n wavg_adj = np.where(wavg == 0, 1, wavg)\n wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)\n values_yt['protons: zavg'] = zavg / wavg_adj\n values_yt['protons: uzavg'] = uzavg / wavg_adj\n values_yt['protons: zuzavg'] = zuzavg / wavg_adj\n values_yt['protons: uzavg_filt'] = uzavg_filt / wavg_filt_adj\n\n # Photons (momentum in units of m_e c)\n x = ad['photons', 'particle_position_x'].to_ndarray()\n y = ad['photons', 'particle_position_y'].to_ndarray()\n z = ad['photons', 'particle_position_z'].to_ndarray()\n uz = ad['photons', 'particle_momentum_z'].to_ndarray() / m_e / c\n w = ad['photons', 'particle_weight'].to_ndarray()\n filt = uz < 0\n\n x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)\n y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)\n z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)\n\n zavg = np.zeros(ds.domain_dimensions)\n uzavg = np.zeros(ds.domain_dimensions)\n zuzavg = np.zeros(ds.domain_dimensions)\n wavg = np.zeros(ds.domain_dimensions)\n uzavg_filt = np.zeros(ds.domain_dimensions)\n wavg_filt = np.zeros(ds.domain_dimensions)\n\n for i_p in range(len(x)):\n zavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * w[i_p]\n uzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p]\n zuzavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]\n wavg[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p]\n uzavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]\n wavg_filt[x_ind[i_p],y_ind[i_p],z_ind[i_p]] += w[i_p] * filt[i_p]\n\n wavg_adj = np.where(wavg == 0, 1, wavg)\n wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)\n values_yt['photons: zavg'] = zavg / wavg_adj\n values_yt['photons: uzavg'] = uzavg / wavg_adj\n values_yt['photons: zuzavg'] = zuzavg / wavg_adj\n values_yt['photons: uzavg_filt'] = uzavg_filt / wavg_filt_adj\n\n\n values_rd = dict()\n # Load reduced particle diagnostic data from plotfiles\n values_rd['electrons: zavg'] = ad0[('boxlib','z_electrons')]\n values_rd['protons: zavg'] = ad0[('boxlib','z_protons')]\n values_rd['photons: zavg'] = ad0[('boxlib','z_photons')]\n\n values_rd['electrons: uzavg'] = ad0[('boxlib','uz_electrons')]\n values_rd['protons: uzavg'] = ad0[('boxlib','uz_protons')]\n values_rd['photons: uzavg'] = ad0[('boxlib','uz_photons')]\n\n values_rd['electrons: zuzavg'] = ad0[('boxlib','zuz_electrons')]\n values_rd['protons: zuzavg'] = ad0[('boxlib','zuz_protons')]\n values_rd['photons: zuzavg'] = ad0[('boxlib','zuz_photons')]\n\n values_rd['electrons: uzavg_filt'] = ad0[('boxlib','uz_filt_electrons')]\n values_rd['protons: uzavg_filt'] = ad0[('boxlib','uz_filt_protons')]\n values_rd['photons: uzavg_filt'] = ad0[('boxlib','uz_filt_photons')]\n\n values_opmd = dict()\n # Load reduced particle diagnostic data from OPMD output\n 
values_opmd['electrons: zavg'] = opmd_i.meshes['z_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n values_opmd['protons: zavg'] = opmd_i.meshes['z_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n values_opmd['photons: zavg'] = opmd_i.meshes['z_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n\n values_opmd['electrons: uzavg'] = opmd_i.meshes['uz_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n values_opmd['protons: uzavg'] = opmd_i.meshes['uz_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n values_opmd['photons: uzavg'] = opmd_i.meshes['uz_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n\n values_opmd['electrons: zuzavg'] = opmd_i.meshes['zuz_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n values_opmd['protons: zuzavg'] = opmd_i.meshes['zuz_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n values_opmd['photons: zuzavg'] = opmd_i.meshes['zuz_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n\n values_opmd['electrons: uzavg_filt'] = opmd_i.meshes['uz_filt_electrons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n values_opmd['protons: uzavg_filt'] = opmd_i.meshes['uz_filt_protons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n values_opmd['photons: uzavg_filt'] = opmd_i.meshes['uz_filt_photons'][io.Mesh_Record_Component.SCALAR].load_chunk()\n opmd.flush()\n del opmd\n\n #--------------------------------------------------------------------------------------------------\n # Part 3: compare values from plotfiles and diagnostics and print output\n #--------------------------------------------------------------------------------------------------\n\n error_plt = dict()\n error_opmd = dict()\n tolerance = 5e-3 if single_precision else 1e-12\n # if single precision, increase tolerance from default value\n check_tolerance = 5e-3 if single_precision else 1e-9\n\n for k in values_yt.keys():\n # check that the zeros line up, since we'll be ignoring them in the error calculation\n assert(np.all((values_yt[k] == 0) == (values_rd[k] == 0)))\n error_plt[k] = np.max(abs(values_yt[k] - values_rd[k])[values_yt[k] != 0] / abs(values_yt[k])[values_yt[k] != 0])\n print(k, 'relative error plotfile = ', error_plt[k])\n assert(error_plt[k] < tolerance)\n assert(np.all((values_yt[k] == 0) == (values_opmd[k].T == 0)))\n error_opmd[k] = np.max(abs(values_yt[k] - values_opmd[k].T)[values_yt[k] != 0] / abs(values_yt[k])[values_yt[k] != 0])\n assert(error_opmd[k] < tolerance)\n print(k, 'relative error openPMD = ', error_opmd[k])\n\n\n test_name = os.path.split(os.getcwd())[1]\n checksumAPI.evaluate_checksum(test_name, fn, rtol=check_tolerance)\n" ]
[ [ "numpy.all", "numpy.where", "numpy.zeros" ] ]
rgerkin/psiz
[ "d540738462b6436a08a472d5e349ca2b813e6d47" ]
[ "examples/rank/mle_3g.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright 2020 The PsiZ Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Example that infers a shared embedding for three groups.\n\nFake data is generated from a ground truth model for three different\ngroups. In this example, these groups represent groups of agents with\nvarying levels of skill: novices, intermediates, and experts. Each group\nhas a different set of attention weights. An embedding model is\ninferred from the simulated data and compared to the ground truth\nmodel.\n\nExample output:\n Attention weights:\n Novice | [3.38 3.32 0.49 0.43]\n Intermediate | [2.06 2.18 2.04 2.18]\n Expert | [0.55 0.50 3.40 3.32]\n\n Model Comparison (R^2)\n ================================\n True | Inferred\n | Novice Interm Expert\n --------+-----------------------\n Novice | 0.95 0.68 0.16\n Interm | 0.64 0.96 0.54\n Expert | 0.16 0.61 0.96\n\n\"\"\"\n\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\" # noqa\n\nimport numpy as np\nfrom scipy.stats import pearsonr\nimport tensorflow as tf\n\nimport psiz\n\n# Uncomment the following line to force eager execution.\n# tf.config.run_functions_eagerly(True)\n\n# Uncomment and edit the following to control GPU visibility.\n# os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n\ndef main():\n \"\"\"Run the simulation that infers an embedding for three groups.\"\"\"\n # Settings.\n n_stimuli = 30\n n_dim = 4\n n_group = 3\n n_restart = 1\n epochs = 1000\n n_trial = 2000\n batch_size = 128\n\n model_true = ground_truth(n_stimuli, n_dim, n_group)\n\n # Generate a random docket of trials to show each group.\n generator = psiz.trials.RandomRank(\n n_stimuli, n_reference=8, n_select=2\n )\n docket = generator.generate(n_trial)\n\n # Create virtual agents for each group.\n agent_novice = psiz.agents.RankAgent(model_true, groups=[0])\n agent_interm = psiz.agents.RankAgent(model_true, groups=[1])\n agent_expert = psiz.agents.RankAgent(model_true, groups=[2])\n\n # Simulate similarity judgments for each group.\n obs_novice = agent_novice.simulate(docket)\n obs_interm = agent_interm.simulate(docket)\n obs_expert = agent_expert.simulate(docket)\n obs = psiz.trials.stack((obs_novice, obs_interm, obs_expert))\n\n # Partition observations into 80% train, 10% validation and 10% test set.\n obs_train, obs_val, obs_test = psiz.utils.standard_split(obs)\n # Convert to TF dataset.\n ds_obs_train = obs_train.as_dataset().shuffle(\n buffer_size=obs_train.n_trial, reshuffle_each_iteration=True\n ).batch(batch_size, drop_remainder=False)\n ds_obs_val = obs_val.as_dataset().batch(\n batch_size, drop_remainder=False\n )\n ds_obs_test = obs_test.as_dataset().batch(\n batch_size, drop_remainder=False\n )\n\n # Use early stopping.\n early_stop = psiz.keras.callbacks.EarlyStoppingRe(\n 'val_cce', patience=15, mode='min', restore_best_weights=True\n )\n callbacks = [early_stop]\n\n 
compile_kwargs = {\n 'loss': tf.keras.losses.CategoricalCrossentropy(),\n 'optimizer': tf.keras.optimizers.Adam(lr=.001),\n 'weighted_metrics': [\n tf.keras.metrics.CategoricalCrossentropy(name='cce')\n ]\n }\n\n model_inferred = build_model(n_stimuli, n_dim, n_group)\n\n # Infer embedding with restarts.\n restarter = psiz.keras.Restarter(\n model_inferred, compile_kwargs=compile_kwargs, monitor='val_loss',\n n_restart=n_restart\n )\n restart_record = restarter.fit(\n x=ds_obs_train, validation_data=ds_obs_val, epochs=epochs,\n callbacks=callbacks, verbose=0\n )\n model_inferred = restarter.model\n\n # Compare the inferred model with ground truth by comparing the\n # similarity matrices implied by each model.\n simmat_truth = (\n model_similarity(model_true, groups=[0]),\n model_similarity(model_true, groups=[1]),\n model_similarity(model_true, groups=[2])\n )\n\n simmat_inferred = (\n model_similarity(model_inferred, groups=[0]),\n model_similarity(model_inferred, groups=[1]),\n model_similarity(model_inferred, groups=[2])\n )\n\n r_squared = np.empty((n_group, n_group))\n for i_truth in range(n_group):\n for j_infer in range(n_group):\n rho, _ = pearsonr(simmat_truth[i_truth], simmat_inferred[j_infer])\n r_squared[i_truth, j_infer] = rho**2\n\n # Display attention weights.\n # Permute inferred dimensions to best match ground truth.\n attention_weight = tf.stack(\n [\n model_inferred.kernel.subnets[0].distance.w,\n model_inferred.kernel.subnets[1].distance.w,\n model_inferred.kernel.subnets[2].distance.w\n ],\n axis=0\n ).numpy()\n idx_sorted = np.argsort(-attention_weight[0, :])\n attention_weight = attention_weight[:, idx_sorted]\n group_labels = [\"Novice\", \"Intermediate\", \"Expert\"]\n print(\"\\n Attention weights:\")\n for i_group in range(attention_weight.shape[0]):\n print(\" {0:>12} | {1}\".format(\n group_labels[i_group],\n np.array2string(\n attention_weight[i_group, :],\n formatter={'float_kind': lambda x: \"%.2f\" % x})\n )\n )\n\n # Display comparison results. 
A good inferred model will have a high\n # R^2 value on the diagonal elements (max is 1) and relatively low R^2\n # values on the off-diagonal elements.\n print('\\n Model Comparison (R^2)')\n print(' ================================')\n print(' True | Inferred')\n print(' | Novice Interm Expert')\n print(' --------+-----------------------')\n print(' Novice | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(\n r_squared[0, 0], r_squared[0, 1], r_squared[0, 2]))\n print(' Interm | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(\n r_squared[1, 0], r_squared[1, 1], r_squared[1, 2]))\n print(' Expert | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(\n r_squared[2, 0], r_squared[2, 1], r_squared[2, 2]))\n print('\\n')\n\n\ndef ground_truth(n_stimuli, n_dim, n_group):\n \"\"\"Return a ground truth embedding.\"\"\"\n stimuli = tf.keras.layers.Embedding(\n n_stimuli+1, n_dim, mask_zero=True,\n embeddings_initializer=tf.keras.initializers.RandomNormal(\n stddev=.17\n )\n )\n\n shared_similarity = psiz.keras.layers.ExponentialSimilarity(\n trainable=False,\n beta_initializer=tf.keras.initializers.Constant(10.),\n tau_initializer=tf.keras.initializers.Constant(1.),\n gamma_initializer=tf.keras.initializers.Constant(0.)\n )\n\n # Define group-specific kernels.\n kernel_0 = psiz.keras.layers.DistanceBased(\n distance=psiz.keras.layers.Minkowski(\n rho_trainable=False,\n rho_initializer=tf.keras.initializers.Constant(2.),\n w_initializer=tf.keras.initializers.Constant(\n [1.8, 1.8, .2, .2]\n ),\n w_constraint=psiz.keras.constraints.NonNegNorm(\n scale=n_dim, p=1.\n ),\n ),\n similarity=shared_similarity\n )\n\n kernel_1 = psiz.keras.layers.DistanceBased(\n distance=psiz.keras.layers.Minkowski(\n rho_trainable=False,\n rho_initializer=tf.keras.initializers.Constant(2.),\n w_initializer=tf.keras.initializers.Constant(\n [1., 1., 1., 1.]\n ),\n w_constraint=psiz.keras.constraints.NonNegNorm(\n scale=n_dim, p=1.\n ),\n ),\n similarity=shared_similarity\n )\n\n kernel_2 = psiz.keras.layers.DistanceBased(\n distance=psiz.keras.layers.Minkowski(\n rho_trainable=False,\n rho_initializer=tf.keras.initializers.Constant(2.),\n w_initializer=tf.keras.initializers.Constant(\n [.2, .2, 1.8, 1.8]\n ),\n w_constraint=psiz.keras.constraints.NonNegNorm(\n scale=n_dim, p=1.\n ),\n ),\n similarity=shared_similarity\n )\n\n kernel_group = psiz.keras.layers.GateMulti(\n subnets=[kernel_0, kernel_1, kernel_2], group_col=0\n )\n\n model = psiz.keras.models.Rank(\n stimuli=stimuli, kernel=kernel_group, use_group_kernel=True\n )\n\n return model\n\n\ndef build_model(n_stimuli, n_dim, n_group):\n \"\"\"Build model.\n\n Arguments:\n n_stimuli: Integer indicating the number of stimuli in the\n embedding.\n n_dim: Integer indicating the dimensionality of the embedding.\n\n Returns:\n model: A TensorFlow Keras model.\n\n \"\"\"\n stimuli = tf.keras.layers.Embedding(\n n_stimuli+1, n_dim, mask_zero=True,\n )\n\n shared_similarity = psiz.keras.layers.ExponentialSimilarity(\n trainable=False,\n beta_initializer=tf.keras.initializers.Constant(10.),\n tau_initializer=tf.keras.initializers.Constant(1.),\n gamma_initializer=tf.keras.initializers.Constant(0.)\n )\n\n kernel_0 = build_kernel(shared_similarity, n_dim)\n kernel_1 = build_kernel(shared_similarity, n_dim)\n kernel_2 = build_kernel(shared_similarity, n_dim)\n kernel_group = psiz.keras.layers.GateMulti(\n subnets=[kernel_0, kernel_1, kernel_2], group_col=0\n )\n\n model = psiz.keras.models.Rank(\n stimuli=stimuli, kernel=kernel_group, use_group_kernel=True\n )\n\n return model\n\n\ndef 
build_kernel(similarity, n_dim):\n \"\"\"Build kernel for single group.\"\"\"\n mink = psiz.keras.layers.Minkowski(\n rho_trainable=False,\n rho_initializer=tf.keras.initializers.Constant(2.),\n w_constraint=psiz.keras.constraints.NonNegNorm(\n scale=n_dim, p=1.\n ),\n )\n\n kernel = psiz.keras.layers.DistanceBased(\n distance=mink,\n similarity=similarity\n )\n return kernel\n\n\ndef model_similarity(model, groups=[]):\n ds_pairs, ds_info = psiz.utils.pairwise_index_dataset(\n model.n_stimuli, mask_zero=True, groups=groups\n )\n simmat = psiz.utils.pairwise_similarity(\n model.stimuli, model.kernel, ds_pairs, use_group_kernel=True\n ).numpy()\n\n return simmat\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.empty", "scipy.stats.pearsonr", "tensorflow.keras.layers.Embedding", "tensorflow.keras.initializers.RandomNormal", "tensorflow.keras.losses.CategoricalCrossentropy", "numpy.argsort", "tensorflow.keras.metrics.CategoricalCrossentropy", "tensorflow.stack", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.initializers.Constant", "numpy.array2string" ] ]
jsiloto/adaptive-cob
[ "eb38e3b52c4927e3ac0a897142ad26fbc4eb82de" ]
[ "src/mimic_fix.py" ]
[ "import argparse\nimport datetime\nimport time\n\nimport torch\nfrom torch import distributed as dist\nfrom torch.nn import DataParallel\nfrom torch.nn.parallel.distributed import DistributedDataParallel\n\nfrom distillation.tool import DistillationBox\nfrom models import load_ckpt, get_model, save_ckpt, set_bottleneck_transformer\nfrom myutils.common import file_util, yaml_util\nfrom myutils.pytorch import func_util, module_util\nfrom utils import data_util, main_util, misc_util\nfrom models.mimic.base import set_width\nfrom models.slimmable.compute_post_bn import ComputeBN\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\n\ndef freeze_batch_norm_outside_bottleneck(model):\n for m in model.modules():\n if isinstance(m, _BatchNorm):\n m.eval()\n model.backbone_net.bottleneck.train()\n\n\ndef get_argparser():\n argparser = argparse.ArgumentParser(description='Mimic Runner')\n argparser.add_argument('--config', required=True, help='yaml file path')\n argparser.add_argument('--device', default='cuda', help='device')\n argparser.add_argument('--json', help='dictionary to overwrite config')\n argparser.add_argument('-distill', action='store_true', help='distill a teacher model')\n argparser.add_argument('-skip_teacher_eval', action='store_true', help='skip teacher model evaluation in testing')\n argparser.add_argument('-transform_bottleneck', action='store_true',\n help='use bottleneck transformer (if defined in yaml) in testing')\n argparser.add_argument('-post_bn', action='store_true', help='use post traing batch norm calculation')\n # distributed training parameters\n argparser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')\n argparser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')\n return argparser\n\n\ndef freeze_modules(student_model, student_model_config):\n if 'frozen_modules' in student_model_config:\n for student_path in student_model_config['frozen_modules']:\n student_module = module_util.get_module(student_model, student_path)\n module_util.freeze_module_params(student_module)\n\n elif 'unfrozen_modules' in student_model_config:\n module_util.freeze_module_params(student_model)\n for student_path in student_model_config['unfrozen_modules']:\n student_module = module_util.get_module(student_model, student_path)\n module_util.unfreeze_module_params(student_module)\n\n\ndef distill_model(distillation_box, data_loader, optimizer, log_freq, device, epoch):\n metric_logger = misc_util.MetricLogger(delimiter=' ')\n metric_logger.add_meter('lr', misc_util.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n lr_scheduler = None\n if epoch == 0:\n warmup_factor = 1.0 / 1000.0\n warmup_iters = min(1000, len(data_loader) - 1)\n lr_scheduler = main_util.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)\n\n for images, targets in metric_logger.log_every(data_loader, log_freq, header):\n images = list(image.to(device) for image in images)\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n\n optimizer.zero_grad()\n loss = distillation_box(images, targets)\n loss.backward()\n optimizer.step()\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n metric_logger.update(loss=loss)\n metric_logger.update(lr=optimizer.param_groups[0]['lr'])\n # torch.cuda.empty_cache()\n\n\ndef distill(teacher_model, student_model, train_sampler, train_data_loader, val_data_loader,\n device, distributed, distill_backbone_only, config, args):\n train_config 
= config['train']\n student_config = config['student_model']\n distillation_box = DistillationBox(teacher_model, student_model,\n train_config['criterion'], student_config)\n ckpt_file_path = config['student_model']['ckpt']\n optim_config = train_config['optimizer']\n optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])\n scheduler_config = train_config['scheduler']\n lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])\n if file_util.check_if_exists(ckpt_file_path):\n best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)\n save_ckpt(student_model, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)\n\n\ndef main(args):\n config = yaml_util.load_yaml_file(args.config)\n if args.json is not None:\n main_util.overwrite_config(config, args.json)\n\n distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)\n device = torch.device(args.device if torch.cuda.is_available() else 'cpu')\n teacher_model = get_model(config['teacher_model'], device)\n module_util.freeze_module_params(teacher_model)\n student_model_config = config['student_model']\n student_model = get_model(student_model_config, device)\n freeze_modules(student_model, student_model_config)\n ckpt_file_path = config['student_model']['ckpt']\n train_config = config['train']\n optim_config = train_config['optimizer']\n optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])\n scheduler_config = train_config['scheduler']\n lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])\n if file_util.check_if_exists(ckpt_file_path):\n best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)\n save_ckpt(student_model, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)\n\n\nif __name__ == '__main__':\n parser = get_argparser()\n main(parser.parse_args())\n" ]
[ [ "torch.cuda.is_available" ] ]
iacChris/ROAR
[ "9404b41cd751bd3c5d644e80bf2f1d4ace392a58" ]
[ "Bridges/carla_bridge.py" ]
[ "import carla\nfrom carla import ColorConverter as cc\nfrom ROAR_Sim.carla_client.util.sensors import IMUSensor\nfrom Bridges.bridge import Bridge\nfrom typing import Union\nfrom ROAR.utilities_module.vehicle_models import (\n VehicleControl,\n Vehicle,\n)\nfrom ROAR.utilities_module.data_structures_models import (\n Location,\n Rotation,\n RGBData,\n DepthData,\n SensorsData,\n IMUData,\n Vector3D,\n Transform,\n)\n\nfrom ROAR.utilities_module.utilities import png_to_depth\nimport numpy as np\nimport cv2\n\n\nclass CarlaBridge(Bridge):\n def convert_location_from_source_to_agent(self, source: carla.Location) -> Location:\n\n \"\"\"\n Convert Location data from Carla.location to Agent's lcoation data type\n invert the Z axis to make it into right hand coordinate system\n Args:\n source: carla.location\n\n Returns:\n\n \"\"\"\n return Location(x=source.x, y=source.z, z=source.y)\n\n def convert_rotation_from_source_to_agent(self, source: carla.Rotation) -> Rotation:\n \"\"\"Convert a CARLA raw rotation to Rotation(pitch=float,yaw=float,roll=float).\"\"\"\n\n return Rotation(pitch=source.yaw, yaw=source.pitch, roll=source.roll)\n\n def convert_transform_from_source_to_agent(\n self, source: carla.Transform\n ) -> Transform:\n \"\"\"Convert CARLA raw location and rotation to Transform(location,rotation).\"\"\"\n return Transform(\n location=self.convert_location_from_source_to_agent(source=source.location),\n rotation=self.convert_rotation_from_source_to_agent(source=source.rotation),\n )\n\n def convert_control_from_source_to_agent(\n self, source: carla.VehicleControl\n ) -> VehicleControl:\n \"\"\"Convert CARLA raw vehicle control to VehicleControl(throttle,steering).\"\"\"\n\n return VehicleControl(\n throttle=-1 * source.throttle if source.reverse else source.throttle,\n steering=source.steer,\n )\n\n def convert_rgb_from_source_to_agent(\n self, source: carla.Image\n ) -> Union[RGBData, None]:\n \"\"\"Convert CARLA raw Image to a Union with RGB numpy array\"\"\"\n\n try:\n source.convert(cc.Raw)\n return RGBData(data=self._to_rgb_array(source))\n except:\n return None\n\n def convert_depth_from_source_to_agent(\n self, source: carla.Image\n ) -> Union[DepthData, None]:\n \"\"\"Convert CARLA raw depth info to \"\"\"\n try:\n array = np.frombuffer(source.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (source.height, source.width, 4)) # BGRA\n array = array[:, :, :3] # BGR\n array = array[:, :, ::-1] # RGB\n # array = array.swapaxes(0, 1)\n array = png_to_depth(array)\n # print(array[350][160], array[350][688])\n return DepthData(data=array)\n except:\n return None\n\n def convert_vector3d_from_source_to_agent(self, source: carla.Vector3D) -> Vector3D:\n return Vector3D(x=source.x, y=source.y, z=source.z)\n\n def convert_imu_from_source_to_agent(self, source: IMUSensor) -> IMUData:\n return IMUData(\n accelerometer=Vector3D(\n x=source.accelerometer[0],\n y=source.accelerometer[1],\n z=source.accelerometer[2],\n ),\n gyroscope=Vector3D(\n x=source.gyroscope[0], y=source.gyroscope[1], z=source.gyroscope[2]\n ),\n )\n\n def convert_sensor_data_from_source_to_agent(self, source: dict) -> SensorsData:\n return SensorsData(\n front_rgb=self.convert_rgb_from_source_to_agent(\n source=source.get(\"front_rgb\", None)\n ),\n rear_rgb=self.convert_rgb_from_source_to_agent(\n source=source.get(\"rear_rgb\", None)\n ),\n front_depth=self.convert_depth_from_source_to_agent(\n source=source.get(\"front_depth\", None)\n ),\n imu_data=self.convert_imu_from_source_to_agent(\n 
source=source.get(\"imu\", None)\n ),\n )\n\n def convert_vehicle_from_source_to_agent(self, source: carla.Vehicle) -> Vehicle:\n control: VehicleControl = self.convert_control_from_source_to_agent(\n source.get_control()\n )\n # this is cheating here, vehicle does not know its own location\n transform: Transform = self.convert_transform_from_source_to_agent(\n source.get_transform()\n )\n velocity: Vector3D = self.convert_vector3d_from_source_to_agent(\n source.get_velocity()\n )\n return Vehicle(velocity=velocity, transform=transform, control=control)\n\n def convert_control_from_agent_to_source(\n self, control: VehicleControl\n ) -> carla.VehicleControl:\n return carla.VehicleControl(\n throttle=abs(control.throttle),\n steer=control.steering,\n brake=0,\n hand_brake=False,\n reverse=True if control.throttle < 0 else False,\n manual_gear_shift=False,\n gear=1,\n )\n\n def convert_vector3d_from_agent_to_source(\n self, vector3d: Vector3D\n ) -> carla.Vector3D:\n return carla.Vector3D(x=vector3d.x, y=vector3d.y, z=vector3d.z)\n\n def convert_location_from_agent_to_source(self, source: Location) -> carla.Location:\n return carla.Location(x=source.x, y=source.z, z=source.y)\n\n def convert_rotation_from_agent_to_source(self, source: Rotation) -> carla.Rotation:\n return carla.Rotation(pitch=source.yaw, yaw=source.pitch, roll=source.roll)\n\n def convert_transform_from_agent_to_source(\n self, source: Transform\n ) -> carla.Transform:\n return carla.Transform(\n location=self.convert_location_from_agent_to_source(source=source.location),\n rotation=self.convert_rotation_from_agent_to_source(source=source.rotation),\n )\n\n def _to_bgra_array(self, image):\n \"\"\"Convert a CARLA raw image to a BGRA numpy array.\"\"\"\n if not isinstance(image, carla.Image):\n raise ValueError(\"Argument must be a carla.sensor.Image\")\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n return array\n\n def _to_rgb_array(self, image):\n \"\"\"Convert a CARLA raw image to a RGB numpy array.\"\"\"\n array = self._to_bgra_array(image)\n # Convert BGRA to RGB.\n array = array[:, :, :3]\n # array = array[:, :, ::-1]\n return array\n\n" ]
[ [ "numpy.reshape", "numpy.dtype" ] ]
rgerum/saenopy
[ "18197afac266ef9f35c1c9c89c195db96dc782ab" ]
[ "docs/regularization.py" ]
[ "#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n\r\nfrom saenopy import Solver\r\n \r\n# initialize the object\r\nM = Solver()\r\n\r\nfrom saenopy.materials import SemiAffineFiberMaterial\r\n\r\n# provide a material model\r\nmaterial = SemiAffineFiberMaterial(1645, 0.0008, 1.0075, 0.033)\r\nM.setMaterialModel(material)\r\n\r\nimport numpy as np\r\n\r\n# define the coordinates of the nodes of the mesh\r\n# the array has to have the shape N_v x 3\r\nR = np.array([[0., 0., 0.], # 0\r\n [0., 1., 0.], # 1\r\n [1., 1., 0.], # 2\r\n [1., 0., 0.], # 3\r\n [0., 0., 1.], # 4\r\n [1., 0., 1.], # 5\r\n [1., 1., 1.], # 6\r\n [0., 1., 1.]]) # 7\r\n\r\n# define the tetrahedra of the mesh\r\n# the array has to have the shape N_t x 4\r\n# every entry is an index referencing a verces in R (indices start with 0)\r\nT = np.array([[0, 1, 7, 2],\r\n [0, 2, 5, 3],\r\n [0, 4, 5, 7],\r\n [2, 5, 6, 7],\r\n [0, 7, 5, 2]])\r\n\r\n# provide the node data\r\nM.setNodes(R)\r\n# and the tetrahedron data\r\nM.setTetrahedra(T)\r\n\r\n# the displacements of the nodes which shall be fitted\r\n# during the solving\r\nU = np.array([[0 , 0, 0], # 0\r\n [0 , 0, 0], # 1\r\n [0.01, 0, 0], # 2\r\n [0.01, 0, 0], # 3\r\n [0 , 0, 0], # 4\r\n [0.01, 0, 0], # 5\r\n [0.01, 0, 0], # 6\r\n [0 , 0, 0]]) # 7\r\n\r\n# hand the displacements over to the class instance\r\nM.setTargetDisplacements(U)\r\n\r\n# call the regularisation\r\nM.solve_regularized(stepper=0.1, alpha=0.001);\r\n\r\n\r\nM.viewMesh(50, 1)\r\n\r\n" ]
[ [ "numpy.array" ] ]
gmum/cwae
[ "50592903c321de25f339f3b00cbd2143741e5037" ]
[ "src/cw.py" ]
[ "import tensorflow as tf\nimport math as m\nfrom rec_errors import euclidean_norm_squared\n\n\ndef silverman_rule_of_thumb(N: int):\n return tf.pow(4/(3*N), 0.4)\n\n\ndef cw_1d(X, y=None):\n\n def N0(mean, variance):\n return 1.0/(tf.sqrt(2.0 * m.pi * variance)) * tf.exp((-(mean**2))/(2*variance))\n\n N = tf.cast(tf.shape(X)[0], tf.float32)\n if y is None:\n y = silverman_rule_of_thumb(N)\n\n A = tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1))\n return (1.0/(N*N)) * tf.reduce_sum(N0(A, 2*y)) + N0(0.0, 2.0 + 2*y) - (2/N) * tf.reduce_sum(N0(X, 1.0 + 2*y))\n\n\ndef cw_2d(X, y=None):\n def __phi(x):\n def __phi_f(s):\n t = s/7.5\n return tf.exp(-s/2) * (1 + 3.5156229*t**2 + 3.0899424*t**4 + 1.2067492*t**6 + 0.2659732*t**8\n + 0.0360768*t**10 + 0.0045813*t**12)\n\n def __phi_g(s):\n t = s/7.5\n return tf.sqrt(2/s) * (0.39894228 + 0.01328592*t**(-1) + 0.00225319*t**(-2) - 0.00157565*t**(-3)\n + 0.0091628*t**(-4) - 0.02057706*t**(-5) + 0.02635537*t**(-6) - 0.01647633*t**(-7)\n + 0.00392377*t**(-8))\n\n a = 7.5\n return __phi_f(tf.minimum(x, a)) - __phi_f(a) + __phi_g(tf.maximum(x, a))\n\n N = tf.cast(tf.shape(X)[0], tf.float32)\n if y is None:\n y = silverman_rule_of_thumb(N)\n\n A = 1/(N*N*tf.sqrt(y))\n B = 2.0/(N*tf.sqrt(y+0.5))\n\n A1 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)/(4*y)\n B1 = euclidean_norm_squared(X, axis=1)/(2+4*y)\n return 1/tf.sqrt(1+y) + A*tf.reduce_sum(__phi(A1)) - B*tf.reduce_sum(__phi(B1))\n\n\ndef cw(X, y=None):\n D = tf.cast(tf.shape(X)[1], tf.float32)\n N = tf.cast(tf.shape(X)[0], tf.float32)\n if y is None:\n y = silverman_rule_of_thumb(N)\n\n K = 1/(2*D-3)\n\n A1 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)\n A = (1/(N**2)) * tf.reduce_sum((1/tf.sqrt(y + K*A1)))\n\n B1 = euclidean_norm_squared(X, axis=1)\n B = (2/N)*tf.reduce_sum((1/tf.sqrt(y + 0.5 + K*B1)))\n\n return (1/tf.sqrt(1+y)) + A - B\n\n\ndef cw_choose(z_dim: int):\n if z_dim == 1:\n return cw_1d\n elif z_dim == 2:\n return cw_2d\n elif z_dim >= 20:\n return cw\n else:\n raise ValueError('Not defined for this latent dimension')\n\n\ndef cw_sampling(X, y=None):\n def phi_sampling(s, D):\n return tf.pow(1.0 + 4.0*s/(2.0*D-3), -0.5)\n\n D = tf.cast(tf.shape(X)[1], tf.float32)\n N = tf.cast(tf.shape(X)[0], tf.float32)\n D_int = tf.cast(D, tf.int32)\n N_int = tf.cast(N, tf.int32)\n if y is None:\n y = silverman_rule_of_thumb(N)\n\n YDistr = tf.contrib.distributions.MultivariateNormalDiag(loc=tf.zeros(D_int, tf.float32), \n scale_diag=tf.ones(D_int, tf.float32))\n Y = YDistr.sample(N_int)\n T = 1.0/(2.0*N*tf.sqrt(m.pi*y))\n\n A0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(X, 1)), axis=2)\n A = tf.reduce_sum(phi_sampling(A0/(4*y), D))\n\n B0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(Y, 0), tf.expand_dims(Y, 1)), axis=2)\n B = tf.reduce_sum(phi_sampling(B0/(4*y), D))\n\n C0 = euclidean_norm_squared(tf.subtract(tf.expand_dims(X, 0), tf.expand_dims(Y, 1)), axis=2)\n C = tf.reduce_sum(phi_sampling(C0/(4*y), D))\n\n return T*(A + B - 2*C)\n" ]
[ [ "tensorflow.exp", "tensorflow.shape", "tensorflow.zeros", "tensorflow.minimum", "tensorflow.expand_dims", "tensorflow.ones", "tensorflow.sqrt", "tensorflow.maximum", "tensorflow.pow", "tensorflow.cast" ] ]
j2slab/MLStudio
[ "7d7c4b1073617968c28f0e496020e4720b552451", "7d7c4b1073617968c28f0e496020e4720b552451" ]
[ "tests/test_data_services/test_preprocessing.py", "MLStudio/supervised/algorithms/optimization/gradient_descent.py" ]
[ "# -*- coding:utf-8 -*-\n# =========================================================================== #\n# Project : MLStudio #\n# File : \\test_preprocessing.py #\n# Python : 3.8.3 #\n# --------------------------------------------------------------------------- #\n# Author : John James #\n# Company : nov8.ai #\n# Email : jjames@nov8.ai #\n# URL : https://github.com/nov8ai/MLStudio #\n# --------------------------------------------------------------------------- #\n# Created : Saturday, July 25th 2020, 9:54:15 pm #\n# Last Modified : Saturday, July 25th 2020, 9:54:15 pm #\n# Modified By : John James (jjames@nov8.ai) #\n# --------------------------------------------------------------------------- #\n# License : BSD #\n# Copyright (c) 2020 nov8.ai #\n# =========================================================================== #\n\"\"\"Tests data preprocessing pipeline.\"\"\"\n#%%\nimport numpy as np\nimport pytest\nfrom pytest import mark\nfrom scipy.sparse import csr_matrix\nfrom sklearn.datasets import make_classification, make_regression\n\nfrom mlstudio.factories.data import DataProcessors\n\n# -------------------------------------------------------------------------- # \ndef check_add_bias(X, X_train, test):\n assert X_train.shape[1] == X.shape[1] + 1, test + \": bias term wasn't added.\"\n\ndef check_split(X, y, X_train, y_train, X_val, y_val, test):\n assert X_train.shape[1] == X.shape[1] + 1, test + \": bias term wasn't added.\" \n assert X.shape[0] > X_train.shape[0], test + \": split didn't happen.\" \n assert X_train.shape[0] == y_train.shape[0], test + \": X, y shape mismatch.\"\n assert X_val.shape[0] == y_val.shape[0], test + \": X, y shape mismatch.\" \n assert X_train.shape[0] > X_val.shape[0], test + \": Train size not greater than test.\" \n\ndef check_label_encoder(y, test):\n assert all(y) in range(len(np.unique(y))), test + \": label encoding didn't work\"\n\ndef check_one_hot_label_encoder(y, test):\n assert np.sum(y) == y.shape[0], test + \": one-hot-label encoding didn't binarize\"\n assert y.shape[1] > 2, test + \": one-hot-label encoding didn't create vector.\"\n\n\n@mark.data_processing\n@mark.regression_data\nclass RegressionDataTests:\n\n _test = \"Regression data\"\n\n def test_regression_train_data(self, get_regression_data):\n X, y = get_regression_data\n data_processor = DataProcessors.regression\n data = data_processor().process_train_data(X, y)\n check_add_bias(X, data['X_train']['data'],test = self._test) \n\n def test_regression_train_val_data(self, get_regression_data):\n X, y = get_regression_data\n data_processor = DataProcessors.regression\n data = data_processor().process_train_val_data(X, y, val_size=0.3)\n check_add_bias(X, data['X_train']['data'], test = self._test) \n check_add_bias(X, data['X_val']['data'], test = self._test) \n check_split(X, y, data['X_train']['data'], data['y_train']['data'], data['X_val']['data'], data['y_val']['data'], test=self._test)\n\n def test_regression_X_test_data(self, get_regression_data):\n X, y = get_regression_data\n data_processor = DataProcessors.regression\n data = data_processor().process_X_test_data(X)\n check_add_bias(X, data['X_test']['data'], test = self._test) \n\n@mark.data_processing\n@mark.binaryclass_data\nclass BinaryClassDataTests:\n\n _test = \"Binary classification data\"\n\n def test_binaryclass_train_data(self, get_logistic_regression_data):\n X, y = get_logistic_regression_data\n y = np.random.choice([\"hat\", \"bowl\"], size=y.shape[0])\n data_processor = DataProcessors.binaryclass\n data 
= data_processor().process_train_data(X, y)\n check_add_bias(X, data['X_train']['data'],test = self._test) \n\n def test_binaryclass_train_val_data(self, get_logistic_regression_data):\n X, y = get_logistic_regression_data\n y = np.random.choice([\"hat\", \"bowl\"], size=y.shape[0])\n data_processor = DataProcessors.binaryclass\n data = data_processor().process_train_val_data(X, y, val_size=0.3)\n check_add_bias(X, data['X_train']['data'], test = self._test) \n check_add_bias(X, data['X_val']['data'], test = self._test) \n check_split(X, y, data['X_train']['data'], data['y_train']['data'], data['X_val']['data'], data['y_val']['data'], test=self._test) \n check_label_encoder(data['y_train']['data'], test=self._test)\n check_label_encoder(data['y_val']['data'], test=self._test)\n\n def test_binaryclass_X_test_data(self, get_logistic_regression_data):\n X, y = get_logistic_regression_data\n y = np.random.choice([\"hat\", \"bowl\"], size=y.shape[0])\n data_processor = DataProcessors.binaryclass\n data = data_processor().process_X_test_data(X)\n check_add_bias(X, data['X_test']['data'],test = self._test) \n\n def test_binaryclass_y_test_data(self, get_logistic_regression_data):\n X, y = get_logistic_regression_data\n y = np.random.choice([\"hat\", \"bowl\"], size=y.shape[0]) \n data_processor = DataProcessors.binaryclass\n data = data_processor().process_y_test_data(y) \n check_label_encoder(data['y_test']['data'], test=self._test)\n\n@mark.data_processing\n@mark.multiclass_data\nclass MultiClassDataTests:\n\n _test = \"Multi classification data\"\n\n def test_multiclass_train_data(self, get_multiclass_data):\n X, y = get_multiclass_data\n y = np.random.choice([\"hat\", \"bowl\", \"junky\", \"riding\", \"happy\"], size=y.shape[0])\n data_processor = DataProcessors.multiclass\n data = data_processor().process_train_data(X, y)\n check_add_bias(X, data['X_train']['data'],test = self._test) \n\n def test_multiclass_train_val_data(self, get_multiclass_data):\n X, y = get_multiclass_data\n y = np.random.choice([\"hat\", \"bowl\", \"junky\", \"riding\", \"happy\"], size=y.shape[0])\n data_processor = DataProcessors.multiclass\n data = data_processor().process_train_val_data(X, y, val_size=0.3)\n check_add_bias(X, data['X_train']['data'], test = self._test) \n check_add_bias(X, data['X_val']['data'], test = self._test) \n check_split(X, y, data['X_train']['data'], data['y_train']['data'], data['X_val']['data'], data['y_val']['data'], test=self._test)\n check_one_hot_label_encoder(data['y_train']['data'], test=self._test)\n check_one_hot_label_encoder(data['y_val']['data'], test=self._test)\n\n def test_multiclass_X_test_data(self, get_multiclass_data):\n X, y = get_multiclass_data\n y = np.random.choice([\"hat\", \"bowl\", \"junky\", \"riding\", \"happy\"], size=y.shape[0])\n data_processor = DataProcessors.multiclass\n data = data_processor().process_X_test_data(X)\n check_add_bias(X, data['X_test']['data'],test = self._test) \n\n def test_multiclass_y_test_data(self, get_multiclass_data):\n X, y = get_multiclass_data\n y = np.random.choice([\"hat\", \"bowl\", \"junky\", \"riding\", \"happy\"], size=y.shape[0])\n data_processor = DataProcessors.multiclass\n data = data_processor().process_y_test_data(y) \n check_one_hot_label_encoder(data['y_test']['data'], test=self._test)\n \n ", "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# =========================================================================== #\n# Project : ML Studio #\n# Version : 0.1.0 #\n# File : gradient_descent.py #\n# Python : 
3.8.3 #\n# -------------------------------------------------------------------------- #\n# Author : John James #\n# Company : DecisionScients #\n# Email : jjames@decisionscients.com #\n# URL : https://github.com/decisionscients/MLStudio #\n# -------------------------------------------------------------------------- #\n# Created : Wednesday, March 18th 2020, 4:34:57 am #\n# Last Modified : Saturday, June 13th 2020, 9:52:07 pm #\n# Modified By : John James (jjames@decisionscients.com) #\n# -------------------------------------------------------------------------- #\n# License : BSD #\n# Copyright (c) 2020 DecisionScients #\n# =========================================================================== #\n#%%\n\"\"\"Gradient Descent Module\"\"\"\nfrom abc import ABC, abstractmethod, abstractproperty\nfrom collections import OrderedDict\nimport copy\nimport warnings\nfrom pathlib import Path\nimport site\nimport time\nimport tracemalloc\nPROJECT_DIR = Path(__file__).resolve().parents[4]\nsite.addsitedir(PROJECT_DIR)\n\nimport numpy as np\nfrom sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin\nfrom tabulate import tabulate\n\nfrom mlstudio.utils.data_manager import unpack_parameters\nfrom mlstudio.utils.data_manager import batch_iterator\nfrom mlstudio.utils import validation\n# =========================================================================== #\n# GRADIENT DESCENT #\n# =========================================================================== # \nclass GradientDescent(ABC, BaseEstimator):\n \"\"\"Gradient descent abstract base class for all estimators.\n \n Performs gradient descent optimization to estimate the parameters theta\n that best fit the data.\n\n Parameters\n ----------\n eta0 : float\n The initial learning rate on open interval (0,1) \n\n epochs : int\n The number of epochs to execute \n\n batch_size : None or int (default=None) \n The number of observations to include in each batch. This also \n specifies the gradient descent variant according to the following:\n\n Batch_Size Variant\n ---------- -----------------------\n None Batch Gradient Descent\n 1 Stochastic Gradient Descent\n Other int Minibatch Gradient Descent \n\n\n val_size : float in interval [0,1) (default=0.3)\n The proportion of the training set to allocate a validation set\n\n theta_init : array_like\n Contains the initial values for the parameters theta. Should include\n the bias parameter in addition to the feature parameters.\n\n optimizer : An Optimizer object or None\n The optimization algorithm to use. If None, the generic \n GradientDescentOptimizer will be used.\n\n metric : a Metric object (default=None)\n Supported Metric object for estimating performance. \n\n early_stop : an EarlyStop object or None (default=None)\n Class responsible for stopping the optimization process once\n training has stabilized. \n\n learning_rate : LearningRateSchedule object or None (default=None)\n This optional parameter can be a supported LearningRateSchedule\n object.\n\n observer_list : an ObserverListobject\n Manages observers and subscriptions\n\n progress : Progress observer object\n Reports optimization statistics each 'verbose' epochs.\n\n blackblox : Blackbox Object\n Tracks training and validation metrics during optimization. \n\n verbose : Bool or Int\n If False, the parameter is ignored. 
If an integer is provided, this \n will be the number of epochs between progress reports.\n\n summary : Summary object\n Reports summary data at end of training.\n\n random_state : int or None\n If an int, this will be the random state used anywhere pseudo-randomization\n occurs.\n \n \"\"\"\n\n def __init__(self, eta0=0.01, epochs=1000, batch_size=None, val_size=0.3, \n loss=None, data_processor=None, activation=None,\n theta_init=None, optimizer=None, scorer=None, early_stop=None, \n learning_rate=None, observer_list=None, progress=None, \n blackbox=None, summary=None, verbose=False, random_state=None,\n check_gradient=False, gradient_checker=None):\n\n self.eta0 = eta0\n self.epochs = epochs\n self.batch_size = batch_size\n self.val_size = val_size\n self.loss = loss\n self.data_processor = data_processor\n self.activation = activation\n self.theta_init = theta_init\n self.optimizer = optimizer \n self.scorer = scorer \n self.early_stop=early_stop \n self.learning_rate = learning_rate\n self.observer_list = observer_list\n self.progress = progress\n self.blackbox = blackbox\n self.summary = summary\n self.verbose = verbose\n self.random_state = random_state \n self.check_gradient = check_gradient\n self.gradient_checker = gradient_checker\n\n # ----------------------------------------------------------------------- # \n @property\n def variant(self):\n \"\"\"Returns the gradient descent variant based upon the batch size.\"\"\"\n if self.batch_size is None:\n variant = \"Batch Gradient Descent\"\n elif self.batch_size == 1:\n variant = \"Stochastic Gradient Descent\" \n else:\n variant = \"Minibatch Gradient Descent\" \n return variant\n\n # ----------------------------------------------------------------------- # \n @property\n def eta(self):\n return self._eta\n\n @eta.setter \n def eta(self, x):\n self._eta = x\n\n @property\n def converged(self):\n return self._converged\n\n @converged.setter\n def converged(self, x):\n validation.validate_bool(x)\n self._converged = x \n\n @property\n def loss(self):\n return self._loss\n\n @loss.setter\n def loss(self, x):\n self._loss = x\n\n @property\n def activation(self):\n return self._activation\n\n @activation.setter\n def activation(self, x):\n self._activation = x\n\n @property\n def theta(self):\n return self._theta\n\n @property\n def train_data_package(self):\n return self._train_data_package\n\n def get_blackbox(self):\n return self._blackbox\n\n def get_scorer(self):\n try:\n scorer = self._scorer\n except:\n scorer = self.scorer\n return scorer\n\n def set_scorer(self, x):\n validation.validate_scorer(self, x)\n self._scorer = x\n\n # ----------------------------------------------------------------------- #\n def _compile(self, log=None):\n \"\"\"Makes copies of mutable parameters and makes them private members.\"\"\"\n\n self._eta = self.learning_rate.eta0 if self.learning_rate else self.eta0 \n self._loss = copy.deepcopy(self.loss) \n self._activation = copy.deepcopy(self.activation)\n self._data_processor = copy.deepcopy(self.data_processor)\n self._observer_list = copy.deepcopy(self.observer_list) \n self._optimizer = copy.deepcopy(self.optimizer)\n self._scorer = copy.deepcopy(self.scorer)\n self._progress = copy.deepcopy(self.progress)\n self._summary = copy.deepcopy(self.summary) \n self._gradient_checker = copy.deepcopy(self.gradient_checker)\n self._blackbox = copy.deepcopy(self.blackbox)\n self._tracemalloc = tracemalloc\n\n # Observers\n self._learning_rate = copy.deepcopy(self.learning_rate) if \\\n self.learning_rate 
else self.learning_rate\n\n self._early_stop = copy.deepcopy(self.early_stop) if self.early_stop\\\n else self.early_stop \n\n # ----------------------------------------------------------------------- #\n def _initialize_state(self, log=None):\n \"\"\"Initializes variables that represent teh state of the estimator.\"\"\"\n self._epoch = 0 \n self._batch = 0 \n self._train_data_package = None\n self._theta = None\n self._gradient = None\n self._converged = False\n self._data_prepared = False\n self._performance_log = OrderedDict()\n self._profile_log = OrderedDict()\n self._timer = time\n self._epoch_log = None\n self._start_time = None\n self._end_time = None\n # Attributes\n self.n_features_in_ = None\n self.n_features_out_ = None\n self.classes_ = None\n self.n_classes_ = None\n # ----------------------------------------------------------------------- # \n def _unpack_data(self, data):\n \"\"\"Unpacks the data into attributes\"\"\"\n data_sets = {'X_train_': False, 'y_train_': False,\n 'X_val_': False, 'y_val_' : False, \n 'X_test_': False, 'y_test_' : False}\n for k,v in data_sets.items():\n if data.get(k):\n if data[k].get('data') is not None: \n data_sets[k] = True\n setattr(self, k, data[k]['data']) \n \n if data.get('X_train_'):\n self.n_features_in_ = data['X_train_']['metadata']['orig']['n_features']\n self.n_features_out_ = data['X_train_']['metadata']['processed']['n_features']\n\n self.train_data_package_ = data\n\n # ----------------------------------------------------------------------- # \n def _prepare_train_data(self, X, y=None, random_state=None):\n \"\"\"Prepares training data.\n \n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The independent variables from the training set.\n\n y : array-like of shape (n_samples,) or (n_samples, n_classes)\n The dependent variable from the training set.\n\n Returns\n -------\n data : dict\n dictionary containing data and metadata \n\n \"\"\"\n data = self._data_processor.process_train_data(X, y, random_state)\n self._unpack_data(data)\n\n # ----------------------------------------------------------------------- # \n def _prepare_train_val_data(self, X, y=None, val_size=None, random_state=None):\n \"\"\"Prepares training data.\n \n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The independent variables from the training set.\n\n y : array-like of shape (n_samples,) or (n_samples, n_classes)\n The dependent variable from the training set.\n\n val_size : float in [0,1)\n The proportion of data to allocate to the validation set.\n\n random_state : int or None (default=None)\n Seed for pseudo-randomization\n\n Returns\n -------\n data : dict\n Dictionary containing data and metadata\n \"\"\"\n\n data = self._data_processor.process_train_val_data(X, y, val_size, \n random_state)\n self._unpack_data(data) \n # ----------------------------------------------------------------------- # \n def _prepare_data(self, X, y=None):\n \"\"\"Prepares data for training and creates data and metadata attributes.\"\"\" \n self._data_prepared = True \n if self.val_size:\n self._prepare_train_val_data(X, y, self.val_size, self.random_state)\n else:\n self._prepare_train_data(X, y, self.random_state)\n\n # ----------------------------------------------------------------------- #\n def _initialize_observers(self, log=None):\n \"\"\"Initialize remaining observers. 
Create and initialize observer list.\"\"\" \n log = log or {} \n\n self._observer_list.append(self._blackbox)\n self._observer_list.append(self._summary)\n\n if self.verbose:\n self._observer_list.append(self._progress)\n\n if self._learning_rate:\n self._observer_list.append(self._learning_rate)\n\n if self._early_stop:\n self._observer_list.append(self._early_stop)\n\n if self.check_gradient:\n self._observer_list.append(self._gradient_checker)\n \n # Publish model parameters and estimator instance on observer objects.\n self._observer_list.set_params(self.get_params())\n self._observer_list.set_model(self) \n self._observer_list.on_train_begin(log)\n\n # ----------------------------------------------------------------------- #\n def _on_train_begin(self, log=None):\n \"\"\"Compiles the estimator, initializes weights, observers, and state\"\"\"\n log = log or {} \n validation.validate_estimator(self)\n self._compile(log) \n self._initialize_state(log)\n self._prepare_data(log.get('X'), log.get('y'))\n self._initialize_observers(log)\n self._theta = self._init_weights(self.theta_init)\n # ----------------------------------------------------------------------- #\n def _on_train_end(self, log=None):\n \"\"\"Finalizes training and posts model parameter attributes.\"\"\"\n log = log or {}\n self.n_iter_ = self._epoch \n self.intercept_, self.coef_ = unpack_parameters(self._theta)\n self._observer_list.on_train_end() \n # ----------------------------------------------------------------------- #\n def _on_epoch_begin(self, log=None):\n \"\"\"Initializes the epoch and notifies observers.\"\"\"\n log = log or {} \n self._epoch_log = self._performance_snapshot(log)\n self._start_time = self._timer.perf_counter() \n self._tracemalloc.start()\n self._observer_list.on_epoch_begin(epoch=self._epoch, log=log)\n # ----------------------------------------------------------------------- #\n def _on_epoch_end(self, log=None):\n \"\"\"Finalizes epoching and notifies observers.\"\"\"\n log = log or {}\n\n self._end_time = self._timer.perf_counter()\n elapsed_time = self._end_time - self._start_time\n \n current, peak = self._tracemalloc.get_traced_memory()\n self._tracemalloc.stop() \n \n self._epoch_log['cpu_time'] = elapsed_time\n self._epoch_log['current_memory'] = current\n self._epoch_log['peak_memory'] = peak\n \n self._observer_list.on_epoch_end(epoch=self._epoch, log=self._epoch_log)\n self._epoch += 1\n # ----------------------------------------------------------------------- # \n def _on_batch_begin(self, log=None):\n \"\"\"Initializes the batch and notifies observers.\"\"\"\n log = log or {}\n self._observer_list.on_batch_begin(batch=self._batch, log=log) \n # ----------------------------------------------------------------------- # \n def _on_batch_end(self, log=None):\n \"\"\"Wraps up the batch and notifies observers.\"\"\"\n log = log or {}\n self._observer_list.on_batch_end(batch=self._batch, log=log) \n self._batch += 1 \n\n # ----------------------------------------------------------------------- # \n def _init_weights(self, theta_init=None):\n \"\"\"Initializes parameters to theta_init or to random values.\n \n Parameters\n ----------\n theta_init : array-like of shape (n_features,) or (n_features, n_classes) Optional\n Optional initial values for the model parameters.\n\n Raises\n ------\n Exception if data has not been processed\n\n Returns\n ------ \n theta : array-like of shape (n_features,) or (n_features, n_classes)\n \"\"\"\n if not self._data_prepared:\n raise Exception(\"Data must 
be prepared before weights are initialized.\")\n\n if theta_init is not None:\n if theta_init.shape != (self.n_features_out_,):\n msg = \"Initial parameters theta must have shape (n_features,).\"\n raise ValueError(msg)\n theta = theta_init\n else:\n # Random initialization of weights\n rng = np.random.RandomState(self.random_state) \n theta = rng.randn(self.n_features_out_) \n # Set the bias initialization to zero\n theta[0] = 0\n return theta \n # ----------------------------------------------------------------------- # \n def _compute_output(self, theta, X):\n \"\"\"Computes output of the current iteration.\n\n For linear regression, this is the linear combination of the inputs\n and the weights. For binary classification the output is the sigmoid\n probability of the positive class. For the multiclass case,\n the output is the softmax probabilities. \n\n Parameters\n ----------\n theta : array-like (n_features,) or (n_features, n_classes)\n The model parameters at the current iteration\n\n X : array-like (n_samples, n_features)\n The features including a constant bias term.\n \n Returns\n -------\n y_out : float\n \"\"\"\n\n return np.array(X.dot(theta), dtype=np.float32)\n\n # ----------------------------------------------------------------------- # \n def _compute_loss(self, theta, y, y_out):\n \"\"\"Computes the average loss of the model.\n\n Parameters\n ----------\n theta : array-like (n_features,) or (n_features, n_classes)\n The model parameters at the current iteration\n\n y : array-like of shape (n_samples,)\n True target values\n\n y_out : array-like of shape (n_samples,)\n The real-valued output of the model.\n\n Returns\n -------\n J : float\n \"\"\"\n return self._loss.cost(theta, y, y_out)\n # ----------------------------------------------------------------------- # \n def _gradient(self, theta, X, y, y_out):\n \"\"\"Computes the gradient.\"\"\"\n\n return self._loss.gradient(theta, X, y, y_out) \n # ----------------------------------------------------------------------- # \n def _performance_snapshot(self, log=None):\n \"\"\"Computes loss and scores for the current set of parameters.\"\"\"\n log = log or {}\n log['epoch'] = self._epoch\n log['eta'] = self._eta\n log['theta'] = self._theta\n\n y_out = self._compute_output(self._theta, self.X_train_)\n log['train_cost'] = self._compute_loss(self._theta, self.y_train_,\n y_out)\n log['train_score'] = self.score(self.X_train_, self.y_train_)\n\n # Check not only val_size but also for empty validation sets \n if self.val_size:\n if hasattr(self, 'X_val_'):\n if self.X_val_.shape[0] > 0: \n y_out_val = self._compute_output(self._theta, self.X_val_)\n log['val_cost'] = self._compute_loss(self._theta, self.y_val_, y_out_val) \n log['val_score'] = self.score(self.X_val_, self.y_val_)\n # Store the gradient and its magnitude\n log['gradient'] = self._gradient\n log['gradient_norm'] = None\n if self._gradient is not None:\n log['gradient_norm'] = np.linalg.norm(self._gradient) \n\n return log \n\n # ----------------------------------------------------------------------- # \n def train_epoch(self):\n \"\"\"Trains a single epoch.\"\"\"\n self._on_epoch_begin()\n \n log = {}\n log['epoch'] = self._epoch\n\n for X_batch, y_batch in batch_iterator(self.X_train_, self.y_train_, batch_size=self.batch_size):\n self._on_batch_begin()\n\n y_out = self._compute_output(self._theta, X_batch) \n cost = self._compute_loss(self._theta, y_batch, y_out)\n # Grab theta for the batch log before it is updated\n log = {'batch': self._batch,'theta': 
self._theta, \n 'train_cost': cost}\n # Update the model parameters and return gradient for monitoring purposes.\n self._theta, self._gradient = self._optimizer(gradient=self._loss.gradient, \\\n learning_rate=self._eta, theta=copy.copy(self._theta), X=X_batch, y=y_batch,\\\n y_out=y_out) \n \n log['gradient_norm'] = np.linalg.norm(self._gradient) \n self._on_batch_end(log=log) \n \n self._on_epoch_end()\n\n\n # ----------------------------------------------------------------------- # \n def fit(self, X, y):\n \"\"\"Trains model until stop condition is met.\n \n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data\n y : numpy array, shape (n_samples,)\n Target values \n Returns\n -------\n self : returns instance of self\n \"\"\" \n log = {'X': X, 'y': y}\n self._on_train_begin(log) \n\n while (self._epoch < self.epochs and not self._converged): \n self.train_epoch()\n\n self._on_train_end()\n return self \n \n # ----------------------------------------------------------------------- # \n def _check_X(self, X, theta):\n \"\"\"Checks X to ensure that it has been processed for training/prediction.\"\"\"\n X = validation.check_X(X) \n if X.shape[1] != theta.shape[0]: \n data = self._data_processor.process_X_test_data(X) \n X = data['X_test_']['data']\n return X\n \n # ----------------------------------------------------------------------- # \n def _check_y_pred(self, y_pred):\n if y_pred.ndim > 1:\n msg = self.__class__.__name__ + \" doesn't support multioutput.\"\n warnings.warn(msg, UserWarning) \n else:\n return y_pred \n\n # ----------------------------------------------------------------------- # \n @abstractmethod\n def predict(self, X):\n \"\"\"Computes prediction on test data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data\n\n theta : array-like of shape (n_features) or (n_features, n_classes)\n The model parameters\n \n Returns\n -------\n y_pred : prediction\n \"\"\"\n pass\n\n # ----------------------------------------------------------------------- # \n def score(self, X, y):\n \"\"\"Default behavior for scoring predictions.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data\n \n y : array_like of shape (n_samples,) \n The target variable.\n \n Returns\n -------\n score based upon the metric object.\n \n \"\"\" \n y_pred = self.predict(X)\n return self._scorer(y, y_pred, n_features=self.n_features_in_) \n\n # ----------------------------------------------------------------------- # \n def summarize(self): \n \"\"\"Prints and optimization report. 
\"\"\"\n self._summary.report() \n\n# --------------------------------------------------------------------------- #\n# GRADIENT DESCENT REGRESSOR #\n# --------------------------------------------------------------------------- #\nclass GDRegressor(GradientDescent, RegressorMixin):\n \"\"\"Gradient Descent Regressor.\"\"\"\n\n @property\n def description(self): \n return \"Linear Regression by \" + self.variant \n\n @property\n def loss(self):\n return self._loss\n\n @loss.setter\n def loss(self, x):\n validation.validate_regression_loss(x)\n self._loss = x\n\n @property\n def data_processor(self):\n return self._data_processor\n\n @data_processor.setter\n def data_processor(self, x):\n validation.validate_regression_data_processor(x)\n self._data_processor = x \n\n def _get_tags(self):\n tags = {}\n tags['X_types'] = ['2darray']\n tags['poor_score'] = True\n return tags \n \n # --------------------------------------------------------------------------- #\n def predict(self, X):\n \"\"\"Predicts the output class.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data\n \n y : array_like of shape (n_samples,) \n The target variable.\n \n Returns\n -------\n y_pred : array-like of shape (n_samples, )\n \n \"\"\" \n X = self._check_X(X, self._theta)\n y_pred = self._compute_output(self._theta, X)\n y_pred = self._check_y_pred(y_pred)\n return y_pred\n \n # --------------------------------------------------------------------------- #\n def predict_proba(self, theta, X):\n raise NotImplementedError(\"predict_proba is not implemented for the GDRegression class.\") \n\n# --------------------------------------------------------------------------- #\n# GRADIENT DESCENT CLASSIFIER (BINARY) #\n# --------------------------------------------------------------------------- #\nclass GDBinaryclass(GradientDescent, ClassifierMixin):\n \"\"\"Gradient Descent Regressor.\"\"\"\n\n @property\n def description(self): \n return \"Binary Classification by \" + self.variant \n\n @property\n def loss(self):\n return self._loss\n\n @loss.setter\n def loss(self, x):\n validation.validate_binaryclass_loss(x)\n self._loss = x\n \n @property\n def data_processor(self):\n return self._data_processor\n\n @data_processor.setter\n def data_processor(self, x):\n validation.validate_binaryclass_data_processor(x)\n self._data_processor = x \n\n @property\n def activation(self):\n return self._activation\n\n @activation.setter\n def activation(self, x):\n validation.validate_binaryclass_activation(x)\n self._activation = x \n \n # --------------------------------------------------------------------------- # \n def _get_tags(self):\n tags = {}\n tags['binary_only'] = True\n if self.learning_rate or self.loss.regularizer:\n tags['poor_score'] = True\n return tags\n \n def _unpack_data(self, data):\n \"\"\"Unpacks the data into attributes.\"\"\"\n super(GDMulticlass, self)._unpack_data(data)\n self.classes_ = data['y_train_']['metadata']['orig']['classes']\n self.n_classes_ = data['y_train_']['metadata']['orig']['n_classes'] \n\n # --------------------------------------------------------------------------- # \n def compute_output(self, theta, X):\n \"\"\"Computes output as a probability of the positive class.\n\n The logit or linear combination of inputs and parameters is passed\n through a sigmoid function to create the probability of the \n positive class.\n\n Parameters\n ----------\n theta : array_like of shape (n_features,) or (n_features, n_classes)\n The current learned parameters of 
the model.\n\n X : array-like of shape (n_samples, n_features)\n The input data\n \n Returns\n -------\n y_out\n \"\"\"\n z = super(GDBinaryclass, self)._compute_output(theta, X) \n return self._activation(z)\n\n # --------------------------------------------------------------------------- # \n def _check_y(self, y):\n \"\"\"Confirms y has been encoded.\"\"\"\n if not validation.is_binary(y):\n data = self._data_processor.process_y_test_data(y)\n return data['y_test_']['data']\n return y\n\n def predict(self, X):\n \"\"\"Predicts the output class.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data\n \n y : array_like of shape (n_samples,) \n The target variable.\n \n Returns\n -------\n y_pred : array-like of shape (n_samples, )\n \n \"\"\" \n return self.predict(X, self._theta)\n\n def predict_proba(self, X, theta):\n \"\"\"Predicts the probability of the positive class\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data\n\n theta : array_like of shape (n_features,) or (n_features, n_classes)\n The current learned parameters of the model.\n\n Returns\n -------\n y_pred : Predicted class probability\n \"\"\"\n X = self._check_X(X, theta)\n y_pred = self._compute_output(theta, X) \n y_pred = self._check_y_pred(y_pred)\n return y_pred \n\n def score(self, X, y):\n \"\"\"Computes scores for test data after training.\n\n Calls the predict function based upon whether the metric for the scorer\n takes a probability or a predicted class.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data\n \n y : array_like of shape (n_samples,) \n The target variable.\n \n Returns\n -------\n score based upon the metric object.\n \n \"\"\" \n if self._scorer.is_probability_metric:\n y_pred = self.predict_proba(X)\n else:\n y_pred = self.predict(X)\n return self._scorer(y, y_pred, n_features=self.n_features_in_)\n \n\n# --------------------------------------------------------------------------- #\n# GRADIENT DESCENT MULTICLASS CLASSIFIER #\n# --------------------------------------------------------------------------- #\nclass GDMulticlass(GradientDescent, ClassifierMixin):\n \"\"\"Gradient Descent Multiclass Classifier.\"\"\"\n\n @property\n def description(self):\n return \"Multiclass Classification by \" + self.variant \n\n @property\n def loss(self):\n return self._loss\n\n @loss.setter\n def loss(self, x):\n validation.validate_multiclass_loss(x)\n self._loss = x\n\n @property\n def data_processor(self):\n return self._data_processor\n\n @data_processor.setter\n def data_processor(self, x):\n validation.validate_multiclass_data_processor(x)\n self._data_processor = x \n\n @property\n def activation(self):\n return self._activation\n\n @activation.setter\n def activation(self, x):\n validation.validate_multiclass_activation(x)\n self._activation = x \n \n # --------------------------------------------------------------------------- #\n def _get_tags(self):\n return {'binary_only': True} \n \n # --------------------------------------------------------------------------- #\n def init_weights(self, theta_init=None):\n \"\"\"Initializes parameters to theta_init or to random values.\n \n Parameters\n ----------\n theta_init : array-like of shape (n_features,) or (n_features, n_classes) Optional\n Optional initial values for the model parameters.\n\n Raises\n ------\n Exception if data has not been processed\n\n Returns\n ------ \n theta : array-like of shape (n_features,) or 
(n_features, n_classes)\n \"\"\"\n if not self._data_prepared:\n raise Exception(\"Data must be prepared before weights are initialized.\")\n\n if theta_init is not None:\n assert theta_init.shape == (self.n_features_out_, self.n_classes_),\\\n \"Initial parameters theta must have shape (n_features,n_classes).\"\n theta = theta_init\n else:\n # Random initialization of weights\n rng = np.random.RandomState(self.random_state) \n theta = rng.randn(self.n_features_out_, self.n_classes_) \n # Set the bias initialization to zero\n theta[0] = 0\n return theta \n # --------------------------------------------------------------------------- #\n def _unpack_data(self, data):\n \"\"\"Unpacks the data into attributes.\"\"\"\n super(GDMulticlass, self)._unpack_data(data)\n self.classes_ = data['y_train_']['metadata']['orig']['classes']\n self.n_classes_ = data['y_train_']['metadata']['orig']['n_classes'] \n \n # --------------------------------------------------------------------------- #\n def _compute_output(self, theta, X):\n \"\"\"Computes output as a vector of class probabilities.\n\n The unnormalized linear combination of inputs and parameters is passed\n through a softmax function to create a vector of probabilities.\n\n Parameters\n ----------\n theta : array_like of shape (n_features,) or (n_features, n_classes)\n The current learned parameters of the model.\n\n X : array-like of shape (n_samples, n_features)\n The input data\n \n Returns\n -------\n y_out\n \"\"\" \n z = super(GDMulticlass, self)._compute_output(theta, X)\n return self._activation(z) \n # --------------------------------------------------------------------------- #\n def _check_y(self, y):\n \"\"\"Confirms y has been one-hot encoded.\"\"\"\n if not validation.is_one_hot(y):\n data = self._data_processor.process_y_test_data(y)\n y = data['y_test_']['data'] \n return y\n \n # --------------------------------------------------------------------------- #\n def predict(self, X):\n \"\"\"Computes prediction on test data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data \n\n theta : array_like of shape (n_features,) or (n_features, n_classes)\n The current learned parameters of the model.\n\n Returns\n -------\n y_pred : Predicted class\n \"\"\"\n \n o = self.predict_proba(X, self._theta)\n return o.argmax(axis=1)\n\n # --------------------------------------------------------------------------- #\n def predict_proba(self, X):\n \"\"\"Predicts the probability of the positive class.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data\n \n y : array_like of shape (n_samples,) \n The target variable.\n \n Returns\n -------\n y_pred : array-like of shape (n_samples, )\n \n \"\"\" \n X = self._check_X(X, theta)\n return self._compute_output(theta, X) \n\n def score(self, X, y):\n \"\"\"Computes scores for test data after training.\n\n Calls the predict function based upon whether the metric for the scorer\n takes a probability or a predicted class.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data\n \n y : array_like of shape (n_samples,) or (n_samples, n_classes) \n The target variable.\n \n Returns\n -------\n score based upon the metric object.\n \n \"\"\" \n if self._scorer.is_probability_metric:\n y_pred = self.predict_proba(X)\n else:\n y_pred = self.predict(X)\n return self._scorer(y, y_pred, n_features=self.n_features_in_)\n\n# 
=========================================================================== #\n# GRADIENT DESCENT PURE OPTIMIZER #\n# =========================================================================== #\nclass GD(BaseEstimator):\n \"\"\"Performs pure optimization of an objective function.\"\"\"\n\n def __init__(self, eta0=0.01, epochs=1000, theta_init=None,\n objective=None, optimizer=None, learning_rate=None,\n blackbox=None, verbose=False, random_state=None):\n\n self.eta0 = eta0\n self.learning_rate=learning_rate\n self.epochs = epochs\n self.objective = objective\n self.theta_init = theta_init\n self.optimizer = optimizer\n self.verbose = verbose\n self.random_state = random_state \n\n # ----------------------------------------------------------------------- #\n def _init_weights(self):\n \"\"\"Initializes parameters.\"\"\"\n if self.theta_init is not None:\n if self.theta_init.shape[0] != 2:\n raise ValueError(\"Parameters theta must have shape (2,)\")\n else:\n self._theta = self.theta_init\n else: \n rng = np.random.RandomState(self.random_state) \n self._theta = rng.randn(2) \n\n # ----------------------------------------------------------------------- # \n def fit(self, X=None, y=None):\n \"\"\"Performs the optimization of the objective function..\n \n Parameters\n ----------\n objective : object derived from Objective class\n The objective function to be optimized\n\n Returns\n -------\n self\n \"\"\"\n \n self._on_train_begin()\n\n while (self._epoch < self.epochs and not self._converged):\n\n self._on_epoch_begin()\n\n cost = self._objective(self._theta)\n\n self._theta, self._gradient = self._optimizer(gradient=self._objective.gradient, \\\n learning_rate=self._eta, theta=copy.deepcopy(self._theta)) \n\n self._on_epoch_end()\n\n self._on_train_end()\n return self \n\n\n def predict(self, X):\n \"\"\"Predicts output as linear combination of inputs and weights.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data\n \n y : array_like of shape (n_samples,) \n The target variable.\n \n Returns\n -------\n y_pred : array-like of shape (n_samples, )\n \n \"\"\" \n return self._objective(X, self._theta) \n# %%\n" ]
[ [ "numpy.sum", "numpy.random.choice", "numpy.unique" ], [ "numpy.linalg.norm", "numpy.random.RandomState" ] ]
RanganThaya/ggnn.pytorch.sparse
[ "66be8c87c8fec83bb02c8046203e7faa9de55f41" ]
[ "utils/train.py" ]
[ "import torch\nfrom torch.autograd import Variable\n\n\ndef train(epoch, dataloader, net, criterion, optimizer, opt):\n net.train()\n for i, (adj_matrix, annotation, target) in enumerate(dataloader, 0):\n net.zero_grad()\n\n padding = torch.zeros(len(annotation),\n opt.state_dim - opt.annotation_dim)\n init_input = torch.cat((annotation, padding), 1)\n if opt.cuda:\n init_input = init_input.cuda()\n adj_matrix = adj_matrix.cuda()\n annotation = annotation.cuda()\n target = target.cuda()\n\n init_input = Variable(init_input)\n adj_matrix = Variable(adj_matrix)\n annotation = Variable(annotation)\n target = Variable(target)\n\n output = net(init_input, annotation, adj_matrix)\n\n loss = criterion(output, target)\n\n loss.backward()\n optimizer.step()\n\n if i % int(len(dataloader) / 10 + 1) == 0 and opt.verbal:\n print('[%d/%d][%d/%d] Loss: %.4f' %\n (epoch, opt.niter, i, len(dataloader), loss.data[0]))\n" ]
[ [ "torch.autograd.Variable", "torch.cat" ] ]
McCoyGroup/Coordinerds
[ "058a4f5b29f157e499cec3c8f2da8b216f0210ef" ]
[ "Psience/Molecools/CoordinateSystems.py" ]
[ "\"\"\"\nDefines useful extended internal coordinate frames\n\"\"\"\n\n\n\nimport numpy as np\nimport McUtils.Numputils as nput\nfrom McUtils.Coordinerds import (\n ZMatrixCoordinateSystem, CartesianCoordinateSystem, CoordinateSystemConverter,\n ZMatrixCoordinates, CartesianCoordinates3D, CoordinateSet, CoordinateSystemConverters\n)\nfrom .MoleculeInterface import AbstractMolecule\n\n__all__ = [\n \"MolecularZMatrixCoordinateSystem\",\n \"MolecularCartesianCoordinateSystem\"\n]\n\n__reload_hook__ = [\".MoleculeInterface\"]\n\ndef _get_best_axes(first_pos, axes):\n \"\"\"\n Determine the best pair of inertial axes so that we don't get large-scale breakdowns from the choice of embedding\n\n :param first_pos:\n :type first_pos:\n :param axes:\n :type axes:\n :return:\n :rtype:\n \"\"\"\n\n if axes.ndim > 2:\n axes = axes[..., (0, 1), :]\n ax_choice = (0, 1)\n ax_names = [\"A\", \"B\"]\n else:\n fp_norm = np.linalg.norm(first_pos)\n if fp_norm > 1.0e-10: # not chilling at the origin...\n first_pos = first_pos / fp_norm\n # check if it lies along an axis or is perpendicular to an axis\n a_proj = np.dot(first_pos, axes[0])\n b_proj = np.dot(first_pos, axes[1])\n c_proj = np.dot(first_pos, axes[2])\n if np.abs(b_proj) < .05: # lies in the A/C plane\n if np.abs(a_proj) > .95:\n ax_choice = (1, 2)\n ax_names = [\"B\", \"C\"]\n else:\n ax_choice = (0, 1)\n ax_names = [\"A\", \"B\"]\n elif np.abs(c_proj) < .05: # lies in the A/B plane\n if np.abs(a_proj) > .95:\n ax_choice = (1, 2)\n ax_names = [\"B\", \"C\"]\n else:\n ax_choice = (0, 2)\n ax_names = [\"A\", \"C\"]\n elif np.abs(a_proj) < .05: # lies in the B/C plane\n if np.abs(b_proj) > .95:\n ax_choice = (0, 2)\n ax_names = [\"A\", \"C\"]\n else:\n ax_choice = (0, 1)\n ax_names = [\"A\", \"B\"]\n else: # not in any of the planes so no issues\n ax_choice = (0, 1)\n ax_names = [\"A\", \"B\"]\n\n else:\n ax_choice = (0, 1)\n ax_names = [\"A\", \"B\"]\n axes = axes[ax_choice,]\n return axes, ax_names, ax_choice\n\nclass MolecularZMatrixCoordinateSystem(ZMatrixCoordinateSystem):\n \"\"\"\n Mirrors the standard ZMatrix coordinate system in _almost_ all regards, but forces an embedding\n \"\"\"\n name = \"MolecularZMatrix\"\n def __init__(self, molecule, converter_options=None, **opts):\n \"\"\"\n\n :param molecule:\n :type molecule: AbstractMolecule\n :param converter_options:\n :type converter_options:\n :param opts:\n :type opts:\n \"\"\"\n self.molecule = molecule\n if converter_options is None:\n converter_options = opts\n opts = {}\n nats = len(molecule.atoms)\n super().__init__(converter_options=converter_options, dimension=(nats, 3), coordinate_shape=(nats, 3), opts=opts)\n self.set_embedding()\n @property\n def origins(self):\n return self.converter_options['origins']\n @property\n def axes(self):\n return self.converter_options['axes']\n\n def pre_convert(self, system):\n self.set_embedding()\n\n def set_embedding(self):\n molecule = self.molecule\n com = molecule.center_of_mass\n axes = molecule.inertial_axes\n converter_options = self.converter_options\n if 'ordering' in converter_options:\n ordering = np.array(converter_options['ordering'], dtype=int)\n ordering[0, 1] = -3; ordering[0, 2] = -1; ordering[0, 3] = -2\n ordering[1, 2] = -1; ordering[1, 3] = -2\n ordering[2, 3] = -2\n converter_options['ordering'] = ordering\n first = ordering[0, 0]\n else:\n first = 0\n\n first_pos = molecule.coords[first]\n axes, ax_names, ax_choice = _get_best_axes(first_pos, axes)\n\n converter_options['origins'] = com\n converter_options['axes'] = axes\n 
converter_options['axes_labels'] = ax_names\n converter_options['axes_choice'] = ax_choice\n converter_options['molecule'] = molecule\n\n def jacobian(self,\n *args,\n reembed=None,\n strip_dummies=None,\n converter_options=None,\n **kwargs\n ):\n if converter_options is None:\n converter_options = {}\n merged_convert_options = dict(self.converter_options, **converter_options)\n try:\n remb = merged_convert_options['reembed'] if reembed is None else reembed\n except KeyError:\n remb = None\n\n try:\n strip_dummies = merged_convert_options['strip_dummies'] if strip_dummies is None else strip_dummies\n except KeyError:\n strip_dummies = False\n\n if strip_dummies:\n dummies = self.molecule.dummy_positions\n else:\n dummies = None\n\n if dummies is not None:\n main_excludes = np.setdiff1d(\n np.arange(self.molecule.num_atoms),\n dummies\n )\n\n try:\n self.converter_options['reembed'] = True if remb is None else remb\n jacs = super().jacobian(*args, converter_options=converter_options, **kwargs)\n raw_jacs = []\n for j in jacs:\n ext_dim = j.ndim - 2\n shp = sum(\n ((j.shape[i] // 3, 3) for i in range(ext_dim)),\n ()\n ) + j.shape[-2:]\n j = j.reshape(shp)\n if dummies is not None:\n for i in range(ext_dim):\n j = np.take(j, main_excludes, axis=2*i)\n\n # j.shape[:i]\n # + (j.shape[i] // 3, 3)\n # + j.shape[i+1:]\n # )\n raw_jacs.append(j)\n jacs = raw_jacs\n return jacs\n finally:\n if remb is not None:\n self.converter_options['reembed'] = remb\n\nclass MolecularCartesianCoordinateSystem(CartesianCoordinateSystem):\n \"\"\"\n Mirrors the standard Cartesian coordinate system in _almost_ all regards, but forces an embedding\n \"\"\"\n name= \"MolecularCartesians\"\n def __init__(self, molecule, converter_options=None, **opts):\n \"\"\"\n\n :param molecule:\n :type molecule: AbstractMolecule\n :param converter_options:\n :type converter_options:\n :param opts:\n :type opts:\n \"\"\"\n self.molecule = molecule #type: AbstractMolecule\n nats = len(self.molecule.atoms)\n if converter_options is None:\n converter_options = opts\n opts = {}\n super().__init__(converter_options=converter_options, dimension=(nats, 3), opts=opts)\n\n def pre_convert(self, system):\n self.set_embedding()\n\n def set_embedding(self):\n \"\"\"\n Sets up the embedding options...\n :return:\n :rtype:\n \"\"\"\n molecule = self.molecule\n com = molecule.center_of_mass\n axes = molecule.inertial_axes\n converter_options = self.converter_options\n if 'ordering' in converter_options:\n ordering = np.array(converter_options['ordering'], dtype=int)\n ordering[0, 1] = -3; ordering[0, 2] = -2; ordering[0, 3] = -1\n ordering[1, 2] = -1; ordering[1, 3] = -2\n ordering[2, 3] = -2\n converter_options['ordering'] = ordering\n first = ordering[0, 0]\n else:\n first = 0\n\n first_pos = molecule.coords[first]\n axes, ax_names, ax_choice = _get_best_axes(first_pos, axes)\n\n converter_options['origins'] = com\n converter_options['axes'] = axes\n converter_options['axes_labels'] = ax_names\n converter_options['axes_choice'] = ax_choice\n converter_options['molecule'] = molecule\n\n def jacobian(self,\n coords,\n system,\n strip_dummies=None,\n converter_options=None,\n analytic_deriv_order=None,\n **kwargs\n ):\n if converter_options is None:\n converter_options = {}\n merged_convert_options = dict(self.converter_options, **converter_options)\n try:\n strip_dummies = merged_convert_options['strip_dummies'] if strip_dummies is None else strip_dummies\n except KeyError:\n strip_dummies = False\n\n try:\n analytic_deriv_order = 
merged_convert_options['analytic_deriv_order'] if analytic_deriv_order is None else analytic_deriv_order\n except KeyError:\n analytic_deriv_order = 0\n\n if strip_dummies:\n dummies = self.molecule.dummy_positions\n if len(dummies) == 0:\n dummies = None\n else:\n dummies = None\n\n if dummies is not None:\n main_excludes = np.setdiff1d(\n np.arange(self.molecule.num_atoms),\n dummies\n )\n else:\n main_excludes = None\n\n jacs = super().jacobian(coords, system, analytic_deriv_order=analytic_deriv_order, converter_options=converter_options, **kwargs)\n raw_jacs = []\n for n,j in enumerate(jacs): # this expects a full filling of the jacobians which maybe I need to not expect...\n baseline = 2*analytic_deriv_order + len(coords.shape)\n ext_dim = j.ndim - baseline\n shp = sum(\n ((j.shape[i] // 3, 3) for i in range(ext_dim)),\n ()\n ) + j.shape[-baseline:]\n j = j.reshape(shp)\n if dummies is not None:\n for i in range(ext_dim):\n j = np.take(j, main_excludes, axis=2*i)\n for i in range(analytic_deriv_order):\n j = np.take(j, main_excludes, axis=-2*(i+2))\n\n if len(coords.shape) > 2:\n j = np.moveaxis(j, -3, 0)\n\n raw_jacs.append(j)\n jacs = raw_jacs\n return jacs\n\nclass MolecularCartesianToZMatrixConverter(CoordinateSystemConverter):\n \"\"\"\n ...\n \"\"\"\n types = (MolecularCartesianCoordinateSystem, MolecularZMatrixCoordinateSystem)\n def convert(self, coords, molecule=None, origins=None, axes=None, ordering=None, **kwargs):\n \"\"\"\n Converts from Cartesian to ZMatrix coords, preserving the embedding\n :param coords:\n :type coords: CoordinateSet\n :param molecule:\n :type molecule:\n :param origins:\n :type origins:\n :param axes:\n :type axes:\n :param ordering:\n :type ordering:\n :param kwargs:\n :type kwargs:\n :return:\n :rtype:\n \"\"\"\n\n zmcs, opts = self.convert_many(np.array([coords]),\n molecule=molecule, origins=origins, axes=axes, ordering=ordering, **kwargs)\n zmcs = zmcs[0]\n\n if 'derivs' in opts:\n derivs = opts['derivs']\n reshaped_derivs = [None] * len(derivs)\n for i, v in enumerate(derivs):\n reshaped_derivs[i] = v[0]\n opts['derivs'] = reshaped_derivs\n\n return zmcs, opts\n\n def convert_many(self, coords,\n molecule=None,\n origins=None, axes=None,\n ordering=None,\n strip_embedding=True,\n strip_dummies=False,\n **kwargs):\n \"\"\"\n Converts from Cartesian to ZMatrix coords, preserving the embedding\n\n :param coords: coordinates in Cartesians to convert\n :type coords: np.ndarray\n :param molecule:\n :type molecule: AbstractMolecule\n :param origins: the origin for each individual structure\n :type origins: np.ndarray\n :param axes: the axes for each structure\n :type axes: np.ndarray\n :param ordering: the Z-matrix ordering spec\n :type ordering:\n :param strip_embedding: whether to strip the embedding coordinates\n :type strip_embedding:\n :param strip_dummies: whether to strip all dummy coordinates\n :type strip_dummies:\n :param kwargs:\n :type kwargs:\n :return:\n :rtype:\n \"\"\"\n\n n_sys = coords.shape[0]\n n_coords = coords.shape[1]\n n_atoms = len(molecule.atoms)\n\n # we add three dummy atoms at the origins and along the axes before doing the conversion\n if origins.ndim == 1:\n origins = np.broadcast_to(origins[np.newaxis, np.newaxis], (n_sys, 1, 3))\n elif origins.ndim == 2:\n origins = origins[:, np.newaxis, :]\n if axes.ndim == 2:\n axes = np.broadcast_to(axes[np.newaxis], (n_sys, 2, 3))\n if origins.shape[0] != n_sys:\n if n_sys % origins.shape[0] != 0:\n raise ValueError(\"inconsistent shapes; origins shape {} but coords shape 
{}\".format(\n origins.shape,\n coords.shape\n ))\n num_coords = n_sys // origins.shape[0]\n origins = np.broadcast_to(origins[:, np.newaxis, :, :], (origins.shape[0], num_coords) + origins.shape[1:])\n origins = origins.reshape((n_sys,) + origins.shape[2:])\n if axes.shape[0] != n_sys:\n if n_sys % axes.shape[0] != 0:\n raise ValueError(\"inconsistent shapes; axes shape {} but coords shape {}\".format(\n axes.shape,\n coords.shape\n ))\n num_coords = n_sys // axes.shape[0]\n axes = np.broadcast_to(axes[:, np.newaxis, :, :], (axes.shape[0], num_coords) + axes.shape[1:])\n axes = axes.reshape((n_sys,) + axes.shape[2:])\n coords = np.concatenate([origins, origins+axes, coords], axis=1)\n if ordering is not None:\n ordering = np.array(ordering, dtype=int)\n ordering[0, 1] = -3; ordering[0, 2] = -2; ordering[0, 3] = -1\n ordering[1, 2] = -2; ordering[1, 3] = -1\n ordering[2, 3] = -1\n ordering = ordering + 3\n ordering = np.concatenate([ [[0, -1, -1, -1], [1, 0, -1, -1], [2, 0, 1, -1]], ordering])\n # print(\"...?\", ordering)\n res = CoordinateSet(coords, CartesianCoordinates3D).convert(ZMatrixCoordinates,\n ordering=ordering,\n origins=origins,\n axes=axes,\n **kwargs\n )\n\n if isinstance(res, tuple):\n zmcs, opts = res\n else:\n zmcs = res\n opts=res.converter_options\n opts['ordering'] = opts['ordering'][3:] - 3\n # zmcs = zmcs[:, 2:]\n if strip_dummies:\n dummies = [0, 1, 2] + [x+3 for x in molecule.dummy_positions] # add on axes\n elif strip_embedding:\n dummies = [0, 1, 2]\n else:\n dummies = None\n\n if dummies is not None:\n main_excludes = np.setdiff1d(\n np.arange(len(molecule.atoms) + 3),\n dummies\n )\n sub_excludes = main_excludes - 1 # drop one fewer terms to drop I think...\n if 'derivs' in opts:\n derivs = opts['derivs']\n reshaped_derivs = [None] * len(derivs)\n deriv_excludes = np.arange(3, len(molecule.atoms) + 3)\n for i, v in enumerate(derivs):\n # drop all terms relating to the embedding of the embedding\n start_dim = v.ndim - 2*(i+2)\n for j in range(start_dim, v.ndim-2, 2):\n v = np.take(v, deriv_excludes, axis=j)\n v = np.take(v, sub_excludes, axis=-2)\n reshaped_derivs[i] = v\n\n opts['derivs'] = reshaped_derivs\n\n zmcs = zmcs[..., sub_excludes, :]\n # raise Exception(derivs.shape)\n return zmcs, opts\n\nMolecularCartesianToZMatrixConverter = MolecularCartesianToZMatrixConverter()\nMolecularCartesianToZMatrixConverter.register(CoordinateSystemConverters)\n\nclass MolecularCartesianToRegularCartesianConverter(CoordinateSystemConverter):\n \"\"\"\n ...\n \"\"\"\n types = (MolecularCartesianCoordinateSystem, CartesianCoordinateSystem)\n def convert(self, coords, **kw):\n return coords, kw\n\n def convert_many(self, coords, **kwargs):\n \"\"\"\n Converts from Cartesian to ZMatrix coords, preserving the embedding\n \"\"\"\n return coords, kwargs\nMolecularCartesianToRegularCartesianConverter = MolecularCartesianToRegularCartesianConverter()\nMolecularCartesianToRegularCartesianConverter.register()\n\nclass MolecularZMatrixToCartesianConverter(CoordinateSystemConverter):\n \"\"\"\n ...\n \"\"\"\n types = (MolecularZMatrixCoordinateSystem, MolecularCartesianCoordinateSystem)\n def convert(self, coords, **kw):\n total_points, opts = self.convert_many(coords[np.newaxis], **kw)\n return total_points[0], opts\n\n def convert_many(self, coords, molecule=None, origins=None, axes=None, ordering=None,\n reembed=False, axes_choice=None, return_derivs=None,\n strip_dummies=False,\n strip_embedding=True,\n planar_ref_tolerance=None,\n **kwargs):\n \"\"\"\n Converts from Cartesian to 
ZMatrix coords, attempting to preserve the embedding\n \"\"\"\n from .Molecule import Molecule\n\n n_sys = coords.shape[0]\n n_coords = coords.shape[1]\n n_atoms = len(molecule.atoms)\n if n_coords != n_atoms + 2:\n # means we already added the embedding\n if n_coords != n_atoms:\n raise ValueError('Embedding unclear when num_coords ({}) < num_atoms ({})'.format(\n n_coords,\n n_atoms\n ))\n\n x_ax = axes[..., 0, :]\n y_ax = axes[..., 1, :]\n extra_norms0 = nput.vec_norms(x_ax)\n extra_norms1 = nput.vec_norms(y_ax)\n extra_angles, _ = nput.vec_angles(x_ax, y_ax)\n extra_coords = np.zeros((n_sys, 2, 3))\n extra_coords[..., 0, 0] = extra_norms0\n extra_coords[..., 1, 0] = extra_norms1\n extra_coords[..., 1, 1] = extra_angles\n\n coords = np.concatenate([extra_coords, coords], axis=-2)\n if ordering is not None:\n ordering = np.array(ordering, dtype=int)\n ordering = ordering + 3\n ordering = np.concatenate([ [[0, -1, -1, -1], [1, 0, -1, -1], [2, 0, 1, -1]], ordering])\n\n refuse_derivs = reembed and coords.squeeze().ndim != 2\n res = CoordinateSet(coords, ZMatrixCoordinates).convert(CartesianCoordinates3D,\n ordering=ordering,\n origins=origins,\n axes=axes,\n return_derivs=(return_derivs and not refuse_derivs),\n **kwargs)\n\n if isinstance(res, tuple):\n carts, opts = res\n else:\n carts = res\n opts = res.converter_options\n\n if reembed:\n if molecule is None:\n raise ValueError(\"can't reembed without a reference structure\")\n embed_carts = carts[..., 3:, :]\n reembed = not (\n carts.squeeze().ndim == 2 and\n np.allclose(molecule.coords, embed_carts, atol=1.0e-5)\n ) # agree to like a ten thousandth of an angstrom\n if reembed:\n if not return_derivs:\n embed_carts = molecule.embed_coords(embed_carts, planar_ref_tolerance=planar_ref_tolerance)\n carts = np.concatenate([\n carts[..., :3, :],\n embed_carts\n ],\n axis=-2\n )\n else:\n inert_coords, coord_coms, coord_axes = Molecule(molecule.atoms, embed_carts).principle_axis_data\n if axes_choice is None:\n axes_choice = (0, 1)\n guh = self.convert_many(coords,\n origins=coord_coms,\n axes=coord_axes[:, axes_choice],\n molecule=molecule,\n reembed=False,\n ordering=ordering,\n return_derivs=return_derivs,\n axes_choice=axes_choice,\n **kwargs\n )\n return guh\n\n opts['origins'] = origins\n opts['axes'] = axes\n if ordering is not None:\n opts['ordering'] = ordering[3:] - 3\n if strip_dummies:\n # raise Exception(\"wwwwaaaaaaaaat\")\n dummies = [0, 1, 2] + [x + 3 for x in molecule.dummy_positions] # add on axes\n elif strip_embedding:\n dummies = [0, 1, 2]\n else:\n dummies = None\n if dummies is not None:\n main_excludes = np.setdiff1d(\n np.arange(len(molecule.atoms) + 3),\n dummies\n )\n sub_excludes = main_excludes - 1 # drop one fewer terms to drop I think...\n if 'derivs' in opts:\n derivs = opts['derivs']\n reshaped_derivs = [None] * len(derivs)\n deriv_excludes = np.arange(3, len(molecule.atoms) + 3)\n for i, v in enumerate(derivs):\n # drop all terms relating to the embedding of the embedding\n start_dim = v.ndim - i\n for j in range(start_dim, v.ndim, 2):\n v = np.take(v, deriv_excludes, axis=j)\n v = np.take(v, sub_excludes, axis=-2)\n reshaped_derivs[i] = v\n opts['derivs'] = reshaped_derivs\n\n carts = carts[..., main_excludes, :]\n\n return carts, opts\n\nMolecularZMatrixToCartesianConverter = MolecularZMatrixToCartesianConverter()\nMolecularZMatrixToCartesianConverter.register()\n\nclass MolecularZMatrixToRegularZMatrixConverter(CoordinateSystemConverter):\n \"\"\"\n ...\n \"\"\"\n types = 
(MolecularZMatrixCoordinateSystem, ZMatrixCoordinateSystem)\n def convert(self, coords, **kw):\n return coords, kw\n\n def convert_many(self, coords, **kwargs):\n return coords, kwargs\nMolecularZMatrixToRegularZMatrixConverter = MolecularZMatrixToRegularZMatrixConverter()\nMolecularZMatrixToRegularZMatrixConverter.register()\n\n\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.linalg.norm", "numpy.dot", "numpy.zeros", "numpy.take", "numpy.allclose", "numpy.arange", "numpy.abs", "numpy.moveaxis", "numpy.broadcast_to" ] ]
Veos-Digital/hypergraph_machines
[ "0d24cd89766c45c6c1ffb2967438ef82288a5d3c" ]
[ "hypergraph_machines/examples/generate_figure.py" ]
[ "\"\"\"Trains a hypergraph machine on MNIST and generates Figure 1 panels b and c\nof Discrete and continuous learning machines\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR\nfrom hypergraph_machines.hypergraph_machine import HypergraphMachine\nfrom hypergraph_machines.utils import train, test, visualise_graph\nfrom hypergraph_machines.dataset_loader import load_dataset\nfrom hypergraph_machines.utils import BestModelSaver, generate_timestamp, reg_loss\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(context=\"paper\", style=\"white\")\nplt.ion()\n\ndevice = torch.device(\"cuda\")\ntimestamp = generate_timestamp()\nbatch_size, num_epochs = 128, 100\ntrain_loader,\\\ntest_loader,\\\nimage_size = load_dataset(\"MNIST\", batch_size, data_folder = \"../../data\")\nmodel = HypergraphMachine((1,28,28), 10, number_of_classes = 10, tol = 1e-6,\n limit_image_upsample = 2, prune=True).to(device)\noptimizer = torch.optim.SGD(model.parameters(), lr= 3e-3)\nsaver = BestModelSaver('./checkpoints' + timestamp)\n\nfor epoch in range(1, num_epochs + 1):\n print(\"starting epoch {} of {}\".format(epoch, num_epochs))\n train(model, device, train_loader, optimizer, epoch,\n loss_func = reg_loss, loss_inputs = [model, F.nll_loss, 1])\n loss, acc = test(model, device, test_loader)\n saver.save(model, optimizer, epoch, loss, acc)\n if epoch % 10 == 1:\n f,ax = plt.subplots()\n visualise_graph(model, ax=ax)\n f.suptitle(\"epoch {}\".format(epoch))\n" ]
[ [ "torch.device", "matplotlib.pyplot.ion", "matplotlib.pyplot.subplots" ] ]
clappm/AdaptiveDecisionMaking_2018
[ "73de4945bdb5f839d6a041fe2bd9b25dcdaacf9d" ]
[ "ADMCode/snuz/ppo/storage.py" ]
[ "\"\"\"\nModified from \nhttps://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/storage.py\n\"\"\"\n\nimport torch\nfrom torch.utils.data.sampler import BatchSampler\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n\nclass RolloutStorage(object):\n def __init__(self, num_steps, num_processes, obs_shape, action_space,\n state_size):\n self.observations = torch.zeros(num_steps + 1, num_processes,\n *obs_shape)\n self.states = torch.zeros(num_steps + 1, num_processes, state_size)\n self.rewards = torch.zeros(num_steps, num_processes, 1)\n self.value_preds = torch.zeros(num_steps + 1, num_processes, 1)\n self.returns = torch.zeros(num_steps + 1, num_processes, 1)\n self.action_log_probs = torch.zeros(num_steps, num_processes, 1)\n if action_space.__class__.__name__ == 'Discrete':\n action_shape = 1\n else:\n action_shape = action_space.shape[0]\n self.actions = torch.zeros(num_steps, num_processes, action_shape)\n if action_space.__class__.__name__ == 'Discrete':\n self.actions = self.actions.long()\n self.masks = torch.ones(num_steps + 1, num_processes, 1)\n\n def cuda(self):\n self.observations = self.observations.cuda()\n self.states = self.states.cuda()\n self.rewards = self.rewards.cuda()\n self.value_preds = self.value_preds.cuda()\n self.returns = self.returns.cuda()\n self.action_log_probs = self.action_log_probs.cuda()\n self.actions = self.actions.cuda()\n self.masks = self.masks.cuda()\n\n def insert(self, step, current_obs, state, action, action_log_prob,\n value_pred, reward, mask):\n self.observations[step + 1].copy_(current_obs)\n self.states[step + 1].copy_(state)\n self.actions[step].copy_(action)\n self.action_log_probs[step].copy_(action_log_prob)\n self.value_preds[step].copy_(value_pred)\n self.rewards[step].copy_(reward)\n self.masks[step + 1].copy_(mask)\n\n def after_update(self):\n self.observations[0].copy_(self.observations[-1])\n self.states[0].copy_(self.states[-1])\n self.masks[0].copy_(self.masks[-1])\n\n def compute_returns(self, next_value, use_gae, gamma, tau):\n if use_gae:\n self.value_preds[-1] = next_value\n gae = 0\n for step in reversed(range(self.rewards.size(0))):\n delta = self.rewards[step] + gamma * self.value_preds[step +\n 1] * self.masks[step\n +\n 1] - self.value_preds[step]\n gae = delta + gamma * tau * self.masks[step + 1] * gae\n self.returns[step] = gae + self.value_preds[step]\n else:\n self.returns[-1] = next_value\n for step in reversed(range(self.rewards.size(0))):\n self.returns[step] = self.returns[step + 1] * \\\n gamma * self.masks[step + 1] + self.rewards[step]\n\n def feed_forward_generator(self, advantages, num_mini_batch):\n num_steps, num_processes = self.rewards.size()[0:2]\n batch_size = num_processes * num_steps\n assert batch_size >= num_mini_batch, \"ppo req batch size to be greater than number of mini batches\"\n mini_batch_size = batch_size // num_mini_batch\n sampler = BatchSampler(\n SubsetRandomSampler(range(batch_size)),\n mini_batch_size,\n drop_last=False)\n for indices in sampler:\n indices = torch.LongTensor(indices)\n\n if advantages.is_cuda:\n indices = indices.cuda()\n\n observations_batch = self.observations[:-1].view(\n -1,\n *self.observations.size()[2:])[indices]\n states_batch = self.states[:-1].view(-1,\n self.states.size(-1))[indices]\n actions_batch = self.actions.view(-1,\n self.actions.size(-1))[indices]\n return_batch = self.returns[:-1].view(-1, 1)[indices]\n masks_batch = self.masks[:-1].view(-1, 1)[indices]\n old_action_log_probs_batch = self.action_log_probs.view(-1,\n 
1)[indices]\n adv_targ = advantages.view(-1, 1)[indices]\n\n yield observations_batch, states_batch, actions_batch, \\\n return_batch, masks_batch, old_action_log_probs_batch, adv_targ\n\n def recurrent_generator(self, advantages, num_mini_batch):\n num_processes = self.rewards.size(1)\n num_envs_per_batch = num_processes // num_mini_batch\n perm = torch.randperm(num_processes)\n for start_ind in range(0, num_processes, num_envs_per_batch):\n observations_batch = []\n states_batch = []\n actions_batch = []\n return_batch = []\n masks_batch = []\n old_action_log_probs_batch = []\n adv_targ = []\n #pdb.set_trace()\n for offset in range(num_envs_per_batch):\n ind = perm[start_ind + offset]\n observations_batch.append(self.observations[:-1, ind])\n states_batch.append(self.states[:-1, ind])\n actions_batch.append(self.actions[:, ind])\n return_batch.append(self.returns[:-1, ind])\n masks_batch.append(self.masks[:-1, ind])\n old_action_log_probs_batch.append(\n self.action_log_probs[:, ind])\n adv_targ.append(advantages[:, ind])\n #pdb.set_trace()\n observations_batch = torch.cat(observations_batch, 0)\n states_batch = torch.cat(states_batch, 0)\n actions_batch = torch.cat(actions_batch, 0)\n return_batch = torch.cat(return_batch, 0)\n masks_batch = torch.cat(masks_batch, 0)\n old_action_log_probs_batch = torch.cat(old_action_log_probs_batch,\n 0)\n adv_targ = torch.cat(adv_targ, 0)\n\n yield observations_batch, states_batch, actions_batch, \\\n return_batch, masks_batch, old_action_log_probs_batch, adv_targ\n" ]
[ [ "torch.zeros", "torch.cat", "torch.randperm", "torch.ones", "torch.LongTensor" ] ]
ajupatatero/neurasim
[ "c1d3f8163a7389b06a13e453daa98ad5157d9b2e" ]
[ "util/unit_test/potential_test/cp_potential.py" ]
[ "import numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator, MultipleLocator, MaxNLocator\nfrom matplotlib.path import Path\nfrom matplotlib.patches import PathPatch\nfrom matplotlib.colors import BoundaryNorm\nimport matplotlib.image as mpimg\n\nUinf=1\nR=15\nPI=np.pi\nalpha = 1\nw = alpha/R\n\ngamma= -w * 2*PI* R*R\n\nangle = np.linspace(0, 360, 360)\n\ncp = 1 - (4*(np.sin(angle*(PI/180) )**2) + (2*gamma*np.sin(angle *(PI/180)))/(PI*R*Uinf) + (gamma/(2*PI*R*Uinf))**2 )\n\n\n\n\nfig, ax = plt.subplots()\n\nax.plot(angle, cp, '--k')\n#ax.plot(angle, Z[edge_x,edge_y], 'ok', markersize=5)\n\n\n#ax.set_ylim(limits[0], limits[1]) \n\n#Grid\nax.xaxis.set_minor_locator(AutoMinorLocator(4))\nax.yaxis.set_minor_locator(AutoMinorLocator(4))\nax.grid(which='major', color='#CCCCCC', linestyle='-', alpha=1)\nax.grid(which='minor', color='#CCCCCC', linestyle='--', alpha=0.5)\n\nfig.savefig(f'./cp_{alpha}.png')\nplt.close()" ]
[ [ "numpy.sin", "matplotlib.ticker.AutoMinorLocator", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "numpy.linspace" ] ]
alexbjorling/acquisition-framework
[ "4090381344aabca05155612845ba4e4a47455dc3" ]
[ "contrast/detectors/LC400Buffer.py" ]
[ "from .Detector import Detector\n\nimport time\nimport numpy as np\nimport PyTango\n\n\nclass LC400Buffer(Detector):\n \"\"\"\n Class representing the LC400 piezo machine under the\n control of the LC400ScanControl Tango device, used for\n reading the flyscan positions.\n \"\"\"\n\n def __init__(self, name=None, device=None, xaxis=2, yaxis=3, zaxis=1):\n self.proxy = PyTango.DeviceProxy(device)\n Detector.__init__(self, name=name)\n self.xaxis = xaxis\n self.yaxis = yaxis\n self.zaxis = zaxis\n\n def initialize(self):\n self.proxy.init()\n\n def stop(self):\n self.proxy.Stop()\n\n def busy(self):\n ok_states = (PyTango.DevState.STANDBY, PyTango.DevState.ON)\n return not (self.proxy.State() in ok_states)\n\n def __emergency_backup(self):\n # grab these values in case we have to restart and reset\n grab_keys = (\"FlyScanMotorStartPosition\", \"FlyScanMotorEndPosition\",\n \"NumberOfIntervals\", \"GateWidth\", \"GateLatency\",\n \"FlyScanMotorAxis\")\n self.sc_params = {\n k: self.proxy.read_attribute(k).value for k in grab_keys}\n\n def __emergency_recover(self):\n ec0 = PyTango.DeviceProxy('tango/admin/b-v-nanomax-ec-0')\n ioc = PyTango.DeviceProxy('tango/admin/b-nanomax-ec-6')\n ec0.HardKillServer('LC400ScanControl/B303A')\n ioc.HardKillServer('NpointLC400/B303A')\n print('Killing the npoint devices and waiting...')\n for i in range(10):\n print('*')\n time.sleep(1)\n ioc.DevStart('NpointLC400/B303A')\n print('Starting the npoint motor device and waiting...')\n for i in range(10):\n print('*')\n time.sleep(1)\n ec0.DevStart('LC400ScanControl/B303A')\n print('Starting the npoint scancontrol device and waiting...')\n for i in range(10):\n print('*')\n time.sleep(1)\n self.initialize()\n for k, v in self.sc_params.items():\n self.proxy.write_attribute(k, v)\n self.proxy.ConfigureLC400Motion()\n self.proxy.ConfigureLC400Recorder()\n self.proxy.ConfigureStanford()\n\n def read(self):\n self.__emergency_backup()\n try:\n self.proxy.ReadLC400Buffer()\n data = {1: self.proxy.Axis1Positions,\n 2: self.proxy.Axis2Positions,\n 3: self.proxy.Axis3Positions}\n self.length = len(data[1])\n except PyTango.DevFailed:\n self.__emergency_recover()\n fake = np.ones(self.length, dtype=np.float) * -1\n data = {i: fake for i in (1, 2, 3)}\n return {'x': data[self.xaxis],\n 'y': data[self.yaxis],\n 'z': data[self.zaxis]}\n\n def start(self):\n \"\"\"\n Placeholder, this detector just reads out whatever buffer is on the\n scancontrol device. That device is managed manually from macros.\n \"\"\"\n pass\n" ]
[ [ "numpy.ones" ] ]
Exi666/MetPy
[ "c3cf8b9855e0ce7c14347e9d000fc3d531a18e1c" ]
[ "src/metpy/calc/tools.py" ]
[ "# Copyright (c) 2016,2017,2018,2019 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Contains a collection of generally useful calculation tools.\"\"\"\nimport functools\nfrom operator import itemgetter\n\nimport numpy as np\nfrom numpy.core.numeric import normalize_axis_index\nimport numpy.ma as ma\nfrom scipy.spatial import cKDTree\nimport xarray as xr\n\nfrom ..cbook import broadcast_indices, result_type\nfrom ..interpolate import interpolate_1d, log_interpolate_1d\nfrom ..package_tools import Exporter\nfrom ..units import atleast_1d, check_units, concatenate, diff, units\nfrom ..xarray import check_axis, preprocess_xarray\n\nexporter = Exporter(globals())\n\nUND = 'UND'\nUND_ANGLE = -999.\nDIR_STRS = (\n 'N', 'NNE', 'NE', 'ENE',\n 'E', 'ESE', 'SE', 'SSE',\n 'S', 'SSW', 'SW', 'WSW',\n 'W', 'WNW', 'NW', 'NNW',\n UND\n) # note the order matters!\n\nMAX_DEGREE_ANGLE = 360 * units.degree\nBASE_DEGREE_MULTIPLIER = 22.5 * units.degree\n\nDIR_DICT = {dir_str: i * BASE_DEGREE_MULTIPLIER for i, dir_str in enumerate(DIR_STRS)}\nDIR_DICT[UND] = np.nan\n\n\n@exporter.export\n@preprocess_xarray\ndef resample_nn_1d(a, centers):\n \"\"\"Return one-dimensional nearest-neighbor indexes based on user-specified centers.\n\n Parameters\n ----------\n a : array-like\n 1-dimensional array of numeric values from which to\n extract indexes of nearest-neighbors\n centers : array-like\n 1-dimensional array of numeric values representing a subset of values to approximate\n\n Returns\n -------\n An array of indexes representing values closest to given array values\n\n \"\"\"\n ix = []\n for center in centers:\n index = (np.abs(a - center)).argmin()\n if index not in ix:\n ix.append(index)\n return ix\n\n\n@exporter.export\n@preprocess_xarray\ndef nearest_intersection_idx(a, b):\n \"\"\"Determine the index of the point just before two lines with common x values.\n\n Parameters\n ----------\n a : array-like\n 1-dimensional array of y-values for line 1\n b : array-like\n 1-dimensional array of y-values for line 2\n\n Returns\n -------\n An array of indexes representing the index of the values\n just before the intersection(s) of the two lines.\n\n \"\"\"\n # Difference in the two y-value sets\n difference = a - b\n\n # Determine the point just before the intersection of the lines\n # Will return multiple points for multiple intersections\n sign_change_idx, = np.nonzero(np.diff(np.sign(difference)))\n\n return sign_change_idx\n\n\n@exporter.export\n@preprocess_xarray\n@units.wraps(('=A', '=B'), ('=A', '=B', '=B'))\ndef find_intersections(x, a, b, direction='all', log_x=False):\n \"\"\"Calculate the best estimate of intersection.\n\n Calculates the best estimates of the intersection of two y-value\n data sets that share a common x-value set.\n\n Parameters\n ----------\n x : array-like\n 1-dimensional array of numeric x-values\n a : array-like\n 1-dimensional array of y-values for line 1\n b : array-like\n 1-dimensional array of y-values for line 2\n direction : string, optional\n specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),\n or 'decreasing' (b becoming greater than a). Defaults to 'all'.\n log_x : bool, optional\n Use logarithmic interpolation along the `x` axis (i.e. for finding intersections\n in pressure coordinates). 
Default is False.\n\n Returns\n -------\n A tuple (x, y) of array-like with the x and y coordinates of the\n intersections of the lines.\n\n \"\"\"\n # Change x to logarithmic if log_x=True\n if log_x is True:\n x = np.log(x)\n\n # Find the index of the points just before the intersection(s)\n nearest_idx = nearest_intersection_idx(a, b)\n next_idx = nearest_idx + 1\n\n # Determine the sign of the change\n sign_change = np.sign(a[next_idx] - b[next_idx])\n\n # x-values around each intersection\n _, x0 = _next_non_masked_element(x, nearest_idx)\n _, x1 = _next_non_masked_element(x, next_idx)\n\n # y-values around each intersection for the first line\n _, a0 = _next_non_masked_element(a, nearest_idx)\n _, a1 = _next_non_masked_element(a, next_idx)\n\n # y-values around each intersection for the second line\n _, b0 = _next_non_masked_element(b, nearest_idx)\n _, b1 = _next_non_masked_element(b, next_idx)\n\n # Calculate the x-intersection. This comes from finding the equations of the two lines,\n # one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1),\n # finding their intersection, and reducing with a bunch of algebra.\n delta_y0 = a0 - b0\n delta_y1 = a1 - b1\n intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)\n\n # Calculate the y-intersection of the lines. Just plug the x above into the equation\n # for the line through the a points. One could solve for y like x above, but this\n # causes weirder unit behavior and seems a little less good numerically.\n intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0\n\n # If there's no intersections, return\n if len(intersect_x) == 0:\n return intersect_x, intersect_y\n\n # Return x to linear if log_x is True\n if log_x is True:\n intersect_x = np.exp(intersect_x)\n\n # Check for duplicates\n duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)\n\n # Make a mask based on the direction of sign change desired\n if direction == 'increasing':\n mask = sign_change > 0\n elif direction == 'decreasing':\n mask = sign_change < 0\n elif direction == 'all':\n return intersect_x[duplicate_mask], intersect_y[duplicate_mask]\n else:\n raise ValueError('Unknown option for direction: {0}'.format(str(direction)))\n\n return intersect_x[mask & duplicate_mask], intersect_y[mask & duplicate_mask]\n\n\ndef _next_non_masked_element(a, idx):\n \"\"\"Return the next non masked element of a masked array.\n\n If an array is masked, return the next non-masked element (if the given index is masked).\n If no other unmasked points are after the given masked point, returns none.\n\n Parameters\n ----------\n a : array-like\n 1-dimensional array of numeric values\n idx : integer\n index of requested element\n\n Returns\n -------\n Index of next non-masked element and next non-masked element\n\n \"\"\"\n try:\n next_idx = idx + a[idx:].mask.argmin()\n if ma.is_masked(a[next_idx]):\n return None, None\n else:\n return next_idx, a[next_idx]\n except (AttributeError, TypeError, IndexError):\n return idx, a[idx]\n\n\ndef _delete_masked_points(*arrs):\n \"\"\"Delete masked points from arrays.\n\n Takes arrays and removes masked points to help with calculations and plotting.\n\n Parameters\n ----------\n arrs : one or more array-like\n source arrays\n\n Returns\n -------\n arrs : one or more array-like\n arrays with masked elements removed\n\n \"\"\"\n if any(hasattr(a, 'mask') for a in arrs):\n keep = ~functools.reduce(np.logical_or, (np.ma.getmaskarray(a) for a in arrs))\n return tuple(ma.asarray(a[keep]) for a in arrs)\n 
else:\n return arrs\n\n\n@exporter.export\n@preprocess_xarray\ndef reduce_point_density(points, radius, priority=None):\n r\"\"\"Return a mask to reduce the density of points in irregularly-spaced data.\n\n This function is used to down-sample a collection of scattered points (e.g. surface\n data), returning a mask that can be used to select the points from one or more arrays\n (e.g. arrays of temperature and dew point). The points selected can be controlled by\n providing an array of ``priority`` values (e.g. rainfall totals to ensure that\n stations with higher precipitation remain in the mask). The points and radius can be\n specified with units. If none are provided, meters are assumed.\n\n Parameters\n ----------\n points : (N, K) array-like\n N locations of the points in K dimensional space\n radius : `pint.Quantity` or float\n Minimum radius allowed between points. If units are not provided, meters is assumed.\n priority : (N, K) array-like, optional\n If given, this should have the same shape as ``points``; these values will\n be used to control selection priority for points.\n\n Returns\n -------\n (N,) array-like of boolean values indicating whether points should be kept. This\n can be used directly to index numpy arrays to return only the desired points.\n\n Examples\n --------\n >>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.)\n array([ True, False, True])\n >>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.,\n ... priority=np.array([0.1, 0.9, 0.3]))\n array([False, True, False])\n\n \"\"\"\n # Handle input with units. Assume meters if units are not specified\n if hasattr(radius, 'units'):\n radius = radius.to('m').m\n\n if hasattr(points, 'units'):\n points = points.to('m').m\n\n # Handle 1D input\n if points.ndim < 2:\n points = points.reshape(-1, 1)\n\n # Make a kd-tree to speed searching of data.\n tree = cKDTree(points)\n\n # Need to use sorted indices rather than sorting the position\n # so that the keep mask matches *original* order.\n if priority is not None:\n # Need to sort the locations in decreasing priority.\n sorted_indices = np.argsort(priority)[::-1]\n else:\n # Take advantage of iterator nature of range here to avoid making big lists\n sorted_indices = range(len(points))\n\n # Keep all points initially\n keep = np.ones(len(points), dtype=np.bool)\n\n # Loop over all the potential points\n for ind in sorted_indices:\n # Only proceed if we haven't already excluded this point\n if keep[ind]:\n # Find the neighbors and eliminate them\n neighbors = tree.query_ball_point(points[ind], radius)\n keep[neighbors] = False\n\n # We just removed ourselves, so undo that\n keep[ind] = True\n\n return keep\n\n\ndef _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True):\n \"\"\"Calculate the bounding pressure and height in a layer.\n\n Given pressure, optional heights, and a bound, return either the closest pressure/height\n or interpolated pressure/height. If no heights are provided, a standard atmosphere\n ([NOAA1976]_) is assumed.\n\n Parameters\n ----------\n pressure : `pint.Quantity`\n Atmospheric pressures\n bound : `pint.Quantity`\n Bound to retrieve (in pressure or height)\n heights : `pint.Quantity`, optional\n Atmospheric heights associated with the pressure levels. Defaults to using\n heights calculated from ``pressure`` assuming a standard atmosphere.\n interpolate : boolean, optional\n Interpolate the bound or return the nearest. 
Defaults to True.\n\n Returns\n -------\n `pint.Quantity`\n The bound pressure and height.\n\n \"\"\"\n # avoid circular import if basic.py ever imports something from tools.py\n from .basic import height_to_pressure_std, pressure_to_height_std\n # Make sure pressure is monotonically decreasing\n sort_inds = np.argsort(pressure)[::-1]\n pressure = pressure[sort_inds]\n if heights is not None:\n heights = heights[sort_inds]\n\n # Bound is given in pressure\n if bound.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:\n # If the bound is in the pressure data, we know the pressure bound exactly\n if bound in pressure:\n bound_pressure = bound\n # If we have heights, we know the exact height value, otherwise return standard\n # atmosphere height for the pressure\n if heights is not None:\n bound_height = heights[pressure == bound_pressure]\n else:\n bound_height = pressure_to_height_std(bound_pressure)\n # If bound is not in the data, return the nearest or interpolated values\n else:\n if interpolate:\n bound_pressure = bound # Use the user specified bound\n if heights is not None: # Interpolate heights from the height data\n bound_height = log_interpolate_1d(bound_pressure, pressure, heights)\n else: # If not heights given, use the standard atmosphere\n bound_height = pressure_to_height_std(bound_pressure)\n else: # No interpolation, find the closest values\n idx = (np.abs(pressure - bound)).argmin()\n bound_pressure = pressure[idx]\n if heights is not None:\n bound_height = heights[idx]\n else:\n bound_height = pressure_to_height_std(bound_pressure)\n\n # Bound is given in height\n elif bound.dimensionality == {'[length]': 1.0}:\n # If there is height data, see if we have the bound or need to interpolate/find nearest\n if heights is not None:\n if bound in heights: # Bound is in the height data\n bound_height = bound\n bound_pressure = pressure[heights == bound]\n else: # Bound is not in the data\n if interpolate:\n bound_height = bound\n\n # Need to cast back to the input type since interp (up to at least numpy\n # 1.13 always returns float64. 
This can cause upstream users problems,\n # resulting in something like np.append() to upcast.\n bound_pressure = (np.interp(np.atleast_1d(bound.m), heights.m,\n pressure.m).astype(result_type(bound))\n * pressure.units)\n else:\n idx = (np.abs(heights - bound)).argmin()\n bound_pressure = pressure[idx]\n bound_height = heights[idx]\n else: # Don't have heights, so assume a standard atmosphere\n bound_height = bound\n bound_pressure = height_to_pressure_std(bound)\n # If interpolation is on, this is all we need, if not, we need to go back and\n # find the pressure closest to this and refigure the bounds\n if not interpolate:\n idx = (np.abs(pressure - bound_pressure)).argmin()\n bound_pressure = pressure[idx]\n bound_height = pressure_to_height_std(bound_pressure)\n\n # Bound has invalid units\n else:\n raise ValueError('Bound must be specified in units of length or pressure.')\n\n # If the bound is out of the range of the data, we shouldn't extrapolate\n if not (_greater_or_close(bound_pressure, np.nanmin(pressure.m) * pressure.units)\n and _less_or_close(bound_pressure, np.nanmax(pressure.m) * pressure.units)):\n raise ValueError('Specified bound is outside pressure range.')\n if heights is not None and not (_less_or_close(bound_height,\n np.nanmax(heights.m) * heights.units)\n and _greater_or_close(bound_height,\n np.nanmin(heights.m)\n * heights.units)):\n raise ValueError('Specified bound is outside height range.')\n\n return bound_pressure, bound_height\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[length]')\ndef get_layer_heights(heights, depth, *args, bottom=None, interpolate=True, with_agl=False):\n \"\"\"Return an atmospheric layer from upper air data with the requested bottom and depth.\n\n This function will subset an upper air dataset to contain only the specified layer using\n the heights only.\n\n Parameters\n ----------\n heights : array-like\n Atmospheric heights\n depth : `pint.Quantity`\n The thickness of the layer\n args : array-like\n Atmospheric variable(s) measured at the given pressures\n bottom : `pint.Quantity`, optional\n The bottom of the layer\n interpolate : bool, optional\n Interpolate the top and bottom points if they are not in the given data. Defaults\n to True.\n with_agl : bool, optional\n Returns the heights as above ground level by subtracting the minimum height in the\n provided heights. 
Defaults to False.\n\n Returns\n -------\n `pint.Quantity, pint.Quantity`\n The height and data variables of the layer\n\n \"\"\"\n # Make sure pressure and datavars are the same length\n for datavar in args:\n if len(heights) != len(datavar):\n raise ValueError('Height and data variables must have the same length.')\n\n # If we want things in AGL, subtract the minimum height from all height values\n if with_agl:\n sfc_height = np.min(heights)\n heights = heights - sfc_height\n\n # If the bottom is not specified, make it the surface\n if bottom is None:\n bottom = heights[0]\n\n # Make heights and arguments base units\n heights = heights.to_base_units()\n bottom = bottom.to_base_units()\n\n # Calculate the top of the layer\n top = bottom + depth\n\n ret = [] # returned data variables in layer\n\n # Ensure heights are sorted in ascending order\n sort_inds = np.argsort(heights)\n heights = heights[sort_inds]\n\n # Mask based on top and bottom\n inds = _greater_or_close(heights, bottom) & _less_or_close(heights, top)\n heights_interp = heights[inds]\n\n # Interpolate heights at bounds if necessary and sort\n if interpolate:\n # If we don't have the bottom or top requested, append them\n if top not in heights_interp:\n heights_interp = np.sort(np.append(heights_interp.m, top.m)) * heights.units\n if bottom not in heights_interp:\n heights_interp = np.sort(np.append(heights_interp.m, bottom.m)) * heights.units\n\n ret.append(heights_interp)\n\n for datavar in args:\n # Ensure that things are sorted in ascending order\n datavar = datavar[sort_inds]\n\n if interpolate:\n # Interpolate for the possibly missing bottom/top values\n datavar_interp = interpolate_1d(heights_interp, heights, datavar)\n datavar = datavar_interp\n else:\n datavar = datavar[inds]\n\n ret.append(datavar)\n return ret\n\n\n@exporter.export\n@preprocess_xarray\n@check_units('[pressure]')\ndef get_layer(pressure, *args, heights=None, bottom=None, depth=100 * units.hPa,\n interpolate=True):\n r\"\"\"Return an atmospheric layer from upper air data with the requested bottom and depth.\n\n This function will subset an upper air dataset to contain only the specified layer. The\n bottom of the layer can be specified with a pressure or height above the surface\n pressure. The bottom defaults to the surface pressure. The depth of the layer can be\n specified in terms of pressure or height above the bottom of the layer. If the top and\n bottom of the layer are not in the data, they are interpolated by default.\n\n Parameters\n ----------\n pressure : array-like\n Atmospheric pressure profile\n args : array-like\n Atmospheric variable(s) measured at the given pressures\n heights: array-like, optional\n Atmospheric heights corresponding to the given pressures. Defaults to using\n heights calculated from ``p`` assuming a standard atmosphere [NOAA1976]_.\n bottom : `pint.Quantity`, optional\n The bottom of the layer as a pressure or height above the surface pressure. Defaults\n to the highest pressure or lowest height given.\n depth : `pint.Quantity`, optional\n The thickness of the layer as a pressure or height above the bottom of the layer.\n Defaults to 100 hPa.\n interpolate : bool, optional\n Interpolate the top and bottom points if they are not in the given data. 
Defaults\n to True.\n\n Returns\n -------\n `pint.Quantity, pint.Quantity`\n The pressure and data variables of the layer\n\n \"\"\"\n # If we get the depth kwarg, but it's None, set it to the default as well\n if depth is None:\n depth = 100 * units.hPa\n\n # Make sure pressure and datavars are the same length\n for datavar in args:\n if len(pressure) != len(datavar):\n raise ValueError('Pressure and data variables must have the same length.')\n\n # If the bottom is not specified, make it the surface pressure\n if bottom is None:\n bottom = np.nanmax(pressure.m) * pressure.units\n\n bottom_pressure, bottom_height = _get_bound_pressure_height(pressure, bottom,\n heights=heights,\n interpolate=interpolate)\n\n # Calculate the top if whatever units depth is in\n if depth.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:\n top = bottom_pressure - depth\n elif depth.dimensionality == {'[length]': 1}:\n top = bottom_height + depth\n else:\n raise ValueError('Depth must be specified in units of length or pressure')\n\n top_pressure, _ = _get_bound_pressure_height(pressure, top, heights=heights,\n interpolate=interpolate)\n\n ret = [] # returned data variables in layer\n\n # Ensure pressures are sorted in ascending order\n sort_inds = np.argsort(pressure)\n pressure = pressure[sort_inds]\n\n # Mask based on top and bottom pressure\n inds = (_less_or_close(pressure, bottom_pressure)\n & _greater_or_close(pressure, top_pressure))\n p_interp = pressure[inds]\n\n # Interpolate pressures at bounds if necessary and sort\n if interpolate:\n # If we don't have the bottom or top requested, append them\n if not np.any(np.isclose(top_pressure, p_interp)):\n p_interp = np.sort(np.append(p_interp.m, top_pressure.m)) * pressure.units\n if not np.any(np.isclose(bottom_pressure, p_interp)):\n p_interp = np.sort(np.append(p_interp.m, bottom_pressure.m)) * pressure.units\n\n ret.append(p_interp[::-1])\n\n for datavar in args:\n # Ensure that things are sorted in ascending order\n datavar = datavar[sort_inds]\n\n if interpolate:\n # Interpolate for the possibly missing bottom/top values\n datavar_interp = log_interpolate_1d(p_interp, pressure, datavar)\n datavar = datavar_interp\n else:\n datavar = datavar[inds]\n\n ret.append(datavar[::-1])\n return ret\n\n\n@exporter.export\n@preprocess_xarray\ndef find_bounding_indices(arr, values, axis, from_below=True):\n \"\"\"Find the indices surrounding the values within arr along axis.\n\n Returns a set of above, below, good. Above and below are lists of arrays of indices.\n These lists are formulated such that they can be used directly to index into a numpy\n array and get the expected results (no extra slices or ellipsis necessary). `good` is\n a boolean array indicating the \"columns\" that actually had values to bound the desired\n value(s).\n\n Parameters\n ----------\n arr : array-like\n Array to search for values\n\n values: array-like\n One or more values to search for in `arr`\n\n axis : int\n The dimension of `arr` along which to search.\n\n from_below : bool, optional\n Whether to search from \"below\" (i.e. low indices to high indices). If `False`,\n the search will instead proceed from high indices to low indices. 
Defaults to `True`.\n\n Returns\n -------\n above : list of arrays\n List of broadcasted indices to the location above the desired value\n\n below : list of arrays\n List of broadcasted indices to the location below the desired value\n\n good : array\n Boolean array indicating where the search found proper bounds for the desired value\n\n \"\"\"\n # The shape of generated indices is the same as the input, but with the axis of interest\n # replaced by the number of values to search for.\n indices_shape = list(arr.shape)\n indices_shape[axis] = len(values)\n\n # Storage for the found indices and the mask for good locations\n indices = np.empty(indices_shape, dtype=np.int)\n good = np.empty(indices_shape, dtype=np.bool)\n\n # Used to put the output in the proper location\n store_slice = [slice(None)] * arr.ndim\n\n # Loop over all of the values and for each, see where the value would be found from a\n # linear search\n for level_index, value in enumerate(values):\n # Look for changes in the value of the test for <= value in consecutive points\n # Taking abs() because we only care if there is a flip, not which direction.\n switches = np.abs(np.diff((arr <= value).astype(np.int), axis=axis))\n\n # Good points are those where it's not just 0's along the whole axis\n good_search = np.any(switches, axis=axis)\n\n if from_below:\n # Look for the first switch; need to add 1 to the index since argmax is giving the\n # index within the difference array, which is one smaller.\n index = switches.argmax(axis=axis) + 1\n else:\n # Generate a list of slices to reverse the axis of interest so that searching from\n # 0 to N is starting at the \"top\" of the axis.\n arr_slice = [slice(None)] * arr.ndim\n arr_slice[axis] = slice(None, None, -1)\n\n # Same as above, but we use the slice to come from the end; then adjust those\n # indices to measure from the front.\n index = arr.shape[axis] - 1 - switches[tuple(arr_slice)].argmax(axis=axis)\n\n # Set all indices where the results are not good to 0\n index[~good_search] = 0\n\n # Put the results in the proper slice\n store_slice[axis] = level_index\n indices[tuple(store_slice)] = index\n good[tuple(store_slice)] = good_search\n\n # Create index values for broadcasting arrays\n above = broadcast_indices(arr, indices, arr.ndim, axis)\n below = broadcast_indices(arr, indices - 1, arr.ndim, axis)\n\n return above, below, good\n\n\ndef _greater_or_close(a, value, **kwargs):\n r\"\"\"Compare values for greater or close to boolean masks.\n\n Returns a boolean mask for values greater than or equal to a target within a specified\n absolute or relative tolerance (as in :func:`numpy.isclose`).\n\n Parameters\n ----------\n a : array-like\n Array of values to be compared\n value : float\n Comparison value\n\n Returns\n -------\n array-like\n Boolean array where values are greater than or nearly equal to value.\n\n \"\"\"\n return (a > value) | np.isclose(a, value, **kwargs)\n\n\ndef _less_or_close(a, value, **kwargs):\n r\"\"\"Compare values for less or close to boolean masks.\n\n Returns a boolean mask for values less than or equal to a target within a specified\n absolute or relative tolerance (as in :func:`numpy.isclose`).\n\n Parameters\n ----------\n a : array-like\n Array of values to be compared\n value : float\n Comparison value\n\n Returns\n -------\n array-like\n Boolean array where values are less than or nearly equal to value.\n\n \"\"\"\n return (a < value) | np.isclose(a, value, **kwargs)\n\n\n@exporter.export\n@preprocess_xarray\ndef 
lat_lon_grid_deltas(longitude, latitude, **kwargs):\n r\"\"\"Calculate the delta between grid points that are in a latitude/longitude format.\n\n Calculate the signed delta distance between grid points when the grid spacing is defined by\n delta lat/lon rather than delta x/y\n\n Parameters\n ----------\n longitude : array_like\n array of longitudes defining the grid\n latitude : array_like\n array of latitudes defining the grid\n kwargs\n Other keyword arguments to pass to :class:`~pyproj.Geod`\n\n Returns\n -------\n dx, dy:\n at least two dimensional arrays of signed deltas between grid points in the x and y\n direction\n\n Notes\n -----\n Accepts 1D, 2D, or higher arrays for latitude and longitude\n Assumes [..., Y, X] for >=2 dimensional arrays\n\n \"\"\"\n from pyproj import Geod\n\n # Inputs must be the same number of dimensions\n if latitude.ndim != longitude.ndim:\n raise ValueError('Latitude and longitude must have the same number of dimensions.')\n\n # If we were given 1D arrays, make a mesh grid\n if latitude.ndim < 2:\n longitude, latitude = np.meshgrid(longitude, latitude)\n\n geod_args = {'ellps': 'sphere'}\n if kwargs:\n geod_args = kwargs\n\n g = Geod(**geod_args)\n\n forward_az, _, dy = g.inv(longitude[..., :-1, :], latitude[..., :-1, :],\n longitude[..., 1:, :], latitude[..., 1:, :])\n dy[(forward_az < -90.) | (forward_az > 90.)] *= -1\n\n forward_az, _, dx = g.inv(longitude[..., :, :-1], latitude[..., :, :-1],\n longitude[..., :, 1:], latitude[..., :, 1:])\n dx[(forward_az < 0.) | (forward_az > 180.)] *= -1\n\n return dx * units.meter, dy * units.meter\n\n\n@exporter.export\ndef grid_deltas_from_dataarray(f):\n \"\"\"Calculate the horizontal deltas between grid points of a DataArray.\n\n Calculate the signed delta distance between grid points of a DataArray in the horizontal\n directions, whether the grid is lat/lon or x/y.\n\n Parameters\n ----------\n f : `xarray.DataArray`\n Parsed DataArray on a latitude/longitude grid, in (..., lat, lon) or (..., y, x)\n dimension order\n\n Returns\n -------\n dx, dy:\n arrays of signed deltas between grid points in the x and y directions with dimensions\n matching those of `f`.\n\n See Also\n --------\n lat_lon_grid_deltas\n\n \"\"\"\n if f.metpy.crs['grid_mapping_name'] == 'latitude_longitude':\n dx, dy = lat_lon_grid_deltas(f.metpy.x, f.metpy.y,\n initstring=f.metpy.cartopy_crs.proj4_init)\n slc_x = slc_y = tuple([np.newaxis] * (f.ndim - 2) + [slice(None)] * 2)\n else:\n dx = np.diff(f.metpy.x.metpy.unit_array.to('m').magnitude) * units('m')\n dy = np.diff(f.metpy.y.metpy.unit_array.to('m').magnitude) * units('m')\n slc = [np.newaxis] * (f.ndim - 2)\n slc_x = tuple(slc + [np.newaxis, slice(None)])\n slc_y = tuple(slc + [slice(None), np.newaxis])\n return dx[slc_x], dy[slc_y]\n\n\ndef xarray_derivative_wrap(func):\n \"\"\"Decorate the derivative functions to make them work nicely with DataArrays.\n\n This will automatically determine if the coordinates can be pulled directly from the\n DataArray, or if a call to lat_lon_grid_deltas is needed.\n \"\"\"\n @functools.wraps(func)\n def wrapper(f, **kwargs):\n if 'x' in kwargs or 'delta' in kwargs:\n # Use the usual DataArray to pint.Quantity preprocessing wrapper\n return preprocess_xarray(func)(f, **kwargs)\n elif isinstance(f, xr.DataArray):\n # Get axis argument, defaulting to first dimension\n axis = f.metpy.find_axis_name(kwargs.get('axis', 0))\n\n # Initialize new kwargs with the axis number\n new_kwargs = {'axis': f.get_axis_num(axis)}\n\n if check_axis(f[axis], 'time'):\n # 
Time coordinate, need to get time deltas\n new_kwargs['delta'] = f[axis].metpy.time_deltas\n elif check_axis(f[axis], 'longitude'):\n # Longitude coordinate, need to get grid deltas\n new_kwargs['delta'], _ = grid_deltas_from_dataarray(f)\n elif check_axis(f[axis], 'latitude'):\n # Latitude coordinate, need to get grid deltas\n _, new_kwargs['delta'] = grid_deltas_from_dataarray(f)\n else:\n # General coordinate, use as is\n new_kwargs['x'] = f[axis].metpy.unit_array\n\n # Calculate and return result as a DataArray\n result = func(f.metpy.unit_array, **new_kwargs)\n return xr.DataArray(result.magnitude,\n coords=f.coords,\n dims=f.dims,\n attrs={'units': str(result.units)})\n else:\n # Error\n raise ValueError('Must specify either \"x\" or \"delta\" for value positions when \"f\" '\n 'is not a DataArray.')\n return wrapper\n\n\n@exporter.export\n@xarray_derivative_wrap\ndef first_derivative(f, **kwargs):\n \"\"\"Calculate the first derivative of a grid of values.\n\n Works for both regularly-spaced data and grids with varying spacing.\n\n Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with\n attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or\n `delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned\n as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached\n coordinate information belonging to `axis` will be used and the derivative will be returned\n as an `xarray.DataArray`.\n\n This uses 3 points to calculate the derivative, using forward or backward at the edges of\n the grid as appropriate, and centered elsewhere. The irregular spacing is handled\n explicitly, using the formulation as specified by [Bowen2005]_.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n axis : int or str, optional\n The array axis along which to take the derivative. If `f` is ndarray-like, must be an\n integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate\n dimension name or the axis type) or integer (referring to axis number), unless using\n implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults\n to 0.\n x : array-like, optional\n The coordinate values corresponding to the grid points in `f`.\n delta : array-like, optional\n Spacing between the grid points in `f`. 
Should be one item less than the size\n of `f` along `axis`.\n\n Returns\n -------\n array-like\n The first derivative calculated along the selected axis.\n\n See Also\n --------\n second_derivative\n\n \"\"\"\n n, axis, delta = _process_deriv_args(f, kwargs)\n\n # create slice objects --- initially all are [:, :, ..., :]\n slice0 = [slice(None)] * n\n slice1 = [slice(None)] * n\n slice2 = [slice(None)] * n\n delta_slice0 = [slice(None)] * n\n delta_slice1 = [slice(None)] * n\n\n # First handle centered case\n slice0[axis] = slice(None, -2)\n slice1[axis] = slice(1, -1)\n slice2[axis] = slice(2, None)\n delta_slice0[axis] = slice(None, -1)\n delta_slice1[axis] = slice(1, None)\n\n combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]\n delta_diff = delta[tuple(delta_slice1)] - delta[tuple(delta_slice0)]\n center = (- delta[tuple(delta_slice1)] / (combined_delta * delta[tuple(delta_slice0)])\n * f[tuple(slice0)]\n + delta_diff / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])\n * f[tuple(slice1)]\n + delta[tuple(delta_slice0)] / (combined_delta * delta[tuple(delta_slice1)])\n * f[tuple(slice2)])\n\n # Fill in \"left\" edge with forward difference\n slice0[axis] = slice(None, 1)\n slice1[axis] = slice(1, 2)\n slice2[axis] = slice(2, 3)\n delta_slice0[axis] = slice(None, 1)\n delta_slice1[axis] = slice(1, 2)\n\n combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]\n big_delta = combined_delta + delta[tuple(delta_slice0)]\n left = (- big_delta / (combined_delta * delta[tuple(delta_slice0)])\n * f[tuple(slice0)]\n + combined_delta / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])\n * f[tuple(slice1)]\n - delta[tuple(delta_slice0)] / (combined_delta * delta[tuple(delta_slice1)])\n * f[tuple(slice2)])\n\n # Now the \"right\" edge with backward difference\n slice0[axis] = slice(-3, -2)\n slice1[axis] = slice(-2, -1)\n slice2[axis] = slice(-1, None)\n delta_slice0[axis] = slice(-2, -1)\n delta_slice1[axis] = slice(-1, None)\n\n combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]\n big_delta = combined_delta + delta[tuple(delta_slice1)]\n right = (delta[tuple(delta_slice1)] / (combined_delta * delta[tuple(delta_slice0)])\n * f[tuple(slice0)]\n - combined_delta / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])\n * f[tuple(slice1)]\n + big_delta / (combined_delta * delta[tuple(delta_slice1)])\n * f[tuple(slice2)])\n\n return concatenate((left, center, right), axis=axis)\n\n\n@exporter.export\n@xarray_derivative_wrap\ndef second_derivative(f, **kwargs):\n \"\"\"Calculate the second derivative of a grid of values.\n\n Works for both regularly-spaced data and grids with varying spacing.\n\n Either `x` or `delta` must be specified, or `f` must be given as an `xarray.DataArray` with\n attached coordinate and projection information. If `f` is an `xarray.DataArray`, and `x` or\n `delta` are given, `f` will be converted to a `pint.Quantity` and the derivative returned\n as a `pint.Quantity`, otherwise, if neither `x` nor `delta` are given, the attached\n coordinate information belonging to `axis` will be used and the derivative will be returned\n as an `xarray.DataArray`.\n\n This uses 3 points to calculate the derivative, using forward or backward at the edges of\n the grid as appropriate, and centered elsewhere. 
The irregular spacing is handled\n explicitly, using the formulation as specified by [Bowen2005]_.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n axis : int or str, optional\n The array axis along which to take the derivative. If `f` is ndarray-like, must be an\n integer. If `f` is a `DataArray`, can be a string (referring to either the coordinate\n dimension name or the axis type) or integer (referring to axis number), unless using\n implicit conversion to `pint.Quantity`, in which case it must be an integer. Defaults\n to 0.\n x : array-like, optional\n The coordinate values corresponding to the grid points in `f`.\n delta : array-like, optional\n Spacing between the grid points in `f`. There should be one item less than the size\n of `f` along `axis`.\n\n Returns\n -------\n array-like\n The second derivative calculated along the selected axis.\n\n See Also\n --------\n first_derivative\n\n \"\"\"\n n, axis, delta = _process_deriv_args(f, kwargs)\n\n # create slice objects --- initially all are [:, :, ..., :]\n slice0 = [slice(None)] * n\n slice1 = [slice(None)] * n\n slice2 = [slice(None)] * n\n delta_slice0 = [slice(None)] * n\n delta_slice1 = [slice(None)] * n\n\n # First handle centered case\n slice0[axis] = slice(None, -2)\n slice1[axis] = slice(1, -1)\n slice2[axis] = slice(2, None)\n delta_slice0[axis] = slice(None, -1)\n delta_slice1[axis] = slice(1, None)\n\n combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]\n center = 2 * (f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])\n - f[tuple(slice1)] / (delta[tuple(delta_slice0)]\n * delta[tuple(delta_slice1)])\n + f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))\n\n # Fill in \"left\" edge\n slice0[axis] = slice(None, 1)\n slice1[axis] = slice(1, 2)\n slice2[axis] = slice(2, 3)\n delta_slice0[axis] = slice(None, 1)\n delta_slice1[axis] = slice(1, 2)\n\n combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]\n left = 2 * (f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])\n - f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])\n + f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))\n\n # Now the \"right\" edge\n slice0[axis] = slice(-3, -2)\n slice1[axis] = slice(-2, -1)\n slice2[axis] = slice(-1, None)\n delta_slice0[axis] = slice(-2, -1)\n delta_slice1[axis] = slice(-1, None)\n\n combined_delta = delta[tuple(delta_slice0)] + delta[tuple(delta_slice1)]\n right = 2 * (f[tuple(slice0)] / (combined_delta * delta[tuple(delta_slice0)])\n - f[tuple(slice1)] / (delta[tuple(delta_slice0)] * delta[tuple(delta_slice1)])\n + f[tuple(slice2)] / (combined_delta * delta[tuple(delta_slice1)]))\n\n return concatenate((left, center, right), axis=axis)\n\n\n@exporter.export\ndef gradient(f, **kwargs):\n \"\"\"Calculate the gradient of a grid of values.\n\n Works for both regularly-spaced data, and grids with varying spacing.\n\n Either `coordinates` or `deltas` must be specified, or `f` must be given as an\n `xarray.DataArray` with attached coordinate and projection information. 
If `f` is an\n `xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a\n `pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if\n neither `coordinates` nor `deltas` are given, the attached coordinate information belonging\n to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n coordinates : array-like, optional\n Sequence of arrays containing the coordinate values corresponding to the\n grid points in `f` in axis order.\n deltas : array-like, optional\n Sequence of arrays or scalars that specify the spacing between the grid points in `f`\n in axis order. There should be one item less than the size of `f` along the applicable\n axis.\n axes : sequence, optional\n Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to\n `pint.Quantity` is not used) or integers that specify the array axes along which to\n take the derivatives. Defaults to all axes of `f`. If given, and used with\n `coordinates` or `deltas`, its length must be less than or equal to that of the\n `coordinates` or `deltas` given.\n\n Returns\n -------\n tuple of array-like\n The first derivative calculated along each specified axis of the original array\n\n See Also\n --------\n laplacian, first_derivative\n\n Notes\n -----\n If this function is used without the `axes` parameter, the length of `coordinates` or\n `deltas` (as applicable) should match the number of dimensions of `f`.\n\n \"\"\"\n pos_kwarg, positions, axes = _process_gradient_args(f, kwargs)\n return tuple(first_derivative(f, axis=axis, **{pos_kwarg: positions[ind]})\n for ind, axis in enumerate(axes))\n\n\n@exporter.export\ndef laplacian(f, **kwargs):\n \"\"\"Calculate the laplacian of a grid of values.\n\n Works for both regularly-spaced data, and grids with varying spacing.\n\n Either `coordinates` or `deltas` must be specified, or `f` must be given as an\n `xarray.DataArray` with attached coordinate and projection information. If `f` is an\n `xarray.DataArray`, and `coordinates` or `deltas` are given, `f` will be converted to a\n `pint.Quantity` and the gradient returned as a tuple of `pint.Quantity`, otherwise, if\n neither `coordinates` nor `deltas` are given, the attached coordinate information belonging\n to `axis` will be used and the gradient will be returned as a tuple of `xarray.DataArray`.\n\n Parameters\n ----------\n f : array-like\n Array of values of which to calculate the derivative\n coordinates : array-like, optional\n The coordinate values corresponding to the grid points in `f`\n deltas : array-like, optional\n Spacing between the grid points in `f`. There should be one item less than the size\n of `f` along the applicable axis.\n axes : sequence, optional\n Sequence of strings (if `f` is a `xarray.DataArray` and implicit conversion to\n `pint.Quantity` is not used) or integers that specify the array axes along which to\n take the derivatives. Defaults to all axes of `f`. 
If given, and used with\n `coordinates` or `deltas`, its length must be less than or equal to that of the\n `coordinates` or `deltas` given.\n\n Returns\n -------\n array-like\n The laplacian\n\n See Also\n --------\n gradient, second_derivative\n\n Notes\n -----\n If this function is used without the `axes` parameter, the length of `coordinates` or\n `deltas` (as applicable) should match the number of dimensions of `f`.\n\n \"\"\"\n pos_kwarg, positions, axes = _process_gradient_args(f, kwargs)\n derivs = [second_derivative(f, axis=axis, **{pos_kwarg: positions[ind]})\n for ind, axis in enumerate(axes)]\n laplac = sum(derivs)\n if isinstance(derivs[0], xr.DataArray):\n # Patch in the units that are dropped\n laplac.attrs['units'] = derivs[0].attrs['units']\n return laplac\n\n\ndef _broadcast_to_axis(arr, axis, ndim):\n \"\"\"Handle reshaping coordinate array to have proper dimensionality.\n\n This puts the values along the specified axis.\n \"\"\"\n if arr.ndim == 1 and arr.ndim < ndim:\n new_shape = [1] * ndim\n new_shape[axis] = arr.size\n arr = arr.reshape(*new_shape)\n return arr\n\n\ndef _process_gradient_args(f, kwargs):\n \"\"\"Handle common processing of arguments for gradient and gradient-like functions.\"\"\"\n axes = kwargs.get('axes', range(f.ndim))\n\n def _check_length(positions):\n if 'axes' in kwargs and len(positions) < len(axes):\n raise ValueError('Length of \"coordinates\" or \"deltas\" cannot be less than that '\n 'of \"axes\".')\n elif 'axes' not in kwargs and len(positions) != len(axes):\n raise ValueError('Length of \"coordinates\" or \"deltas\" must match the number of '\n 'dimensions of \"f\" when \"axes\" is not given.')\n\n if 'deltas' in kwargs:\n if 'coordinates' in kwargs or 'x' in kwargs:\n raise ValueError('Cannot specify both \"coordinates\" and \"deltas\".')\n _check_length(kwargs['deltas'])\n return 'delta', kwargs['deltas'], axes\n elif 'coordinates' in kwargs:\n _check_length(kwargs['coordinates'])\n return 'x', kwargs['coordinates'], axes\n elif isinstance(f, xr.DataArray):\n return 'pass', axes, axes # only the axis argument matters\n else:\n raise ValueError('Must specify either \"coordinates\" or \"deltas\" for value positions '\n 'when \"f\" is not a DataArray.')\n\n\ndef _process_deriv_args(f, kwargs):\n \"\"\"Handle common processing of arguments for derivative functions.\"\"\"\n n = f.ndim\n axis = normalize_axis_index(kwargs.get('axis', 0), n)\n\n if f.shape[axis] < 3:\n raise ValueError('f must have at least 3 point along the desired axis.')\n\n if 'delta' in kwargs:\n if 'x' in kwargs:\n raise ValueError('Cannot specify both \"x\" and \"delta\".')\n\n delta = atleast_1d(kwargs['delta'])\n if delta.size == 1:\n diff_size = list(f.shape)\n diff_size[axis] -= 1\n delta_units = getattr(delta, 'units', None)\n delta = np.broadcast_to(delta, diff_size, subok=True)\n if not hasattr(delta, 'units') and delta_units is not None:\n delta = delta * delta_units\n else:\n delta = _broadcast_to_axis(delta, axis, n)\n elif 'x' in kwargs:\n x = _broadcast_to_axis(kwargs['x'], axis, n)\n delta = diff(x, axis=axis)\n else:\n raise ValueError('Must specify either \"x\" or \"delta\" for value positions.')\n\n return n, axis, delta\n\n\n@exporter.export\n@preprocess_xarray\ndef parse_angle(input_dir):\n \"\"\"Calculate the meteorological angle from directional text.\n\n Works for abbrieviations or whole words (E -> 90 | South -> 180)\n and also is able to parse 22.5 degreee angles such as ESE/East South East\n\n Parameters\n ----------\n input_dir : string or 
array-like\n Directional text such as west, [south-west, ne], etc\n\n Returns\n -------\n `pint.Quantity`\n The angle in degrees\n\n \"\"\"\n if isinstance(input_dir, str):\n # abb_dirs = abbrieviated directions\n abb_dirs = _clean_direction([_abbrieviate_direction(input_dir)])\n elif hasattr(input_dir, '__len__'): # handle np.array, pd.Series, list, and array-like\n input_dir_str = ','.join(_clean_direction(input_dir, preprocess=True))\n abb_dir_str = _abbrieviate_direction(input_dir_str)\n abb_dirs = _clean_direction(abb_dir_str.split(','))\n else: # handle unrecognizable scalar\n return np.nan\n\n return itemgetter(*abb_dirs)(DIR_DICT)\n\n\ndef _clean_direction(dir_list, preprocess=False):\n \"\"\"Handle None if preprocess, else handles anything not in DIR_STRS.\"\"\"\n if preprocess: # primarily to remove None from list so ','.join works\n return [UND if not isinstance(the_dir, str) else the_dir\n for the_dir in dir_list]\n else: # remove extraneous abbrieviated directions\n return [UND if the_dir not in DIR_STRS else the_dir\n for the_dir in dir_list]\n\n\ndef _abbrieviate_direction(ext_dir_str):\n \"\"\"Convert extended (non-abbrievated) directions to abbrieviation.\"\"\"\n return (ext_dir_str\n .upper()\n .replace('_', '')\n .replace('-', '')\n .replace(' ', '')\n .replace('NORTH', 'N')\n .replace('EAST', 'E')\n .replace('SOUTH', 'S')\n .replace('WEST', 'W')\n )\n\n\n@exporter.export\n@preprocess_xarray\ndef angle_to_direction(input_angle, full=False, level=3):\n \"\"\"Convert the meteorological angle to directional text.\n\n Works for angles greater than or equal to 360 (360 -> N | 405 -> NE)\n and rounds to the nearest angle (355 -> N | 404 -> NNE)\n\n Parameters\n ----------\n input_angle : numeric or array-like numeric\n Angles such as 0, 25, 45, 360, 410, etc\n full : boolean\n True returns full text (South), False returns abbrieviated text (S)\n level : int\n Level of detail (3 = N/NNE/NE/ENE/E... 2 = N/NE/E/SE... 
1 = N/E/S/W)\n\n Returns\n -------\n direction\n The directional text\n\n \"\"\"\n try: # strip units temporarily\n origin_units = input_angle.units\n input_angle = input_angle.m\n except AttributeError: # no units associated\n origin_units = units.degree\n\n if not hasattr(input_angle, '__len__') or isinstance(input_angle, str):\n input_angle = [input_angle]\n scalar = True\n else:\n scalar = False\n\n # clean any numeric strings, negatives, and None\n # does not handle strings with alphabet\n input_angle = np.array(input_angle).astype(float)\n with np.errstate(invalid='ignore'): # warns about the np.nan\n input_angle[np.where(input_angle < 0)] = np.nan\n\n input_angle = input_angle * origin_units\n\n # normalizer used for angles > 360 degree to normalize between 0 - 360\n normalizer = np.array(input_angle.m / MAX_DEGREE_ANGLE.m, dtype=int)\n norm_angles = abs(input_angle - MAX_DEGREE_ANGLE * normalizer)\n\n if level == 3:\n nskip = 1\n elif level == 2:\n nskip = 2\n elif level == 1:\n nskip = 4\n else:\n err_msg = 'Level of complexity cannot be less than 1 or greater than 3!'\n raise ValueError(err_msg)\n\n angle_dict = {i * BASE_DEGREE_MULTIPLIER.m * nskip: dir_str\n for i, dir_str in enumerate(DIR_STRS[::nskip])}\n angle_dict[MAX_DEGREE_ANGLE.m] = 'N' # handle edge case of 360.\n angle_dict[UND_ANGLE] = UND\n\n # round to the nearest angles for dict lookup\n # 0.001 is subtracted so there's an equal number of dir_str from\n # np.arange(0, 360, 22.5), or else some dir_str will be preferred\n\n # without the 0.001, level=2 would yield:\n # ['N', 'N', 'NE', 'E', 'E', 'E', 'SE', 'S', 'S',\n # 'S', 'SW', 'W', 'W', 'W', 'NW', 'N']\n\n # with the -0.001, level=2 would yield:\n # ['N', 'N', 'NE', 'NE', 'E', 'E', 'SE', 'SE',\n # 'S', 'S', 'SW', 'SW', 'W', 'W', 'NW', 'NW']\n\n multiplier = np.round(\n (norm_angles / BASE_DEGREE_MULTIPLIER / nskip) - 0.001).m\n round_angles = (multiplier * BASE_DEGREE_MULTIPLIER.m * nskip)\n round_angles[np.where(np.isnan(round_angles))] = UND_ANGLE\n\n dir_str_arr = itemgetter(*round_angles)(angle_dict) # for array\n if full:\n dir_str_arr = ','.join(dir_str_arr)\n dir_str_arr = _unabbrieviate_direction(dir_str_arr)\n if not scalar:\n dir_str = dir_str_arr.split(',')\n else:\n dir_str = dir_str_arr.replace(',', ' ')\n else:\n dir_str = dir_str_arr\n\n return dir_str\n\n\ndef _unabbrieviate_direction(abb_dir_str):\n \"\"\"Convert abbrieviated directions to non-abbrieviated direction.\"\"\"\n return (abb_dir_str\n .upper()\n .replace(UND, 'Undefined ')\n .replace('N', 'North ')\n .replace('E', 'East ')\n .replace('S', 'South ')\n .replace('W', 'West ')\n .replace(' ,', ',')\n ).strip()\n\n\ndef _remove_nans(*variables):\n \"\"\"Remove NaNs from arrays that cause issues with calculations.\n\n Takes a variable number of arguments\n Returns masked arrays in the same order as provided\n \"\"\"\n mask = None\n for v in variables:\n if mask is None:\n mask = np.isnan(v)\n else:\n mask |= np.isnan(v)\n\n # Mask everyone with that joint mask\n ret = []\n for v in variables:\n ret.append(v[~mask])\n return ret\n" ]
[ [ "scipy.spatial.cKDTree", "numpy.isclose", "numpy.exp", "numpy.min", "numpy.sign", "numpy.where", "numpy.broadcast_to", "numpy.empty", "numpy.log", "numpy.ma.asarray", "numpy.nanmin", "numpy.append", "numpy.nanmax", "numpy.array", "numpy.round", "numpy.ma.getmaskarray", "numpy.argsort", "numpy.isnan", "numpy.errstate", "numpy.any", "numpy.ediff1d", "numpy.atleast_1d", "numpy.abs", "numpy.ma.is_masked", "numpy.meshgrid" ] ]
iDataAstro/MNIST_CLASSIFICATION
[ "a1114f9f990be13f76ba77ddc1e9afd894c7c101" ]
[ "src/utils/model.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras.models import Model\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport os\nimport logging\n\nfrom .common import create_directories\n\n\ndef get_prepared_model(stage: str, no_classes: int, input_shape: list, loss: str, optimizer: str, metrics: list) -> \\\n Model:\n \"\"\"Function creates ANN model and compile.\n Args:\n stage ([str]): stage of experiment\n no_classes ([INT]): No of classes for classification\n input_shape ([int, int]): Input shape for model's input layer\n loss ([str]): Loss function for model\n optimizer ([str]): Optimizer for model\n metrics ([str]): Metrics to watch while training\n Returns:\n model: ANN demo model\n \"\"\"\n # Define layers\n LAYERS = []\n BASE_LAYERS = [\n tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),\n tf.keras.layers.Dense(units=392, activation='relu', name='hidden1'),\n tf.keras.layers.Dense(units=196, activation='relu', name='hidden2'),\n tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')\n ]\n\n KERNEL_INIT_LAYERS = [\n tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),\n tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform',\n bias_initializer='zeros'),\n tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform',\n bias_initializer='zeros'),\n tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')\n ]\n\n BN_BEFORE_LAYERS = [\n tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),\n tf.keras.layers.Dense(units=392, name='hidden1', kernel_initializer='glorot_uniform'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dense(units=196, name='hidden2', kernel_initializer='glorot_uniform'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')\n ]\n\n BN_AFTER_LAYERS = [\n tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),\n tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform',\n bias_initializer='zeros'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform',\n bias_initializer='zeros'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')\n ]\n\n logging.info(\"Creating Model..\")\n if stage == 'BASE_MODEL':\n LAYERS = BASE_LAYERS\n elif stage == 'KERNEL_INIT_MODEL':\n LAYERS = KERNEL_INIT_LAYERS\n elif stage == 'BN_BEFORE_MODEL':\n LAYERS = BN_BEFORE_LAYERS\n elif stage == 'BN_AFTER_MODEL':\n LAYERS = BN_AFTER_LAYERS\n\n model_ann = tf.keras.models.Sequential(LAYERS)\n\n logging.info(\"Compiling Model..\")\n model_ann.compile(loss=loss, optimizer=optimizer, metrics=metrics)\n\n return model_ann\n\n\ndef save_model(model_dir: str, model: Model, model_suffix: str) -> None:\n \"\"\"\n args:\n model_dir: directory to save the model\n model: model object to save\n model_suffix: Suffix to save the model\n \"\"\"\n create_directories([model_dir])\n model_file = os.path.join(model_dir, f\"{model_suffix}.h5\")\n model.save(model_file)\n logging.info(f\"Saved model: {model_file}\")\n\n\ndef save_history_plot(history, plot_dir: str, stage: str) -> None:\n \"\"\"\n Args:\n history: History object for 
plotting loss/accuracy curves\n plot_dir: Directory to save plot files\n stage: Stage name for training\n \"\"\"\n pd.DataFrame(history.history).plot(figsize=(10, 8))\n plt.grid(True)\n create_directories([plot_dir])\n plot_file = os.path.join(plot_dir, stage + \"_loss_accuracy.png\")\n plt.savefig(plot_file)\n logging.info(f\"Loss accuracy plot saved: {plot_file}\")\n\n\ndef get_callbacks(checkpoint_dir: str, tensorboard_logs: str, stage: str) -> list:\n \"\"\"\n Args:\n checkpoint_dir: Directory to save the model at checkpoint\n tensorboard_logs: Directory to save tensorboard logs\n stage: Stage name for training\n Returns:\n callback_list: List of created callbacks\n \"\"\"\n create_directories([checkpoint_dir, tensorboard_logs])\n tensorboard_cb = tf.keras.callbacks.TensorBoard(tensorboard_logs)\n early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=3, restore_best_weights=True)\n ckpt_file_path = os.path.join(checkpoint_dir, f\"{stage}_ckpt_model.h5\")\n checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(filepath=ckpt_file_path, save_best_only=True)\n\n callback_list = [tensorboard_cb, early_stopping_cb, checkpoint_cb]\n logging.info(f\"Callbacks created: {callback_list}\")\n return callback_list\n" ]
[ [ "tensorflow.keras.callbacks.TensorBoard", "tensorflow.keras.layers.Flatten", "matplotlib.pyplot.grid", "matplotlib.pyplot.savefig", "tensorflow.keras.layers.Activation", "pandas.DataFrame", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.layers.Dense", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.callbacks.EarlyStopping" ] ]
saugatkandel/cvnn
[ "f6d7b5c17fd064a7eaa60e7af922914a974eb69a" ]
[ "cvnn/montecarlo.py" ]
[ "import logging\nimport os\nimport json\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport datetime\nfrom pdb import set_trace\nfrom time import sleep\nfrom openpyxl import load_workbook, Workbook\nfrom openpyxl.worksheet.table import Table\nfrom tensorflow.keras.losses import categorical_crossentropy\nfrom tensorflow.keras import Model\n# Own modules\nimport cvnn\nimport cvnn.layers as layers\nimport cvnn.dataset as dp\nfrom cvnn.data_analysis import MonteCarloAnalyzer, Plotter, get_confusion_matrix\nfrom cvnn.layers import ComplexDense, ComplexDropout\nfrom cvnn.utils import transform_to_real, randomize, transform_to_real_map_function\nfrom cvnn.real_equiv_tools import get_real_equivalent\nfrom cvnn.utils import median_error\nfrom cvnn.initializers import ComplexGlorotUniform\n# typing\nfrom pathlib import Path\nfrom typing import Union, Optional, List, Tuple\nfrom cvnn.activations import t_activation\nfrom tensorflow import data\nfrom typing import Type\n\nlogger = logging.getLogger(cvnn.__name__)\nDEFAULT_OUTPUT_ACT = 'softmax_real_with_abs'\nt_path = Union[str, Path]\n\n\nclass MonteCarlo:\n\n def __init__(self):\n \"\"\"\n Class that allows the statistical comparison of several models on the same dataset\n \"\"\"\n self.models = []\n self.pandas_full_data = pd.DataFrame()\n self.monte_carlo_analyzer = MonteCarloAnalyzer() # All at None\n self.verbose = 1\n self.output_config = {\n 'plot_all': False,\n 'confusion_matrix': False,\n 'excel_summary': True,\n 'summary_of_run': True,\n 'tensorboard': False,\n 'save_weights': False,\n 'safety_checkpoints': False\n }\n\n def add_model(self, model: Type[Model]):\n \"\"\"\n Adds a cvnn.CvnnModel to the list to then compare between them\n \"\"\"\n self.models.append(model)\n\n @staticmethod\n def _parse_verbose(verbose: Union[str, int, bool]) -> int:\n if isinstance(verbose, bool):\n verbose = 2 if verbose else 1\n elif isinstance(verbose, str):\n if verbose.lower() == 'silent':\n verbose = 0\n elif verbose.lower() == 'debug':\n verbose = 2\n else:\n raise ValueError(f\"Unknown verbose mode {verbose}\")\n else:\n try:\n verbose = int(verbose)\n if verbose > 2 or verbose < 0:\n raise ValueError(f\"verbose should be one of 0, 1 or 2, received {verbose}\")\n except Exception as e:\n raise ValueError(f\"Cannot cast verbose = {verbose} to int\")\n return verbose\n\n def run(self, x, y, data_summary: str = '',\n real_cast_modes: Optional[Union[str, List[Optional[str]], Tuple[Optional[str]]]] = None,\n validation_split: float = 0.2,\n validation_data: Optional[Union[Tuple[np.ndarray, np.ndarray], data.Dataset]] = None,\n # TODO: Add the tuple of validation data details.\n test_data: Optional[Union[Tuple[np.ndarray, np.ndarray], data.Dataset]] = None,\n iterations: int = 100, epochs: int = 10, batch_size: int = 100, early_stop: bool = False,\n shuffle: bool = True, verbose: Optional[Union[bool, int, str]] = 1, display_freq: int = 1,\n same_weights: bool = False, process_dataset: bool = True):\n \"\"\"\n This function is used to compare all models added with `self.add_model` method.\n Runs the iteration dataset (x, y).\n 1. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model.\n 2. Saves several files into ./log/montecarlo/date/of/run/\n 2.1. run_summary.txt: Summary of the run models and data\n 2.2. run_data.csv: Full information of performance of iteration of each model at each epoch\n 2.3. 
<model.name>_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch\n 2.4. (Optional with parameter plot_all)\n `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n :param x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs).\n - A tf.data dataset. Should return a tuple (inputs, targets). Preferred data type (less overhead).\n :param y: Labels/Target data. Like the input data x, it could be either Numpy array(s) or TensorFlow tensor(s).\n If f x is a dataset then y will be ignored (default None)\n :param data_summary: (String) Dataset name to keep track of it\n :param real_cast_modes: mode parameter used by cvnn.utils.transform_to_real to be used when the model to\n train is real-valued. One of the following:\n - String with the mode listed in cvnn.utils.transform_to_real to be used by all the real-valued models to\n cast complex data to real.\n - List or Tuple of strings: Same size of self.models. mode on how to cast complex data to real for each\n model in self.model.\n real_cast_modes[i] will indicate how to cast data for self.models[i] (ignored when model is complex).\n :param validation_split: Float between 0 and 1.\n Percentage of the input data to be used as test set (the rest will be use as train set)\n Default: 0.0 (No validation set).\n This input is ignored if validation_data is given.\n :param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data. This parameter takes precedence over validation_split.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param test_data: Data on which to evaluate the loss and any model metrics at the end of a model training.\n The model will not be trained on this data.\n If test data is not None (default) it will generate a file called `test_results.csv` with the\n statistical results from the test data.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param iterations: Number of iterations to be done for each model\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Integer (Default 1). 
Only relevant if validation data is provided.\n Frequency on terms of epochs before running the validation.\n :param shuffle: (Boolean) Whether to shuffle the training data before each epoch.\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param early_stop: (Default: False) Wheather to implement early stop on training.\n :param same_weights: (Default False) If True it will use the same weights at each iteration.\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n \"\"\"\n if verbose:\n self.verbose = self._parse_verbose(verbose)\n test_data_cols = None\n if test_data is not None:\n test_data_cols = ['network'] + [n.get_config()['name'] for n in self.models[0].metrics]\n real_cast_modes = self._check_real_cast_modes(real_cast_modes)\n confusion_matrix, pbar, test_results = self._beginning_callback(iterations, epochs, batch_size,\n shuffle, data_summary, test_data_cols)\n w_save = [] # TODO: Find a better method\n for model in self.models: # ATTENTION: This will make all models have the SAME weights, not ideal\n w_save.append(model.get_weights()) # Save model weight\n # np.save(self.monte_carlo_analyzer.path / \"initial_debug_weights.npy\", np.array(w_save)) # TODO\n for it in range(iterations):\n if self.verbose == 2:\n logger.info(\"Iteration {}/{}\".format(it + 1, iterations))\n for i, model in enumerate(self.models):\n x_fit, val_data_fit, test_data_fit = self._get_fit_dataset(model.inputs[0].dtype.is_complex, x,\n validation_data, test_data,\n real_cast_modes[i],\n process_dataset=process_dataset)\n clone_model = tf.keras.models.clone_model(model)\n if isinstance(model.loss, tf.keras.losses.Loss):\n loss = model.loss.__class__.from_config(config=model.loss.get_config())\n else:\n loss = model.loss\n clone_model.compile(optimizer=model.optimizer.__class__.from_config(model.optimizer.get_config()),\n loss=loss,\n metrics=['accuracy']) # TODO: Until the issue is solved, I need to force metrics\n # https://github.com/tensorflow/tensorflow/issues/40030\n # https://stackoverflow.com/questions/62116136/tensorflow-keras-metrics-not-showing/69193373\n if same_weights:\n clone_model.set_weights(w_save[i])\n temp_path = self.monte_carlo_analyzer.path / f\"run/iteration{it}_model{i}_{model.name}\"\n os.makedirs(temp_path, exist_ok=True)\n callbacks = []\n if self.output_config['tensorboard']:\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=temp_path / 'tensorboard',\n histogram_freq=1)\n callbacks.append(tensorboard_callback)\n if early_stop:\n eas = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n callbacks.append(eas)\n run_result = clone_model.fit(x_fit, y, validation_split=validation_split, validation_data=val_data_fit,\n epochs=epochs, batch_size=batch_size,\n verbose=self.verbose==2, validation_freq=display_freq,\n callbacks=callbacks, shuffle=shuffle)\n test_results = self._inner_callback(clone_model, validation_data, confusion_matrix, real_cast_modes[i],\n i, run_result, test_results, test_data_fit, temp_path)\n self._outer_callback(pbar)\n return self._end_callback(x, y, iterations, data_summary, real_cast_modes, epochs, batch_size,\n confusion_matrix, test_results, pbar, w_save)\n # TODO: What was the idea of save_weights? 
Is it necessary or it was only debugging?\n\n def _check_real_cast_modes(self, real_cast_modes):\n # TODO: I can check the real models input size corresponds to the real_cast_mode. And change it with a warning?\n if real_cast_modes is None:\n real_cast_modes = \"real_imag\"\n if isinstance(real_cast_modes, str):\n real_cast_modes = [real_cast_modes for _ in self.models]\n # I suppose then real_cast_modes is a list or tuple. Not checked TODO\n assert len(real_cast_modes) == len(self.models), \"Size of real_cast_modes should be equal to the total models\"\n return real_cast_modes\n\n @staticmethod\n def _transform_dataset(is_complex: bool, validation_data, polar):\n val_data_fit = None\n if validation_data is not None:\n if isinstance(validation_data, tf.data.Dataset):\n if not is_complex:\n val_data_fit = validation_data.map(lambda x, y: transform_to_real_map_function(x, y, mode=polar))\n else:\n val_data_fit = validation_data\n elif (is_complex and validation_data[0].dtype.is_complex) or \\\n (not is_complex and validation_data[0].dtype.is_floating):\n val_data_fit = validation_data\n elif is_complex and not validation_data[0].dtype.is_complex:\n raise NotImplementedError(f\"The input dataset is expected to be complex\")\n else:\n val_data_fit = (transform_to_real(validation_data[0], mode=polar), validation_data[1])\n return val_data_fit\n\n def _get_fit_dataset(self, is_complex: bool, x, validation_data, test_data, polar, process_dataset):\n if not process_dataset:\n return x, validation_data, test_data\n if isinstance(x, tf.data.Dataset):\n if not is_complex:\n x_fit = x.map(lambda imag, label: transform_to_real_map_function(imag, label, mode=polar))\n else:\n x_fit = x\n elif (is_complex and tf.dtypes.as_dtype(x.dtype).is_complex) or \\\n (not is_complex and tf.dtypes.as_dtype(x.dtype).is_floating):\n x_fit = x\n elif is_complex and not tf.dtypes.as_dtype(x.dtype).is_complex:\n raise NotImplementedError(f\"Cast real dataset to complex not yet implemented, \"\n f\"please provide the dataset in complex form.\")\n else:\n x_fit = transform_to_real(x, mode=polar)\n val_data_fit = self._transform_dataset(is_complex, validation_data, polar)\n test_data_fit = self._transform_dataset(is_complex, test_data, polar)\n return x_fit, val_data_fit, test_data_fit\n\n # Callbacks\n def _beginning_callback(self, iterations, epochs, batch_size, shuffle, data_summary, test_data_cols):\n confusion_matrix = None\n pbar = None\n # Reset data frame\n self.pandas_full_data = pd.DataFrame()\n if self.verbose == 1:\n pbar = tqdm(total=iterations)\n if self.output_config['confusion_matrix']:\n confusion_matrix = []\n for mdl in self.models:\n confusion_matrix.append({\"name\": mdl.name, \"matrix\": pd.DataFrame()})\n if self.output_config['summary_of_run']:\n self._save_summary_of_run(self._run_summary(iterations, epochs, batch_size, shuffle), data_summary)\n test_results = None\n if test_data_cols is not None:\n # TODO: Consider making it a tuple so it is unmutable\n test_results = pd.DataFrame(columns=test_data_cols)\n return confusion_matrix, pbar, test_results\n\n def _end_callback(self, x, y, iterations, data_summary, polar, epochs, batch_size,\n confusion_matrix, test_results, pbar, w_save):\n if self.verbose == 1:\n pbar.close()\n self.pandas_full_data = self.pandas_full_data.reset_index(drop=True)\n self.monte_carlo_analyzer.set_df(self.pandas_full_data)\n if self.output_config['save_weights']:\n np.save(self.monte_carlo_analyzer.path / \"initial_weights.npy\", np.array(w_save))\n if 
self.output_config['excel_summary']:\n try: # TODO: Think this better\n num_classes = str(y.shape[1])\n except IndexError:\n num_classes = max(y) - min(y)\n self._save_montecarlo_log(iterations=iterations,\n dataset_name=data_summary,\n num_classes=num_classes, polar_mode='Yes' if polar else 'No',\n dataset_size=str(x.shape[0]), features_size=str(x.shape[1:]),\n epochs=epochs, batch_size=batch_size\n )\n if self.output_config['confusion_matrix']:\n if confusion_matrix is not None:\n for model_cm in confusion_matrix:\n # If the first prediction does not predict a given class, the order will be wrong, so I sort it.\n cm = model_cm['matrix']\n cols = cm.columns.tolist()\n strs = list(filter(lambda x: type(x) == str, cols))\n ints = list(filter(lambda x: type(x) == int, cols))\n ints.sort()\n strs.sort()\n cm_sorted = cm.fillna(0)[ints + strs] # Sorted confusion matrix\n model_cm['matrix'] = cm_sorted.groupby(cm_sorted.index).mean()\n model_cm['matrix'].to_csv(\n self.monte_carlo_analyzer.path / (model_cm['name'] + \"_confusion_matrix.csv\"))\n if test_results is not None:\n test_results.groupby('network').describe().to_csv(self.monte_carlo_analyzer.path / (\"test_results.csv\"))\n if self.output_config['plot_all']:\n return self.monte_carlo_analyzer.do_all()\n\n def _inner_callback(self, model, validation_data, confusion_matrix, polar, model_index,\n run_result, test_results, test_data_fit, temp_path):\n # TODO: Must have save_csv_history to do the montecarlo results latter\n # Save all results\n plotter = Plotter(path=temp_path, data_results_dict=run_result.history, model_name=model.name)\n self.pandas_full_data = pd.concat([self.pandas_full_data, plotter.get_full_pandas_dataframe()], sort=False)\n if self.output_config['confusion_matrix']:\n if validation_data is not None: # TODO: Haven't yet done all cases here!\n if model.inputs[0].dtype.is_complex:\n x_test, y_test = validation_data\n else:\n x_test, y_test = (transform_to_real(validation_data[0], mode=polar), validation_data[1])\n try:\n confusion_matrix[model_index][\"matrix\"] = pd.concat((confusion_matrix[model_index][\"matrix\"],\n get_confusion_matrix(model.predict(x_test),\n y_test)))\n except ValueError:\n logger.warning(\"ValueError: Could not do confusion matrix. 
No objects to concatenate.\")\n # TODO: I think confusion matrix stopped working.\n else:\n print(\"Confusion matrix only available for validation_data\")\n if self.output_config['save_weights']:\n # model.save_weights(temp_path / \"final_weights\")\n np.save(temp_path / \"final_weights.npy\", model.get_weights())\n if test_results is not None:\n tmp_result = [model.name] + model.evaluate(x=test_data_fit[0], y=test_data_fit[1], verbose=0)\n cols = ['network'] + [n.get_config()['name'] for n in self.models[0].metrics]\n test_results = test_results.append(pd.DataFrame([tmp_result], columns=cols), ignore_index=True)\n return test_results\n\n def _outer_callback(self, pbar):\n if self.verbose == 1:\n pbar.update()\n if self.output_config['safety_checkpoints']:\n # Save checkpoint in case Monte Carlo stops in the middle\n self.pandas_full_data.to_csv(self.monte_carlo_analyzer.path / \"run_data.csv\", index=False)\n\n # Saver functions\n def _save_montecarlo_log(self, iterations, dataset_name, num_classes, polar_mode, dataset_size,\n features_size, epochs, batch_size):\n fieldnames = [\n 'iterations',\n 'dataset', '# Classes', \"Dataset Size\", 'Feature Size', # Dataset information\n 'models', 'epochs', 'batch size', \"Polar Mode\", # Models information\n 'path', \"cvnn version\" # Library information\n ]\n row_data = [\n iterations,\n dataset_name, num_classes, dataset_size, features_size,\n '-'.join([str(model.name) for model in self.models]), epochs, batch_size, polar_mode,\n str(self.monte_carlo_analyzer.path), cvnn.__version__\n ]\n _create_excel_file(fieldnames, row_data, './log/monte_carlo_summary.xlsx')\n\n @staticmethod\n def _run_summary(iterations: int, epochs: int, batch_size: int, shuffle: bool) -> str:\n ret_str = \"Monte Carlo run\\n\"\n ret_str += f\"\\tIterations: {iterations}\\n\"\n ret_str += f\"\\tepochs: {epochs}\\n\"\n ret_str += f\"\\tbatch_size: {batch_size}\\n\"\n if shuffle:\n ret_str += \"\\tShuffle data at each iteration\\n\"\n else:\n ret_str += \"\\tData is not shuffled at each iteration\\n\"\n return ret_str\n\n def _save_summary_of_run(self, run_summary, data_summary):\n \"\"\"\n Saves 2 files:\n - run_summary.txt: A user-friendly resume of the monte carlo run.\n - models_details.json: A full serialized version of the models.\n Contains info that lacks in the txt file like the loss or optimizer.\n \"\"\"\n with open(str(self.monte_carlo_analyzer.path / \"run_summary.txt\"), \"w\") as file:\n file.write(run_summary)\n file.write(data_summary + \"\\n\")\n file.write(\"Models:\\n\")\n for model in self.models:\n model.summary(print_fn=lambda x: file.write(x + '\\n'))\n json_dict = {}\n for i, model in enumerate(self.models):\n json_dict[str(i)] = {\n 'name': model.name,\n 'loss': model.loss if isinstance(model.loss, str) else model.loss.get_config(), # Not yet support function loss\n 'optimizer': model.optimizer.get_config(),\n 'layers': [layer.get_config() for layer in model.layers]\n }\n with open(self.monte_carlo_analyzer.path / 'models_details.json', 'w') as fp:\n json.dump(str(json_dict), fp)\n\n\nclass RealVsComplex(MonteCarlo):\n \"\"\"\n Inherits from MonteCarlo. Compares a complex model with it's real equivalent.\n\n Example usage:\n ```\n # Assume you already have complex data 'x' with its labels 'y'... 
and a Cvnn model.\n\n montecarlo = RealVsComplex(complex_model)\n montecarlo.run(x, y)\n ```\n \"\"\"\n\n def __init__(self, complex_model: Type[Model], capacity_equivalent: bool = True, equiv_technique: str = 'ratio'):\n \"\"\"\n :param complex_model: Complex keras model (ex: sequential)\n :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or\n trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351)\n - True, it creates a capacity-equivalent model in terms of trainable parameters\n - False, it will double all layer size (except the last one if classifier=True)\n :param equiv_technique: Used to define the strategy of the capacity equivalent model.\n This parameter is ignored if capacity_equivalent=False\n - 'ratio': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i], 'r' constant for all 'i'\n - 'alternate': Method described in https://arxiv.org/abs/1811.12351 where one alternates between\n multiplying by 2 or 1. Special case on the middle is treated as a compromise between the two.\n \"\"\"\n super().__init__()\n # add models\n self.add_model(complex_model)\n self.add_model(get_real_equivalent(complex_model, capacity_equivalent=capacity_equivalent,\n equiv_technique=equiv_technique, name=\"real_network\"))\n\n def _save_montecarlo_log(self, iterations, dataset_name, num_classes, polar_mode, dataset_size,\n features_size, epochs, batch_size):\n max_epoch = self.pandas_full_data['epoch'].max()\n epoch_filter = self.pandas_full_data['epoch'] == max_epoch\n complex_filter = self.pandas_full_data['network'] == self.models[0].name\n real_filter = self.pandas_full_data['network'] == self.models[1].name\n complex_last_epochs = self.pandas_full_data[epoch_filter & complex_filter]\n real_last_epochs = self.pandas_full_data[epoch_filter & real_filter]\n complex_median_train = complex_last_epochs['accuracy'].median()\n real_median_train = real_last_epochs['accuracy'].median()\n complex_median = complex_last_epochs['val_accuracy'].median()\n real_median = real_last_epochs['val_accuracy'].median()\n complex_err = median_error(complex_last_epochs['accuracy'].quantile(.75),\n complex_last_epochs['accuracy'].quantile(.25), iterations)\n real_err = median_error(real_last_epochs['val_accuracy'].quantile(.75),\n real_last_epochs['val_accuracy'].quantile(.25), iterations)\n fieldnames = ['iterations', 'dataset', '# Classes', \"Dataset Size\", 'Feature Size', \"Polar Mode\",\n \"Optimizer\", \"Loss\",\n 'epochs', 'batch size',\n \"Winner\", \"CVNN val median\", \"RVNN val median\", 'CVNN err', 'RVNN err',\n \"CVNN train median\", \"RVNN train median\",\n 'path', \"cvnn version\"\n ]\n row_data = [iterations, dataset_name, num_classes, dataset_size, features_size, polar_mode,\n # Dataset information\n str(tf.keras.losses.serialize(self.models[0].loss)),\n str(tf.keras.optimizers.serialize(self.models[0].optimizer)),\n epochs, batch_size, # Model information\n 'CVNN' if complex_median > real_median else 'RVNN',\n complex_median, real_median, complex_err, real_err, # Preliminary results\n complex_median_train, real_median_train,\n str(self.monte_carlo_analyzer.path), cvnn.__version__\n ]\n percentage_cols = ['P', 'Q', 'L', 'M']\n _create_excel_file(fieldnames, row_data, './log/rvnn_vs_cvnn_monte_carlo_summary.xlsx',\n percentage_cols=percentage_cols)\n\n\n# ====================================\n# Monte Carlo simulation methods\n# ====================================\n\ndef run_gaussian_dataset_montecarlo(iterations: int = 
30, m: int = 10000, n: int = 128, param_list=None,\n epochs: int = 300, batch_size: int = 100, display_freq: int = 1,\n optimizer='sgd', validation_split: float = 0.2, # TODO: Add typing here\n shape_raw: List[int] = None, activation: t_activation = 'cart_relu',\n verbose: bool = False, do_all: bool = True, tensorboard: bool = False,\n polar: Optional[Union[str, List[Optional[str]], Tuple[Optional[str]]]] = None,\n capacity_equivalent: bool = True, equiv_technique: str = 'ratio',\n dropout: Optional[float] = None, models: Optional[List[Model]] = None,\n plot_data: bool = True, early_stop: bool = False, shuffle: bool = True) -> str:\n \"\"\"\n This function is used to compare CVNN vs RVNN performance over statistical non-circular data.\n 1. Generates a complex-valued gaussian correlated noise with the characteristics given by the inputs.\n 2. It then runs a monte carlo simulation of several iterations of both CVNN and an equivalent RVNN model.\n 3. Saves several files into ./log/montecarlo/date/of/run/\n 3.1. run_summary.txt: Summary of the run models and data\n 3.2. run_data.csv: Full information of performance of iteration of each model at each epoch\n 3.3. complex_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch\n 3.4. real_network_statistical_result.csv: Statistical results of all iterations of RVNN per epoch\n 3.5. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n :param iterations: Number of iterations to be done for each model\n :param m: Total size of the dataset (number of examples)\n :param n: Number of features / input vector\n :param param_list: A list of len = number of classes.\n Each element of the list is another list of len = 3 with values: [correlation_coeff, sigma_x, sigma_y]\n Example for dataset type A of paper https://arxiv.org/abs/2009.08340:\n param_list = [\n [0.5, 1, 1],\n [-0.5, 1, 1]\n ]\n Default: None will default to the example.\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Frequency in terms of epochs of when to do a checkpoint.\n :param optimizer: Optimizer to be used. Keras optimizers are not allowed.\n Can be either cvnn.optimizers.Optimizer or a string listed in opt_dispatcher.\n :param validation_split: float between 0 and 1. 
Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss\n and any model metrics on this data at the end of each epoch.\n The validation data is selected from the last samples in the x and y data provided, before shuffling.\n This argument is not supported when x is a dataset, generator or keras.utils.Sequence instance.\n :param shape_raw: List of sizes of each hidden layer.\n For example [64] will generate a CVNN with one hidden layer of size 64.\n Default None will default to example.\n :param activation: Activation function to be used at each hidden layer\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param tensorboard: If True, it will generate tensorboard outputs to check training values.\n :param polar: Boolean weather the RVNN should receive real and imaginary part (False) or amplitude and phase (True)\n :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()\n :param dropout: (float) Dropout to be used at each hidden layer. If None it will not use any dropout.\n :param models: List of models to be compared.\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n \"\"\"\n # Get parameters\n if param_list is None:\n param_list = [\n [0.3, 1, 1],\n [-0.3, 1, 1]\n ]\n dataset = dp.CorrelatedGaussianCoeffCorrel(m, n, param_list, debug=False)\n print(\"Database loaded...\")\n if models is not None:\n return run_montecarlo(models=models, dataset=dataset, open_dataset=None,\n iterations=iterations, epochs=epochs, batch_size=batch_size, display_freq=display_freq,\n validation_split=validation_split, validation_data=None,\n verbose=verbose, polar=polar, do_all=do_all, tensorboard=tensorboard, do_conf_mat=False,\n plot_data=plot_data, early_stop=early_stop, shuffle=shuffle)\n else:\n return mlp_run_real_comparison_montecarlo(dataset=dataset, open_dataset=None, iterations=iterations,\n epochs=epochs, batch_size=batch_size, display_freq=display_freq,\n optimizer=optimizer, shape_raw=shape_raw, activation=activation,\n verbose=verbose, polar=polar, do_all=do_all,\n tensorboard=tensorboard,\n capacity_equivalent=capacity_equivalent,\n equiv_technique=equiv_technique,\n dropout=dropout, validation_split=validation_split,\n plot_data=plot_data)\n\n\ndef run_montecarlo(models: List[Model], dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path] = None,\n iterations: int = 30,\n epochs: int = 300, batch_size: int = 100, display_freq: int = 1,\n validation_split: float = 0.2,\n validation_data: Optional[Union[Tuple, data.Dataset]] = None,\n # TODO: Add vallidation data tuple details\n verbose: Union[bool, int] = False, do_conf_mat: bool = False, do_all: bool = True,\n tensorboard: bool = False,\n polar: Optional[Union[str, List[Optional[str]], Tuple[Optional[str]]]] = None,\n plot_data: bool = False, early_stop: bool = False, shuffle: bool = True,\n preprocess_data: bool = True) -> str:\n \"\"\"\n This function is used to compare different neural networks performance.\n 1. Runs simulation and compares them.\n 2. Saves several files into ./log/montecarlo/date/of/run/\n 2.1. run_summary.txt: Summary of the run models and data\n 2.2. 
run_data.csv: Full information of performance of iteration of each model at each epoch\n 2.3. <model_name>_statistical_result.csv: Statistical results of all iterations of each model per epoch\n 2.4. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n\n :param models: List of cvnn.CvnnModel to be compared.\n :param dataset: cvnn.dataset.Dataset with the dataset to be used on the training\n :param open_dataset: (Default: None)\n If dataset is saved inside a folder and must be opened, path of the Dataset to be opened. Else None (default)\n :param iterations: Number of iterations to be done for each model\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Frequency in terms of epochs of when to do a checkpoint.\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param polar: Boolean weather the RVNN should receive real and imaginary part (False) or amplitude and phase (True)\n :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()\n :param validation_split: Float between 0 and 1.\n Percentage of the input data to be used as test set (the rest will be use as train set)\n Default: 0.0 (No validation set).\n This input is ignored if validation_data is given.\n :param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data. This parameter takes precedence over validation_split.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param do_conf_mat: Generate a confusion matrix based on results.\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n \"\"\"\n if open_dataset:\n dataset = dp.OpenDataset(open_dataset) # Warning, open_dataset overwrites dataset\n\n # Monte Carlo\n monte_carlo = MonteCarlo()\n for model in models:\n # model.training_param_summary()\n monte_carlo.add_model(model)\n if not open_dataset and isinstance(dataset, dp.Dataset):\n dataset.save_data(monte_carlo.monte_carlo_analyzer.path)\n monte_carlo.output_config['excel_summary'] = False\n monte_carlo.output_config['tensorboard'] = tensorboard\n monte_carlo.output_config['confusion_matrix'] = do_conf_mat\n monte_carlo.output_config['plot_all'] = do_all\n if plot_data and isinstance(dataset, dp.Dataset):\n dataset.plot_data(overlapped=True, showfig=False, save_path=monte_carlo.monte_carlo_analyzer.path,\n library='matplotlib')\n if isinstance(dataset, dp.Dataset):\n x = dataset.x\n y = dataset.y\n data_summary = dataset.summary()\n else:\n x = dataset\n y = None\n data_summary = \"\"\n monte_carlo.run(x, y, iterations=iterations,\n validation_split=validation_split, validation_data=validation_data,\n epochs=epochs, batch_size=batch_size, display_freq=display_freq, early_stop=early_stop,\n shuffle=shuffle, verbose=verbose, data_summary=data_summary, real_cast_modes=polar,\n process_dataset=preprocess_data)\n\n # Save data to remember later what I did.\n _save_montecarlo_log(iterations=iterations,\n path=str(monte_carlo.monte_carlo_analyzer.path),\n models_names=[str(model.name) for model in models],\n dataset_name=data_summary,\n 
num_classes=str(dataset.y.shape[1]) if isinstance(dataset, dp.Dataset) else \"\", # TODO: GET THIS\n polar_mode=str(polar),\n dataset_size=str(dataset.x.shape[0]) if isinstance(dataset, dp.Dataset) else \"\",\n features_size=str(dataset.x.shape[1]) if isinstance(dataset, dp.Dataset) else \"\",\n epochs=epochs, batch_size=batch_size\n # filename='./log/run_data.csv'\n )\n return str(\"./log/run_data.csv\")\n\n\ndef mlp_run_real_comparison_montecarlo(dataset: cvnn.dataset.Dataset, open_dataset: Optional[t_path] = None,\n iterations: int = 30,\n epochs: int = 300, batch_size: int = 100, display_freq: int = 1,\n optimizer='adam', # TODO: Typing\n shape_raw=None, activation: t_activation = 'cart_relu',\n output_activation: t_activation = DEFAULT_OUTPUT_ACT,\n verbose: Union[bool, int] = False, do_all: bool = True,\n polar: Optional[Union[str, List[Optional[str]], Tuple[Optional[str]]]] = None,\n dropout: float = 0.5, validation_split: float = 0.2,\n validation_data: Optional[Union[Tuple, data.Dataset]] = None,\n # TODO: Add typing of tuple\n capacity_equivalent: bool = True, equiv_technique: str = 'ratio',\n shuffle: bool = True, tensorboard: bool = False, do_conf_mat: bool = False,\n plot_data: bool = True) -> str:\n \"\"\"\n This function is used to compare CVNN vs RVNN performance over any dataset.\n 1. Automatically creates two Multi-Layer Perceptrons (MLP), one complex and one real.\n 2. Runs simulation and compares them.\n 3. Saves several files into ./log/montecarlo/date/of/run/\n 3.1. run_summary.txt: Summary of the run models and data\n 3.2. run_data.csv: Full information of performance of iteration of each model at each epoch\n 3.3. complex_network_statistical_result.csv: Statistical results of all iterations of CVNN per epoch\n 3.4. real_network_statistical_result.csv: Statistical results of all iterations of RVNN per epoch\n 3.5. (Optional) `plot/` folder with the corresponding plots generated by MonteCarloAnalyzer.do_all()\n\n :param dataset: cvnn.dataset.Dataset with the dataset to be used on the training\n :param open_dataset: (None)\n If dataset is saved inside a folder and must be opened, path of the Dataset to be opened. Else None (default)\n :param iterations: Number of iterations to be done for each model\n :param epochs: Number of epochs for each iteration\n :param batch_size: Batch size at each iteration\n :param display_freq: Frequency in terms of epochs of when to do a checkpoint.\n :param optimizer: Optimizer to be used. Keras optimizers are not allowed.\n Can be either cvnn.optimizers.Optimizer or a string listed in opt_dispatcher.\n :param shape_raw: List of sizes of each hidden layer.\n For example [64] will generate a CVNN with one hidden layer of size 64.\n Default None will default to example.\n :param activation: Activation function to be used at each hidden layer\n :param verbose: Different modes according to number:\n - 0 or 'silent': No output at all\n - 1 or False: Progress bar per iteration\n - 2 or True or 'debug': Progress bar per epoch\n :param polar: Boolean weather the RVNN should receive real and imaginary part (False) or amplitude and phase (True)\n :param do_all: If true (default) it creates a `plot/` folder with the plots generated by MonteCarloAnalyzer.do_all()\n :param dropout: (float) Dropout to be used at each hidden layer. 
If None it will not use any dropout.\n :param validation_split: Float between 0 and 1.\n Percentage of the input data to be used as test set (the rest will be use as train set)\n Default: 0.0 (No validation set).\n This input is ignored if validation_data is given.\n :param validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data. This parameter takes precedence over validation_split.\n It can be:\n - tuple (x_val, y_val) of Numpy arrays or tensors. Preferred data type (less overhead).\n - A tf.data dataset.\n :param capacity_equivalent: An equivalent model can be equivalent in terms of layer neurons or\n trainable parameters (capacity equivalent according to: https://arxiv.org/abs/1811.12351)\n - True, it creates a capacity-equivalent model in terms of trainable parameters\n - False, it will double all layer size (except the last one if classifier=True)\n :param equiv_technique: Used to define the strategy of the capacity equivalent model.\n This parameter is ignored if capacity_equivalent=False\n - 'ratio': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i], 'r' constant for all 'i'\n - 'alternate': Method described in https://arxiv.org/abs/1811.12351 where one alternates between\n multiplying by 2 or 1. Special case on the middle is treated as a compromise between the two.\n :param shuffle: TODO\n :return: (string) Full path to the run_data.csv generated file.\n It can be used by cvnn.data_analysis.SeveralMonteCarloComparison to compare several runs.\n \"\"\"\n if shape_raw is None:\n shape_raw = [64]\n if open_dataset:\n dataset = dp.OpenDataset(open_dataset) # Warning, open_dataset overwrites dataset\n input_size = dataset.x.shape[1] # Size of input\n output_size = dataset.y.shape[1] # Size of output\n complex_network = get_mlp(input_size=input_size, output_size=output_size,\n shape_raw=shape_raw, activation=activation, dropout=dropout,\n output_activation=output_activation, optimizer=optimizer)\n\n # Monte Carlo\n monte_carlo = RealVsComplex(complex_network,\n capacity_equivalent=capacity_equivalent, equiv_technique=equiv_technique)\n monte_carlo.output_config['tensorboard'] = tensorboard\n # monte_carlo.output_config['confusion_matrix'] = do_conf_mat\n monte_carlo.output_config['plot_all'] = do_all\n monte_carlo.output_config['excel_summary'] = False\n monte_carlo.output_config['confusion_matrix'] = do_conf_mat\n if plot_data:\n dataset.plot_data(overlapped=True, showfig=False, save_path=monte_carlo.monte_carlo_analyzer.path,\n library='matplotlib')\n sleep(1) # I have error if not because not enough time passed since creation of models to be in diff folders\n monte_carlo.run(dataset.x, dataset.y, iterations=iterations,\n epochs=epochs, batch_size=batch_size, display_freq=display_freq,\n shuffle=shuffle, verbose=verbose, data_summary=dataset.summary(), real_cast_modes=polar,\n validation_split=validation_split, validation_data=validation_data)\n\n # Save data to remember later what I did.\n max_epoch = monte_carlo.pandas_full_data['epoch'].max()\n epoch_filter = monte_carlo.pandas_full_data['epoch'] == max_epoch\n complex_filter = monte_carlo.pandas_full_data['network'] == \"complex_network\"\n real_filter = monte_carlo.pandas_full_data['network'] == \"real_network\"\n complex_last_epochs = monte_carlo.pandas_full_data[epoch_filter & complex_filter]\n real_last_epochs = monte_carlo.pandas_full_data[epoch_filter & real_filter]\n complex_median_train = 
complex_last_epochs['accuracy'].median()\n real_median_train = real_last_epochs['accuracy'].median()\n try:\n complex_median = complex_last_epochs['val_accuracy'].median()\n real_median = real_last_epochs['val_accuracy'].median()\n complex_err = median_error(complex_last_epochs['val_accuracy'].quantile(.75),\n complex_last_epochs['val_accuracy'].quantile(.25), iterations)\n real_err = median_error(real_last_epochs['val_accuracy'].quantile(.75),\n real_last_epochs['val_accuracy'].quantile(.25), iterations)\n winner = 'CVNN' if complex_median > real_median else 'RVNN'\n except KeyError:\n complex_median = None\n real_median = None\n complex_err = median_error(complex_last_epochs['accuracy'].quantile(.75),\n complex_last_epochs['accuracy'].quantile(.25), iterations)\n real_err = median_error(real_last_epochs['accuracy'].quantile(.75),\n real_last_epochs['accuracy'].quantile(.25), iterations)\n if complex_median_train > real_median_train:\n winner = 'CVNN'\n elif complex_median_train == real_median_train:\n winner = None\n else:\n winner = 'RVNN'\n _save_rvnn_vs_cvnn_montecarlo_log(\n iterations=iterations,\n path=str(monte_carlo.monte_carlo_analyzer.path),\n dataset_name=dataset.dataset_name,\n optimizer=str(complex_network.optimizer.__class__),\n loss=str(complex_network.loss.__class__),\n hl=str(len(shape_raw)), shape=str(shape_raw),\n dropout=str(dropout), num_classes=str(dataset.y.shape[1]),\n polar_mode=str(polar),\n activation=activation,\n dataset_size=str(dataset.x.shape[0]), feature_size=str(dataset.x.shape[1]),\n epochs=epochs, batch_size=batch_size,\n winner=winner,\n complex_median=complex_median, real_median=real_median,\n complex_median_train=complex_median_train, real_median_train=real_median_train,\n complex_err=complex_err, real_err=real_err,\n filename='./log/mlp_montecarlo_summary.xlsx'\n )\n return str(monte_carlo.monte_carlo_analyzer.path / \"run_data.csv\")\n\n\ndef get_mlp(input_size, output_size,\n shape_raw=None, activation=\"cart_relu\", dropout=0.5,\n output_activation='softmax_real_with_abs', optimizer=\"sgd\", name=\"complex_network\"):\n if shape_raw is None:\n shape_raw = [100, 50]\n shape = [\n layers.ComplexInput(input_shape=input_size)\n ]\n if len(shape_raw) == 0:\n shape.append(\n ComplexDense(units=output_size, activation=output_activation, input_dtype=np.complex64)\n )\n else: # len(shape_raw) > 0:\n for s in shape_raw:\n shape.append(ComplexDense(units=s, activation=activation))\n if dropout is not None:\n shape.append(ComplexDropout(rate=dropout))\n shape.append(ComplexDense(units=output_size, activation=output_activation))\n\n complex_network = tf.keras.Sequential(shape, name=name)\n complex_network.compile(optimizer=optimizer, loss=tf.keras.losses.CategoricalCrossentropy(), metrics=['accuracy'])\n return complex_network\n\n\n# ====================================\n# Excel logging\n# ====================================\ndef _create_excel_file(fieldnames: List[str], row_data: List, filename: Optional[t_path] = None,\n percentage_cols: Optional[List[str]] = None):\n if filename is None:\n filename = './log/montecarlo_summary.xlsx'\n file_exists = os.path.isfile(filename)\n if file_exists:\n wb = load_workbook(filename)\n ws = wb.worksheets[0]\n del ws.tables[\"Table1\"]\n else:\n wb = Workbook()\n ws = wb.worksheets[0]\n ws.append(fieldnames)\n ws.append(row_data)\n # TODO: What if len(row_data) is longer than the dictionary? 
It corresponds with excel's column names?\n tab = Table(displayName=\"Table1\", ref=\"A1:\" + str(chr(64 + len(row_data))) + str(ws.max_row))\n if percentage_cols is not None:\n for col in percentage_cols:\n ws[col + str(ws.max_row)].number_format = '0.00%'\n ws.add_table(tab)\n wb.save(filename)\n\n\ndef _save_rvnn_vs_cvnn_montecarlo_log(iterations, path, dataset_name, hl, shape, dropout, num_classes, polar_mode,\n activation, optimizer, loss,\n dataset_size, feature_size, epochs, batch_size, winner,\n complex_median, real_median, complex_err, real_err,\n complex_median_train, real_median_train,\n comments='', filename=None):\n fieldnames = ['iterations', 'dataset', '# Classes', \"Dataset Size\", 'Feature Size', \"Polar Mode\", \"Optimizer\",\n \"Loss\",\n 'HL', 'Shape', 'Dropout', \"Activation Function\", 'epochs', 'batch size',\n \"Winner\", \"CVNN median\", \"RVNN median\", 'CVNN err', 'RVNN err',\n \"CVNN train median\", \"RVNN train median\",\n 'path', \"cvnn version\", \"Comments\"\n ]\n row_data = [iterations, dataset_name, num_classes, dataset_size, feature_size, polar_mode, # Dataset information\n optimizer, str(loss), hl, shape, dropout, activation, epochs, batch_size, # Model information\n winner, complex_median, real_median, complex_err, real_err, # Preliminary results\n complex_median_train, real_median_train,\n path, cvnn.__version__, comments # Library information\n ]\n percentage_cols = ['P', 'Q', 'R', 'S', 'T', 'U']\n _create_excel_file(fieldnames, row_data, filename, percentage_cols=percentage_cols)\n\n\ndef _save_montecarlo_log(iterations, path, dataset_name, models_names, num_classes, polar_mode, dataset_size,\n features_size, epochs, batch_size, filename=None):\n fieldnames = [\n 'iterations',\n 'dataset', '# Classes', \"Dataset Size\", 'Feature Size', # Dataset information\n 'models', 'epochs', 'batch size', \"Polar Mode\", # Models information\n 'path', \"cvnn version\" # Library information\n ]\n row_data = [\n iterations,\n dataset_name, num_classes, dataset_size, features_size,\n '-'.join(models_names), epochs, batch_size, polar_mode,\n path, cvnn.__version__\n ]\n _create_excel_file(fieldnames, row_data, filename)\n\n\nif __name__ == \"__main__\":\n # Base case with one hidden layer size 64 and dropout 0.5\n run_gaussian_dataset_montecarlo(iterations=10, dropout=0.5)\n" ]
[ [ "tensorflow.keras.callbacks.TensorBoard", "numpy.array", "tensorflow.keras.optimizers.serialize", "pandas.DataFrame", "tensorflow.keras.Sequential", "tensorflow.dtypes.as_dtype", "tensorflow.keras.losses.serialize", "tensorflow.keras.losses.CategoricalCrossentropy", "tensorflow.keras.models.clone_model", "tensorflow.keras.callbacks.EarlyStopping" ] ]
Farzanehkaji/MINet
[ "cc2852cb2b3b20208f5edf38ec6952363a9b04a7" ]
[ "code/utils/metric.py" ]
[ "# -*- coding: utf-8 -*-\n# @Time : 2019/7/18 上午9:54\n# @Author : Lart Pang\n# @FileName: metric.py\n# @Project : MINet\n# @GitHub : https://github.com/lartpang\n\nimport numpy as np\n\n\ndef cal_pr_mae_meanf(prediction, gt):\n assert prediction.dtype == np.uint8\n assert gt.dtype == np.uint8\n assert prediction.shape == gt.shape\n\n # 确保图片和真值相同 ##################################################\n # if prediction.shape != gt.shape:\n # prediction = Image.fromarray(prediction).convert('L')\n # gt_temp = Image.fromarray(gt).convert('L')\n # prediction = prediction.resize(gt_temp.size)\n # prediction = np.array(prediction)\n\n # 获得需要的预测图和二值真值 ###########################################\n if prediction.max() == prediction.min():\n prediction = prediction / 255\n else:\n prediction = (prediction - prediction.min()) / (prediction.max() - prediction.min())\n hard_gt = np.zeros_like(gt)\n hard_gt[gt > 128] = 1\n\n # MAE ##################################################################\n mae = np.mean(np.abs(prediction - hard_gt))\n\n # MeanF ################################################################\n threshold_fm = 2 * prediction.mean()\n if threshold_fm > 1:\n threshold_fm = 1\n binary = np.zeros_like(prediction)\n binary[prediction >= threshold_fm] = 1\n tp = (binary * hard_gt).sum()\n if tp == 0:\n meanf = 0\n else:\n pre = tp / binary.sum()\n rec = tp / hard_gt.sum()\n meanf = 1.3 * pre * rec / (0.3 * pre + rec)\n\n # PR curve #############################################################\n t = np.sum(hard_gt)\n precision, recall = [], []\n for threshold in range(256):\n threshold = threshold / 255.0\n hard_prediction = np.zeros_like(prediction)\n hard_prediction[prediction >= threshold] = 1\n\n tp = np.sum(hard_prediction * hard_gt)\n p = np.sum(hard_prediction)\n if tp == 0:\n precision.append(0)\n recall.append(0)\n else:\n precision.append(tp / p)\n recall.append(tp / t)\n\n precision = np.reshape(precision,(256,1))\n recall = np.reshape(recall,(256,1))\n\n return precision, recall, mae, meanf\n\n\n# MaxF #############################################################\ndef cal_maxf(ps, rs):\n assert len(ps) == 256\n assert len(rs) == 256\n maxf = []\n for p, r in zip(ps, rs):\n if p == 0 or r == 0:\n maxf.append(0)\n else:\n maxf.append(1.3 * p * r / (0.3 * p + r))\n\n return max(maxf)\n" ]
[ [ "numpy.sum", "numpy.zeros_like", "numpy.reshape", "numpy.abs" ] ]
alexji/turbopy
[ "b48360451b0f4a2725117c77a23367283c6326c1" ]
[ "turbopy/interp_marcs/testmies.py" ]
[ "from interp_marcs_alpha_v6 import interp_marcs\nimport numpy as np\nimport time\n\ninput_model_path='/project2/alexji/MARCS'\noutput_model_path='test-MARCS'\n\nteff_arr = [3200,3300,3400,3500,3600,3700,3800,3900,4000,4250,4500,4750,\n 5000]\nlogg_arr = np.arange(0., 5.5, 0.5)\nfeh_arr = np.arange(-4., 1.5, 0.5)\nalphafe_arr = [-0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.,\n 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]\n\nteff_arr = [3400,4000,4281,5000]\nlogg_arr = [0.0, 0.47, 0.88,2.0]\nfeh_arr = [-3.77, -2.85, -1.23,-1.5]\nalphafe_arr = [-0.77, -0.12, 0.23, 0.66,0.4]\n\n\nif __name__==\"__main__\":\n start = time.time()\n for teff in teff_arr:\n for logg in logg_arr:\n for feh in feh_arr:\n for alphafe in alphafe_arr:\n \n print(teff, logg, feh, alphafe)\n \n interp_marcs(teff, logg, feh, alphafe,\n output_model_path=output_model_path,\n input_model_path=input_model_path,\n check_file_exists=True, extrapol=True,\n geometry='sph')\n print(\"took\",time.time()-start,\"s\")\n" ]
[ [ "numpy.arange" ] ]
olaals/masteroppgave2
[ "9fc181325b6e3ef74d81cdb323d3e47a79bb889e", "9fc181325b6e3ef74d81cdb323d3e47a79bb889e", "9fc181325b6e3ef74d81cdb323d3e47a79bb889e" ]
[ "machine-learning/ml-projects/test-transforms/models/models/model_bn.py", "report/renders/3-method/systems/system-laser-scanner/blender_script.py", "machine-learning/ml-projects/locally-connected/models/models/local.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass CustomModel(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.stage1_block = nn.Sequential(\n nn.Conv2d(\n in_channels=3,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1\n ),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1\n ),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1\n ),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Conv2d(\n in_channels=64,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1\n ),\n nn.ReLU(),\n nn.Conv2d(\n in_channels=32,\n out_channels=1,\n kernel_size=3,\n stride=1,\n padding=1\n ),\n nn.ReLU(),\n nn.Flatten()\n )\n\n\n def forward(self,x):\n x = self.stage1_block(x)\n #print(x.shape)\n return x\n\n\n\n\n \n\n\n", "import bpy\nimport yaml\nfrom oa_blender import *\nfrom oa_luxcore import *\nimport glob\nimport random\nfrom random import uniform\nimport numpy as np\nimport math\nfrom oa_luxcore_materials import *\nfrom oa_bl_dataset_utils import *\nimport matplotlib.pyplot as plt\nimport time\n\nGT_HALT_TIME = 10\nTRAIN_FILT_HALT_TIME = 20\nTRAIN_RAW_HALT_TIME = 30\n\ndef polar2cart(r, theta, phi):\n return (\n r * math.cos(theta),\n r * math.sin(theta),\n r * math.cos(theta)\n )\n\n\nwith open(\"config.yaml\") as f:\n config = yaml.safe_load(f)\n\nprint(config)\n\ndelete_all()\n\nluxcore_setup(120)\n\nr_min, r_max = 1.0, 3.0\ntheta_min, theta_max = -math.pi/6, math.pi/6\nphi_min, phi_max = math.pi/6, math.pi/3\n\n\nstl_path = config[\"stl_path\"]\nprint(stl_path)\nstl_index = config[\"stl_index\"]\nprint(stl_index)\ntrain_filt_path = config[\"train_filt_path\"]\nprint(train_filt_path)\ntrain_raw_path = config[\"train_raw_path\"]\nprint(train_raw_path)\nground_truth_path = config[\"ground_truth_path\"]\nprint(ground_truth_path)\npbrs_path = config[\"pbrs_path\"]\nprint(pbrs_path)\nhdrs_path = config[\"hdrs_path\"]\nprint(hdrs_path)\n\nr = uniform(r_min,r_max)\ntheta = uniform(theta_min, theta_max)\nphi = uniform(phi_min, phi_max)\n\ncamera_px_size = uniform(2e-2, 3e-2)\n\n\nobj = import_stl(stl_path)\ntime.sleep(2)\nprint(\"\")\nintra_axial_dist = uniform(0.2,2.0)\ncam_laser_angle = 2*np.arctan((intra_axial_dist/2)/(r+0.1))\nlaser_scanner = LuxcoreLaserScanner(\"laserScanner\", camera_resolution=(500,500), camera_pixel_size=camera_px_size, intra_axial_dist=intra_axial_dist, angle=cam_laser_angle)\nprint(f'r {r}, theta {theta}, phi {phi}')\nloc = polar2cart(r, theta, phi)\nlaser_scanner.set_location(loc)\nprint(\"\")\nprint(\"Wait for look at\")\nprint(\"\")\nlaser_scanner.look_at((0,0,0))\ntime.sleep(1)\nprint(\"\")\n\n\n\n# GROUND TRUTH RENDER\nbpy.context.scene.world.luxcore.light = 'none'\nbpy.context.scene.luxcore.halt.time = GT_HALT_TIME\nbpy.context.scene.luxcore.config.path.depth_total = 1\ngt_img = laser_scanner.camera.get_image()\ngt_img = convert_to_binary(gt_img)\ngt_img = row_wise_mean_index(gt_img)\ncv2.imwrite(os.path.join(ground_truth_path, \"img\"+('%03d' % stl_index)+\".png\"), gt_img)\n\n# ASSIGN MATERIAL\nif random.random() < 0.8:\n set_random_pbr(obj, pbrs_path)\nelse:\n assign_mix_material(obj,\"Metal\", \"Metal\")\n\n# REFLECTION SETUP\nbpy.context.scene.luxcore.config.path.depth_total = 3\nbpy.context.scene.luxcore.config.path.depth_specular = 3\nbpy.context.scene.luxcore.config.path.depth_diffuse = 3\nbpy.context.scene.luxcore.config.path.depth_glossy = 3\n\n# TRAIN FILTERED 
RENDER\nbpy.context.scene.luxcore.halt.time = TRAIN_FILT_HALT_TIME\ntrain_img = laser_scanner.camera.get_image()\ntrain_img = cv2.cvtColor(train_img, cv2.COLOR_RGB2BGR)\ncv2.imwrite(os.path.join(train_filt_path, \"img\"+('%03d' % stl_index)+\".png\"), train_img)\n\n# TRAIN RAW RENDER\nset_random_hdri_luxcore(hdrs_path)\nbpy.context.scene.luxcore.halt.time = TRAIN_RAW_HALT_TIME\ntrain_img = laser_scanner.camera.get_image()\ntrain_img = cv2.cvtColor(train_img, cv2.COLOR_RGB2BGR)\ncv2.imwrite(os.path.join(train_raw_path, \"img\"+('%03d' % stl_index)+\".png\"), train_img)\nbpy.data.objects.remove(obj, do_unlink=True)\n\n\n\n\n\nprint(\"Succesfull render\")\n", "import torch\nimport torch.nn as nn\nimport torchex.nn as exnn\nfrom torch.nn.modules.utils import _pair\n\n\nclass LocallyConnected2d(nn.Module):\n def __init__(self, in_channels, out_channels, output_size, kernel_size, stride, bias=False):\n super(LocallyConnected2d, self).__init__()\n output_size = _pair(output_size)\n self.weight = nn.Parameter(\n torch.randn(1, out_channels, in_channels, output_size[0], output_size[1], kernel_size**2)\n )\n if bias:\n self.bias = nn.Parameter(\n torch.randn(1, out_channels, output_size[0], output_size[1])\n )\n else:\n self.register_parameter('bias', None)\n self.kernel_size = _pair(kernel_size)\n self.stride = _pair(stride)\n\n def forward(self, x):\n _, c, h, w = x.size()\n kh, kw = self.kernel_size\n dh, dw = self.stride\n x = x.unfold(2, kh, dh).unfold(3, kw, dw)\n x = x.contiguous().view(*x.size()[:-2], -1)\n # Sum in in_channel and kernel_size dims\n out = (x.unsqueeze(1) * self.weight).sum([2, -1])\n if self.bias is not None:\n out += self.bias\n return out\n\nclass CustomModel(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.stage1_block = nn.Sequential(\n nn.Conv2d(\n in_channels=3,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1\n ),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1\n ),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1,\n padding=1\n ),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Conv2d(\n in_channels=64,\n out_channels=32,\n kernel_size=3,\n stride=1,\n padding=1\n ),\n nn.ReLU(),\n nn.BatchNorm2d(32),\n nn.Conv2d(\n in_channels=32,\n out_channels=1,\n kernel_size=3,\n stride=1,\n padding=1\n ),\n nn.ReLU(),\n nn.BatchNorm2d(1),\n torch.nn.ConstantPad2d(1, 0),\n LocallyConnected2d(\n in_channels=1,\n out_channels=1,\n kernel_size=3,\n output_size=(500,500),\n stride=1,\n bias=True\n #padding=1\n ),\n nn.ReLU(),\n nn.Flatten()\n )\n\n\n def forward(self,x):\n x = self.stage1_block(x)\n #print(x.shape)\n return x\n\n\n\n\n \n\n\n" ]
[ [ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.nn.Flatten" ], [ "numpy.arctan" ], [ "torch.nn.modules.utils._pair", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.ConstantPad2d", "torch.nn.Conv2d", "torch.randn", "torch.nn.Flatten" ] ]
asd17539/I3D-Tensorflow
[ "9ee3295f06b8d6c37dc5e146b2cdb756ef725268" ]
[ "experiments/ucf-101/data_list/gen_train_list.py" ]
[ "import os\nimport math\nimport numpy as np\n\n\nroot_path = '/home/project/I3D/data/Kinetics/train_256'\nnum_frames = 16\ndata_list = []\nid_list = []\nlabel_list = []\nerro_data = []\nlabel = 0\nid = 0\n\nfor file_path in sorted(os.listdir(root_path)):\n for video_path in sorted(os.listdir(os.path.join(root_path, file_path))):\n frame_num = len(os.listdir(os.path.join(root_path, file_path, video_path)))\n print('Process: ' + os.path.join(root_path, file_path, video_path), frame_num)\n if frame_num > 0:\n data_list.append(os.path.join(root_path, file_path, video_path))\n id_list.append(id)\n label_list.append(label)\n id += 1\n else:\n erro_data.append(os.path.join(root_path, file_path, video_path))\n label += 1\n if label == 100:\n break\nprint(erro_data)\nprint(len(data_list))\nprint(len(id_list))\nprint(len(label_list))\n\nnp.save('./train_data_list_%d.npy'%label, data_list)\nnp.save('./train_label_list_%d.npy'%label, label_list)\n" ]
[ [ "numpy.save" ] ]
DanielPolatajko/pennylane
[ "d603e810a4d34d727a436d852c540fdc0fe21a85", "d603e810a4d34d727a436d852c540fdc0fe21a85" ]
[ "pennylane/operation.py", "tests/test_qubit_device.py" ]
[ "# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# pylint: disable=protected-access\nr\"\"\"\nThis module contains the abstract base classes for defining PennyLane\noperations and observables.\n\nDescription\n-----------\n\nQubit Operations\n~~~~~~~~~~~~~~~~\nThe :class:`Operator` class serves as a base class for operators,\nand is inherited by both the :class:`Observable` class and the\n:class:`Operation` class. These classes are subclassed to implement quantum operations\nand measure observables in PennyLane.\n\n* Each :class:`~.Operator` subclass represents a general type of\n map between physical states. Each instance of these subclasses\n represents either\n\n - an application of the operator or\n - an instruction to measure and return the respective result.\n\n Operators act on a sequence of wires (subsystems) using given parameter values.\n\n* Each :class:`~.Operation` subclass represents a type of quantum operation,\n for example a unitary quantum gate. Each instance of these subclasses\n represents an application of the operation with given parameter values to\n a given sequence of wires (subsystems).\n\n* Each :class:`~.Observable` subclass represents a type of physical observable.\n Each instance of these subclasses represents an instruction to measure and\n return the respective result for the given parameter values on a\n sequence of wires (subsystems).\n\nDifferentiation\n^^^^^^^^^^^^^^^\n\nIn general, an :class:`Operation` is differentiable (at least using the finite-difference\nmethod) with respect to a parameter iff\n\n* the domain of that parameter is continuous.\n\nFor an :class:`Operation` to be differentiable with respect to a parameter using the\nanalytic method of differentiation, it must satisfy an additional constraint:\n\n* the parameter domain must be real.\n\n.. note::\n\n These conditions are *not* sufficient for analytic differentiation. For example,\n CV gates must also define a matrix representing their Heisenberg linear\n transformation on the quadrature operators.\n\nFor gates that *are* supported via the analytic method, the gradient recipe\nworks as follows:\n\n.. math:: \\frac{\\partial}{\\partial\\phi_k}f = \\sum_{i} c_i f(a_i \\phi_k+s_i).\n\nwhere :math:`f` is the expectation value of an observable on a circuit that has\nbeen evolved by the operation being considered with parameter :math:`\\phi_k`,\nthere are multiple terms indexed with :math:`i` for each parameter :math:`\\phi`\nand the :math:`[c_i, a_i, s_i]` are coefficients specific to the gate.\n\nThe following specific case holds for example for qubit operations that are\ngenerated by one of the Pauli matrices and results in an overall positive and\nnegative shift:\n\n.. 
math::\n\n \\frac{\\partial}{\\partial\\phi_k}f = \\frac{1}{2}\\left[f \\left( \\phi_k+\\frac{\\pi}{2} \\right) - f\n \\left( \\phi_k-\\frac{\\pi}{2} \\right)\\right],\n\ni.e., so that :math:`[c_0, a_0, s_0]=[1/2, 1, \\pi/2]` and :math:`[c_1, a_1, s_1]=[-1/2, 1, -\\pi/2]`.\n\nCV Operation base classes\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\nDue to additional requirements, continuous-variable (CV) operations must subclass the\n:class:`~.CVOperation` or :class:`~.CVObservable` classes instead of :class:`~.Operation`\nand :class:`~.Observable`.\n\nDifferentiation\n^^^^^^^^^^^^^^^\n\nTo enable gradient computation using the analytic method for Gaussian CV operations, in addition, you need to\nprovide the static class method :meth:`~.CV._heisenberg_rep` that returns the Heisenberg representation of\nthe operation given its list of parameters, namely:\n\n* For Gaussian CV Operations this method should return the matrix of the linear transformation carried out by the\n operation on the vector of quadrature operators :math:`\\mathbf{r}` for the given parameter\n values.\n\n* For Gaussian CV Observables this method should return a real vector (first-order observables)\n or symmetric matrix (second-order observables) of coefficients of the quadrature\n operators :math:`\\x` and :math:`\\p`.\n\nPennyLane uses the convention :math:`\\mathbf{r} = (\\I, \\x, \\p)` for single-mode operations and observables\nand :math:`\\mathbf{r} = (\\I, \\x_0, \\p_0, \\x_1, \\p_1, \\ldots)` for multi-mode operations and observables.\n\n.. note::\n Non-Gaussian CV operations and observables are currently only supported via\n the finite-difference method of gradient computation.\n\"\"\"\nimport abc\nimport copy\nimport itertools\nimport functools\nimport numbers\nfrom enum import Enum, IntEnum\n\nimport numpy as np\nfrom numpy.linalg import multi_dot\n\nimport pennylane as qml\nfrom pennylane.wires import Wires\n\nfrom .utils import pauli_eigs\nfrom .variable import Variable\n\n# =============================================================================\n# Wire types\n# =============================================================================\n\n\nclass WiresEnum(IntEnum):\n \"\"\"Integer enumeration class\n to represent the number of wires\n an operation acts on\"\"\"\n\n AnyWires = -1\n AllWires = 0\n\n\nAllWires = WiresEnum.AllWires\n\"\"\"IntEnum: An enumeration which represents all wires in the\nsubsystem. It is equivalent to an integer with value 0.\"\"\"\n\nAnyWires = WiresEnum.AnyWires\n\"\"\"IntEnum: An enumeration which represents any wires in the\nsubsystem. 
It is equivalent to an integer with value -1.\"\"\"\n\n\n# =============================================================================\n# ObservableReturnTypes types\n# =============================================================================\n\n\nclass ObservableReturnTypes(Enum):\n \"\"\"Enumeration class to represent the return types of an observable.\"\"\"\n\n Sample = \"sample\"\n Variance = \"var\"\n Expectation = \"expval\"\n Probability = \"probs\"\n State = \"state\"\n\n def __repr__(self):\n \"\"\"String representation of the return types.\"\"\"\n return str(self.value)\n\n\nSample = ObservableReturnTypes.Sample\n\"\"\"Enum: An enumeration which represents sampling an observable.\"\"\"\n\nVariance = ObservableReturnTypes.Variance\n\"\"\"Enum: An enumeration which represents returning the variance of\nan observable on specified wires.\"\"\"\n\nExpectation = ObservableReturnTypes.Expectation\n\"\"\"Enum: An enumeration which represents returning the expectation\nvalue of an observable on specified wires.\"\"\"\n\nProbability = ObservableReturnTypes.Probability\n\"\"\"Enum: An enumeration which represents returning probabilities\nof all computational basis states.\"\"\"\n\nState = ObservableReturnTypes.State\n\"\"\"Enum: An enumeration which represents returning the state in the computational basis.\"\"\"\n\n# =============================================================================\n# Class property\n# =============================================================================\n\n\nclass ClassPropertyDescriptor: # pragma: no cover\n \"\"\"Allows a class property to be defined\"\"\"\n\n # pylint: disable=too-few-public-methods\n def __init__(self, fget, fset=None):\n self.fget = fget\n self.fset = fset\n\n def __get__(self, obj, klass=None):\n if klass is None:\n klass = type(obj)\n return self.fget.__get__(obj, klass)()\n\n def __set__(self, obj, value):\n if not self.fset:\n raise AttributeError(\"can't set attribute\")\n type_ = type(obj)\n return self.fset.__get__(obj, type_)(value)\n\n def setter(self, func):\n \"\"\"Set the function as a class method, and store as an attribute.\"\"\"\n if not isinstance(func, (classmethod, staticmethod)):\n func = classmethod(func)\n self.fset = func\n return self\n\n\ndef classproperty(func):\n \"\"\"The class property decorator\"\"\"\n if not isinstance(func, (classmethod, staticmethod)):\n func = classmethod(func)\n\n return ClassPropertyDescriptor(func)\n\n\n# =============================================================================\n# Base Operator class\n# =============================================================================\n\n\nclass Operator(abc.ABC):\n r\"\"\"Base class for quantum operators supported by a device.\n\n The following class attributes must be defined for all Operators:\n\n * :attr:`~.Operator.num_params`\n * :attr:`~.Operator.num_wires`\n * :attr:`~.Operator.par_domain`\n\n Args:\n params (tuple[float, int, array, Variable]): operator parameters\n\n Keyword Args:\n wires (Iterable[Number, str], Number, str, Wires): Wires that the operator acts on.\n If not given, args[-1] is interpreted as wires.\n do_queue (bool): Indicates whether the operator should be\n immediately pushed into the Operator queue.\n \"\"\"\n do_check_domain = True #: bool: flag: should we perform a domain check for the parameters?\n\n def __copy__(self):\n cls = self.__class__\n copied_op = cls.__new__(cls)\n copied_op.data = self.data.copy()\n copied_op._wires = self.wires\n copied_op._name = self._name\n\n if hasattr(self, 
\"_inverse\"):\n copied_op._inverse = self._inverse\n\n return copied_op\n\n def __deepcopy__(self, memo):\n cls = self.__class__\n copied_op = cls.__new__(cls)\n\n # The memo dict maps object ID to object, and is required by\n # the deepcopy function to keep track of objects it has already\n # deep copied.\n memo[id(self)] = copied_op\n\n for attribute, value in self.__dict__.items():\n if attribute == \"data\":\n # Shallow copy the list of parameters. We avoid a deep copy\n # here, since PyTorch does not support deep copying of tensors\n # within a differentiable computation.\n copied_op.data = value.copy()\n else:\n # Deep copy everything else.\n setattr(copied_op, attribute, copy.deepcopy(value, memo))\n return copied_op\n\n @classmethod\n def _matrix(cls, *params):\n \"\"\"Matrix representation of the operator\n in the computational basis.\n\n This is a *class method* that should be defined for all\n new operations and observables, that returns the matrix representing\n the operator in the computational basis.\n\n This private method allows matrices to be computed\n directly without instantiating the operators first.\n\n To return the matrices of *instantiated* operators,\n please use the :attr:`~.Operator.matrix` property instead.\n\n **Example:**\n\n >>> qml.RY._matrix(0.5)\n >>> array([[ 0.96891242+0.j, -0.24740396+0.j],\n [ 0.24740396+0.j, 0.96891242+0.j]])\n\n Returns:\n array: matrix representation\n \"\"\"\n raise NotImplementedError\n\n @property\n def matrix(self):\n r\"\"\"Matrix representation of an instantiated operator\n in the computational basis.\n\n **Example:**\n\n >>> U = qml.RY(0.5, wires=1)\n >>> U.matrix\n >>> array([[ 0.96891242+0.j, -0.24740396+0.j],\n [ 0.24740396+0.j, 0.96891242+0.j]])\n\n Returns:\n array: matrix representation\n \"\"\"\n return self._matrix(*self.parameters)\n\n @classmethod\n def _eigvals(cls, *params):\n \"\"\"Eigenvalues of the operator.\n\n This is a *class method* that should be defined for all\n new operations and observables that returns the eigenvalues\n of the operator. 
Note that the eigenvalues are not guaranteed\n to be in any particular order.\n\n This private method allows eigenvalues to be computed\n directly without instantiating the operators first.\n\n The default implementation relies on the presence of the\n :attr:`_matrix` method.\n\n To return the eigenvalues of *instantiated* operators,\n please use the :attr:`~.Operator.eigvals` property instead.\n\n **Example:**\n\n >>> qml.RZ._eigvals(0.5)\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigenvalue representation\n \"\"\"\n return np.linalg.eigvals(cls._matrix(*params))\n\n @property\n def eigvals(self):\n r\"\"\"Eigenvalues of an instantiated operator.\n\n Note that the eigenvalues are not guaranteed to be in any\n particular order.\n\n **Example:**\n\n >>> U = qml.RZ(0.5, wires=1)\n >>> U.eigvals\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigvals representation\n \"\"\"\n return self._eigvals(*self.parameters)\n\n @property\n @abc.abstractmethod\n def num_params(self):\n \"\"\"Number of parameters the operator takes.\"\"\"\n\n @property\n @abc.abstractmethod\n def num_wires(self):\n \"\"\"Number of wires the operator acts on.\"\"\"\n\n @property\n @abc.abstractmethod\n def par_domain(self):\n \"\"\"Domain of the gate parameters.\n\n * ``'N'``: natural numbers (including zero).\n * ``'R'``: floats.\n * ``'A'``: arrays of real or complex values.\n * ``'L'``: list of arrays of real or complex values.\n * ``None``: if there are no parameters.\n \"\"\"\n\n @property\n def name(self):\n \"\"\"String for the name of the operator.\"\"\"\n return self._name\n\n @name.setter\n def name(self, value):\n self._name = value\n\n def __init__(self, *params, wires=None, do_queue=True):\n # pylint: disable=too-many-branches\n self._name = self.__class__.__name__ #: str: name of the operator\n self.queue_idx = None #: int, None: index of the Operator in the circuit queue, or None if not in a queue\n\n if wires is None:\n raise ValueError(\"Must specify the wires that {} acts on\".format(self.name))\n\n if isinstance(wires, Wires):\n self._wires = wires\n else:\n self._wires = Wires(wires) #: Wires: wires on which the operator acts\n\n # check that the number of wires given corresponds to required number\n if (\n self.num_wires != AllWires\n and self.num_wires != AnyWires\n and len(self._wires) != self.num_wires\n ):\n raise ValueError(\n \"{}: wrong number of wires. \"\n \"{} wires given, {} expected.\".format(self.name, len(self._wires), self.num_wires)\n )\n\n if len(params) != self.num_params:\n raise ValueError(\n \"{}: wrong number of parameters. \"\n \"{} parameters passed, {} expected.\".format(self.name, len(params), self.num_params)\n )\n\n # check the validity of the params\n if self.do_check_domain:\n for p in params:\n self.check_domain(p)\n self.data = list(params) #: list[Any]: parameters of the operator\n\n if do_queue:\n self.queue()\n\n def __repr__(self):\n \"\"\"Constructor-call-like representation.\"\"\"\n # FIXME using self.parameters here instead of self.data is dangerous, it assumes the data can be evaluated\n # which is only true if something suitable happens to remain in VariableRef.positional_arg_values etc. 
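# Hedged sketch of a user-defined gate built on the machinery documented above:
# the three required class attributes plus a `_matrix` class method are enough
# for `op.matrix` and `op.eigvals` to work, with `_eigvals` falling back to
# np.linalg.eigvals of the matrix.  `MyPhaseShift` is a hypothetical name, not
# an operation shipped with PennyLane.
import numpy as np
from pennylane.operation import Operation

class MyPhaseShift(Operation):
    num_params = 1
    num_wires = 1
    par_domain = "R"     # real scalar parameter
    grad_method = "F"    # fall back to finite differences

    @classmethod
    def _matrix(cls, *params):
        phi = params[0]
        return np.diag([1.0, np.exp(1j * phi)])

op = MyPhaseShift(0.3, wires=0, do_queue=False)   # skip queuing outside a QNode
assert np.allclose(op.matrix, np.diag([1.0, np.exp(0.3j)]))
assert np.allclose(np.sort_complex(op.eigvals),
                   np.sort_complex([1.0, np.exp(0.3j)]))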
after\n # the last evaluation.\n if self.parameters:\n params = \", \".join([repr(p) for p in self.parameters])\n return \"{}({}, wires={})\".format(self.name, params, self.wires.tolist())\n return \"{}(wires={})\".format(self.name, self.wires.tolist())\n\n def check_domain(self, p, flattened=False):\n \"\"\"Check the validity of a parameter.\n\n :class:`.Variable` instances can represent any real scalars (but not arrays).\n\n Args:\n p (Number, array, Variable): parameter to check\n flattened (bool): True means p is an element of a flattened parameter\n sequence (affects the handling of 'A' parameters)\n Raises:\n TypeError: parameter is not an element of the expected domain\n ValueError: parameter is an element of an unknown domain\n Returns:\n Number, array, Variable: p\n \"\"\"\n # pylint: disable=too-many-branches\n # If parameter is a NumPy scalar, convert it into a Python scalar.\n if isinstance(p, np.ndarray) and p.ndim == 0:\n p = p.item()\n\n if isinstance(p, Variable):\n if self.par_domain == \"A\":\n raise TypeError(\n \"{}: Array parameter expected, got a Variable, \"\n \"which can only represent real scalars.\".format(self.name)\n )\n return p\n\n # p is not a Variable\n if self.par_domain == \"A\":\n if flattened:\n if isinstance(p, np.ndarray):\n raise TypeError(\n \"{}: Flattened array parameter expected, got {}.\".format(self.name, type(p))\n )\n else:\n if not isinstance(p, np.ndarray):\n raise TypeError(\n \"{}: Array parameter expected, got {}.\".format(self.name, type(p))\n )\n elif self.par_domain in (\"R\", \"N\"):\n if not isinstance(p, numbers.Real):\n raise TypeError(\n \"{}: Real scalar parameter expected, got {}.\".format(self.name, type(p))\n )\n\n if self.par_domain == \"N\":\n if not isinstance(p, numbers.Integral):\n raise TypeError(\n \"{}: Natural number parameter expected, got {}.\".format(self.name, type(p))\n )\n if p < 0:\n raise TypeError(\n \"{}: Natural number parameter expected, got {}.\".format(self.name, p)\n )\n elif self.par_domain == \"L\":\n if not isinstance(p, list):\n raise TypeError(\"{}: List parameter expected, got {}.\".format(self.name, type(p)))\n if not all(isinstance(elem, np.ndarray) for elem in p):\n raise TypeError(\"List elements must be Numpy arrays.\")\n else:\n raise ValueError(\n \"{}: Unknown parameter domain '{}'.\".format(self.name, self.par_domain)\n )\n return p\n\n @property\n def wires(self):\n \"\"\"Wires of this operator.\n\n Returns:\n Wires: wires\n \"\"\"\n return self._wires\n\n @property\n def parameters(self):\n \"\"\"Current parameter values.\n\n Fixed parameters are returned as is, free parameters represented by\n :class:`.Variable` instances are replaced by their\n current numerical value.\n\n Returns:\n list[Any]: parameter values\n \"\"\"\n # TODO profiling\n def evaluate(p):\n \"\"\"Evaluate a single parameter.\"\"\"\n if isinstance(p, np.ndarray):\n # object arrays may have Variables inside them\n if p.dtype == object:\n temp = np.array([x.val if isinstance(x, Variable) else x for x in p.flat])\n return temp.reshape(p.shape)\n return p\n\n if isinstance(p, list):\n # p is assumed to be a list of numpy arrays\n # object arrays may have Variables inside them\n evaled_list = []\n for arr in p:\n if arr.dtype == object:\n temp = np.array([x.val if isinstance(x, Variable) else x for x in arr.flat])\n evaled_list.append(temp.reshape(arr.shape))\n return evaled_list\n return p\n\n if isinstance(p, Variable):\n p = self.check_domain(p.val)\n return p\n\n return [evaluate(p) for p in self.data]\n\n def 
queue(self):\n \"\"\"Append the operator to the Operator queue.\"\"\"\n qml.QueuingContext.append(self)\n\n return self # so pre-constructed Observable instances can be queued and returned in a single statement\n\n\n# =============================================================================\n# Base Operation class\n# =============================================================================\n\n\nclass Operation(Operator):\n r\"\"\"Base class for quantum operations supported by a device.\n\n As with :class:`~.Operator`, the following class attributes must be\n defined for all operations:\n\n * :attr:`~.Operator.num_params`\n * :attr:`~.Operator.num_wires`\n * :attr:`~.Operator.par_domain`\n\n The following two class attributes are optional, but in most cases\n should be clearly defined to avoid unexpected behavior during\n differentiation.\n\n * :attr:`~.Operation.grad_method`\n * :attr:`~.Operation.grad_recipe`\n\n Finally, there are some additional optional class attributes\n that may be set, and used by certain quantum optimizers:\n\n * :attr:`~.Operation.generator`\n\n Args:\n params (tuple[float, int, array, Variable]): operation parameters\n\n Keyword Args:\n wires (Sequence[int]): Subsystems it acts on. If not given, args[-1]\n is interpreted as wires.\n do_queue (bool): Indicates whether the operation should be\n immediately pushed into a :class:`BaseQNode` circuit queue.\n This flag is useful if there is some reason to run an Operation\n outside of a BaseQNode context.\n \"\"\"\n # pylint: disable=abstract-method\n string_for_inverse = \".inv\"\n\n @property\n def grad_method(self):\n \"\"\"Gradient computation method.\n\n * ``'A'``: analytic differentiation using the parameter-shift method.\n * ``'F'``: finite difference numerical differentiation.\n * ``None``: the operation may not be differentiated.\n\n Default is ``'F'``, or ``None`` if the Operation has zero parameters.\n \"\"\"\n return None if self.num_params == 0 else \"F\"\n\n grad_recipe = None\n r\"\"\"tuple(Union(list[list[float]], None)) or None: Gradient recipe for the\n parameter-shift method.\n\n This is a tuple with one nested list per operation parameter. For\n parameter :math:`\\phi_k`, the nested list contains elements of the form\n :math:`[c_i, a_i, s_i]` where :math:`i` is the index of the\n term, resulting in a gradient recipe of\n\n .. 
math:: \\frac{\\partial}{\\partial\\phi_k}f = \\sum_{i} c_i f(a_i \\phi_k + s_i).\n\n If ``None``, the default gradient recipe containing the two terms\n :math:`[c_0, a_0, s_0]=[1/2, 1, \\pi/2]` and :math:`[c_1, a_1,\n s_1]=[-1/2, 1, -\\pi/2]` is assumed for every parameter.\n \"\"\"\n\n def get_parameter_shift(self, idx, shift=np.pi / 2):\n \"\"\"Multiplier and shift for the given parameter, based on its gradient recipe.\n\n Args:\n idx (int): parameter index\n\n Returns:\n float, float: multiplier, shift\n \"\"\"\n # get the gradient recipe for this parameter\n recipe = self.grad_recipe[idx]\n\n # Default values\n multiplier = 0.5 / np.sin(shift)\n a = 1\n\n # We set the default recipe following:\n # ∂f(x) = c*f(x+s) - c*f(x-s)\n # where we express a positive and a negative shift by default\n default_param_shift = [[multiplier, a, shift], [-multiplier, a, -shift]]\n param_shift = default_param_shift if recipe is None else recipe\n\n if hasattr(self.data[idx], \"mult\"):\n # Parameter is a variable, we are in non-tape mode\n # Need to use the internal multiplier in the Variable to update the\n # multiplier and the shift\n var_mult = self.data[idx].mult\n\n for elem in param_shift:\n\n # Update the multiplier\n elem[0] *= var_mult\n if var_mult != 0:\n # Update the shift\n # zero multiplier means the shift is unimportant\n elem[2] /= var_mult\n return param_shift\n\n @property\n def generator(self):\n r\"\"\"Generator of the operation.\n\n A length-2 list ``[generator, scaling_factor]``, where\n\n * ``generator`` is an existing PennyLane\n operation class or :math:`2\\times 2` Hermitian array\n that acts as the generator of the current operation\n\n * ``scaling_factor`` represents a scaling factor applied\n to the generator operation\n\n For example, if :math:`U(\\theta)=e^{i0.7\\theta \\sigma_x}`, then\n :math:`\\sigma_x`, with scaling factor :math:`s`, is the generator\n of operator :math:`U(\\theta)`:\n\n .. 
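# Numerical sanity check of the default two-term recipe quoted above,
# [c, a, s] = [1/2, 1, pi/2] and [-1/2, 1, -pi/2], using a plain cosine as a
# stand-in for the circuit output f(phi) (e.g. <Z> after a single rotation).
# For such trigonometric dependence the shift rule is exact, not merely a
# finite-difference approximation.
import numpy as np

def f(phi):
    return np.cos(phi)          # toy "expectation value"

phi = 0.123
shift_rule = 0.5 * (f(phi + np.pi / 2) - f(phi - np.pi / 2))
analytic = -np.sin(phi)         # d/dphi cos(phi)
assert np.isclose(shift_rule, analytic)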
code-block:: python\n\n generator = [PauliX, 0.7]\n\n Default is ``[None, 1]``, indicating the operation has no generator.\n \"\"\"\n return [None, 1]\n\n @property\n def inverse(self):\n \"\"\"Boolean determining if the inverse of the operation was requested.\"\"\"\n return self._inverse\n\n @inverse.setter\n def inverse(self, boolean):\n self._inverse = boolean\n\n @staticmethod\n def decomposition(*params, wires):\n \"\"\"Returns a template decomposing the operation into other\n quantum operations.\"\"\"\n raise NotImplementedError\n\n def inv(self):\n \"\"\"Inverts the operation, such that the inverse will\n be used for the computations by the specific device.\n\n This method concatenates a string to the name of the operation,\n to indicate that the inverse will be used for computations.\n\n Any subsequent call of this method will toggle between the original\n operation and the inverse of the operation.\n\n Returns:\n :class:`Operator`: operation to be inverted\n \"\"\"\n self.inverse = not self._inverse\n return self\n\n @property\n def matrix(self):\n op_matrix = self._matrix(*self.parameters)\n\n if self.inverse:\n return op_matrix.conj().T\n\n return op_matrix\n\n @property\n def eigvals(self):\n op_eigvals = self._eigvals(*self.parameters)\n\n if self.inverse:\n return op_eigvals.conj()\n\n return op_eigvals\n\n @property\n def base_name(self):\n \"\"\"Get base name of the operator.\"\"\"\n return self.__class__.__name__\n\n @property\n def name(self):\n \"\"\"Get and set the name of the operator.\"\"\"\n return self._name + Operation.string_for_inverse if self.inverse else self._name\n\n def __init__(self, *params, wires=None, do_queue=True):\n\n self._inverse = False\n\n # check the grad_method validity\n if self.par_domain == \"N\":\n assert (\n self.grad_method is None\n ), \"An operation may only be differentiated with respect to real scalar parameters.\"\n elif self.par_domain == \"A\":\n assert self.grad_method in (\n None,\n \"F\",\n ), \"Operations that depend on arrays containing free variables may only be differentiated using the F method.\"\n\n # check the grad_recipe validity\n if self.grad_method == \"A\":\n if self.grad_recipe is None:\n # default recipe for every parameter\n self.grad_recipe = [None] * self.num_params\n else:\n assert (\n len(self.grad_recipe) == self.num_params\n ), \"Gradient recipe must have one entry for each parameter!\"\n else:\n assert self.grad_recipe is None, \"Gradient recipe is only used by the A method!\"\n\n super().__init__(*params, wires=wires, do_queue=do_queue)\n\n\nclass DiagonalOperation(Operation):\n r\"\"\"Base class for diagonal quantum operations supported by a device.\n\n As with :class:`~.Operation`, the following class attributes must be\n defined for all operations:\n\n * :attr:`~.Operator.num_params`\n * :attr:`~.Operator.num_wires`\n * :attr:`~.Operator.par_domain`\n\n The following two class attributes are optional, but in most cases\n should be clearly defined to avoid unexpected behavior during\n differentiation.\n\n * :attr:`~.Operation.grad_method`\n * :attr:`~.Operation.grad_recipe`\n\n Finally, there are some additional optional class attributes\n that may be set, and used by certain quantum optimizers:\n\n * :attr:`~.Operation.generator`\n\n Args:\n params (tuple[float, int, array, Variable]): operation parameters\n\n Keyword Args:\n wires (Sequence[int]): Subsystems it acts on. 
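# Sketch illustrating the `generator` convention above for the example
# U(theta) = exp(i * 0.7 * theta * sigma_x), whose generator pair is
# [PauliX, 0.7].  For a Pauli generator the exponential has the closed form
# exp(i*a*theta*X) = cos(a*theta)*I + i*sin(a*theta)*X, which keeps the sketch
# numpy-only; it is purely illustrative and not part of the classes above.
import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)
I2 = np.eye(2, dtype=complex)
prefactor, theta = 0.7, 0.4

U = np.cos(prefactor * theta) * I2 + 1j * np.sin(prefactor * theta) * X

# U is unitary, and its conjugate transpose undoes it -- the property the
# `inverse`-aware `matrix`/`eigvals` accessors above rely on:
assert np.allclose(U @ U.conj().T, I2)
assert np.allclose(U.conj().T @ U, I2)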
If not given, args[-1]\n is interpreted as wires.\n do_queue (bool): Indicates whether the operation should be\n immediately pushed into a :class:`BaseQNode` circuit queue.\n This flag is useful if there is some reason to run an Operation\n outside of a BaseQNode context.\n \"\"\"\n # pylint: disable=abstract-method\n\n @classmethod\n def _eigvals(cls, *params):\n \"\"\"Eigenvalues of the operator.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors.\n\n This is a *class method* that must be defined for all\n new diagonal operations, that returns the eigenvalues\n of the operator in the computational basis.\n\n This private method allows eigenvalues to be computed\n directly without instantiating the operators first.\n\n To return the eigenvalues of *instantiated* operators,\n please use the :attr:`~.Operator.eigvals` property instead.\n\n **Example:**\n\n >>> qml.RZ._eigvals(0.5)\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigenvalue representation\n \"\"\"\n raise NotImplementedError\n\n @property\n def eigvals(self):\n r\"\"\"Eigenvalues of an instantiated diagonal operation.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors.\n\n **Example:**\n\n >>> U = qml.RZ(0.5, wires=1)\n >>> U.eigvals\n >>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])\n\n Returns:\n array: eigvals representation\n \"\"\"\n return super().eigvals\n\n @classmethod\n def _matrix(cls, *params):\n return np.diag(cls._eigvals(*params))\n\n\nclass Channel(Operation, abc.ABC):\n r\"\"\"Base class for quantum channels.\n\n As with :class:`~.Operation`, the following class attributes must be\n defined for all channels:\n\n * :attr:`~.Operator.num_params`\n * :attr:`~.Operator.num_wires`\n * :attr:`~.Operator.par_domain`\n\n To define a noisy channel, the following attribute of :class:`~.Channel`\n can be used to list the corresponding Kraus matrices.\n\n * :attr:`~.Channel._kraus_matrices`\n\n The following two class attributes are optional, but in most cases\n should be clearly defined to avoid unexpected behavior during\n differentiation.\n\n * :attr:`~.Operation.grad_method`\n * :attr:`~.Operation.grad_recipe`\n\n Args:\n params (tuple[float, int, array, Variable]): operation parameters\n\n Keyword Args:\n wires (Sequence[int]): Subsystems the channel acts on. If not given, args[-1]\n is interpreted as wires.\n do_queue (bool): Indicates whether the operation should be\n immediately pushed into a :class:`BaseQNode` circuit queue.\n This flag is useful if there is some reason to run an Operation\n outside of a BaseQNode context.\n \"\"\"\n # pylint: disable=abstract-method\n\n @classmethod\n @abc.abstractmethod\n def _kraus_matrices(cls, *params):\n \"\"\"Kraus matrices representing a quantum channel, specified in\n the computational basis.\n\n This is a class method that should be defined for all\n new channels. It returns the Kraus matrices representing\n the channel in the computational basis.\n\n This private method allows matrices to be computed\n directly without instantiating the channel first.\n\n **Example**\n\n >>> qml.AmplitudeDamping._kraus_matrices(0.1)\n >>> [array([[1. , 0. ],\n [0. , 0.9486833]]), array([[0. , 0.31622777],\n [0. , 0. 
]])]\n\n To return the Kraus matrices of an *instantiated* channel,\n please use the :attr:`~.Operator.kraus_matrices` property instead.\n\n Returns:\n list(array): list of Kraus matrices\n \"\"\"\n raise NotImplementedError\n\n @property\n def kraus_matrices(self):\n r\"\"\"Kraus matrices of an instantiated channel\n in the computational basis.\n\n ** Example**\n\n >>> U = qml.AmplitudeDamping(0.1, wires=1)\n >>> U.kraus_matrices\n >>> [array([[1. , 0. ],\n [0. , 0.9486833]]), array([[0. , 0.31622777],\n [0. , 0. ]])]\n\n Returns:\n list(array): list of Kraus matrices\n \"\"\"\n return self._kraus_matrices(*self.parameters)\n\n\n# =============================================================================\n# Base Observable class\n# =============================================================================\n\n\nclass Observable(Operator):\n \"\"\"Base class for observables supported by a device.\n\n :class:`Observable` is used to describe Hermitian quantum observables.\n\n As with :class:`~.Operator`, the following class attributes must be\n defined for all observables:\n\n * :attr:`~.Operator.num_params`\n * :attr:`~.Operator.num_wires`\n * :attr:`~.Operator.par_domain`\n\n Args:\n params (tuple[float, int, array, Variable]): observable parameters\n\n Keyword Args:\n wires (Sequence[int]): subsystems it acts on.\n Currently, only one subsystem is supported.\n do_queue (bool): Indicates whether the operation should be\n immediately pushed into the Operator queue.\n \"\"\"\n\n # pylint: disable=abstract-method\n return_type = None\n\n @classmethod\n def _eigvals(cls, *params):\n \"\"\"Eigenvalues of the observable.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors when the observable is\n diagonalized using :attr:`diagonalizing_gates`.\n\n This is a *class method* that must be defined for all\n new diagonal operations, that returns the eigenvalues\n of the operator in the computational basis.\n\n This private method allows eigenvalues to be computed\n directly without instantiating the operators first.\n\n To return the eigenvalues of *instantiated* operators,\n please use the :attr:`~.Operator.eigvals` property instead.\n\n **Example:**\n\n >>> qml.PauliZ._eigvals()\n >>> array([1, -1])\n\n Returns:\n array: eigenvalue representation\n \"\"\"\n raise NotImplementedError\n\n @property\n def eigvals(self):\n r\"\"\"Eigenvalues of an instantiated observable.\n\n The order of the eigenvalues needs to match the order of\n the computational basis vectors when the observable is\n diagonalized using :attr:`diagonalizing_gates`. 
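# Quick check of the amplitude-damping Kraus matrices quoted in the example
# above (gamma = 0.1): a valid channel's Kraus operators must satisfy
# sum_i K_i^dagger K_i = I.  Written with plain numpy so it does not depend on
# instantiating qml.AmplitudeDamping here.
import numpy as np

gamma = 0.1
K0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1 - gamma)]])   # ~ [[1, 0], [0, 0.9486833]]
K1 = np.array([[0.0, np.sqrt(gamma)], [0.0, 0.0]])       # ~ [[0, 0.31622777], [0, 0]]

completeness = K0.conj().T @ K0 + K1.conj().T @ K1
assert np.allclose(completeness, np.eye(2))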
This is a\n requirement for using qubit observables in quantum functions.\n\n **Example:**\n\n >>> U = qml.PauliZ(wires=1)\n >>> U.eigvals\n >>> array([1, -1])\n\n Returns:\n array: eigvals representation\n \"\"\"\n return super().eigvals\n\n def __init__(self, *params, wires=None, do_queue=True):\n # extract the arguments\n if wires is None:\n wires = params[-1]\n params = params[:-1]\n\n super().__init__(*params, wires=wires, do_queue=do_queue)\n\n def __repr__(self):\n \"\"\"Constructor-call-like representation.\"\"\"\n temp = super().__repr__()\n\n if self.return_type is None:\n return temp\n\n if self.return_type is Probability:\n return repr(self.return_type) + \"(wires={})\".format(self.wires.tolist())\n\n return repr(self.return_type) + \"(\" + temp + \")\"\n\n def __matmul__(self, other):\n if isinstance(other, Tensor):\n return other.__rmatmul__(self)\n\n if isinstance(other, Observable):\n return Tensor(self, other)\n\n raise ValueError(\"Can only perform tensor products between observables.\")\n\n def _obs_data(self):\n r\"\"\"Extracts the data from a Observable or Tensor and serializes it in an order-independent fashion.\n\n This allows for comparison between observables that are equivalent, but are expressed\n in different orders. For example, `qml.PauliX(0) @ qml.PauliZ(1)` and\n `qml.PauliZ(1) @ qml.PauliX(0)` are equivalent observables with different orderings.\n\n **Example**\n\n >>> tensor = qml.PauliX(0) @ qml.PauliZ(1)\n >>> print(tensor._obs_data())\n {(\"PauliZ\", <Wires = [1]>, ()), (\"PauliX\", <Wires = [0]>, ())}\n \"\"\"\n obs = Tensor(self).non_identity_obs\n tensor = set()\n\n for ob in obs:\n parameters = tuple(param.tostring() for param in ob.parameters)\n tensor.add((ob.name, ob.wires, parameters))\n\n return tensor\n\n def compare(self, other):\n r\"\"\"Compares with another :class:`~.Hamiltonian`, :class:`~Tensor`, or :class:`~Observable`,\n to determine if they are equivalent.\n\n Observables/Hamiltonians are equivalent if they represent the same operator\n (their matrix representations are equal), and they are defined on the same wires.\n\n .. 
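# Sketch of the order-independence that `_obs_data` (above) provides to
# `compare`: the same tensor product written in either order serializes to the
# same set of (name, wires, parameters) tuples, so the two observables are
# reported as equivalent.  Assumes a standard PennyLane install where PauliX,
# PauliZ and `compare` behave as in the source above.
import pennylane as qml

ob1 = qml.PauliX(0) @ qml.PauliZ(1)
ob2 = qml.PauliZ(1) @ qml.PauliX(0)

assert ob1._obs_data() == ob2._obs_data()
assert ob1.compare(ob2)            # equivalent despite the different ordering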
Warning::\n\n The compare method does **not** check if the matrix representation\n of a :class:`~.Hermitian` observable is equal to an equivalent\n observable expressed in terms of Pauli matrices.\n To do so would require the matrix form of Hamiltonians and Tensors\n be calculated, which would drastically increase runtime.\n\n Returns:\n (bool): True if equivalent.\n\n **Examples**\n\n >>> ob1 = qml.PauliX(0) @ qml.Identity(1)\n >>> ob2 = qml.Hamiltonian([1], [qml.PauliX(0)])\n >>> ob1.compare(ob2)\n True\n >>> ob1 = qml.PauliX(0)\n >>> ob2 = qml.Hermitian(np.array([[0, 1], [1, 0]]), 0)\n >>> ob1.compare(ob2)\n False\n \"\"\"\n if isinstance(other, (Tensor, Observable)):\n return other._obs_data() == self._obs_data()\n if isinstance(other, qml.Hamiltonian):\n return other.compare(self)\n\n raise ValueError(\n \"Can only compare an Observable/Tensor, and a Hamiltonian/Observable/Tensor.\"\n )\n\n def __add__(self, other):\n r\"\"\"The addition operation between Observables/Tensors/qml.Hamiltonian objects.\"\"\"\n if isinstance(other, (Observable, Tensor)):\n return qml.Hamiltonian([1, 1], [self, other], simplify=True)\n\n if isinstance(other, qml.Hamiltonian):\n return other + self\n\n raise ValueError(f\"Cannot add Observable and {type(other)}\")\n\n def __mul__(self, a):\n r\"\"\"The scalar multiplication operation between a scalar and an Observable/Tensor.\"\"\"\n if isinstance(a, (int, float)):\n return qml.Hamiltonian([a], [self], simplify=True)\n\n raise ValueError(f\"Cannot multiply Observable by {type(a)}\")\n\n __rmul__ = __mul__\n\n def __sub__(self, other):\n r\"\"\"The subtraction operation between Observables/Tensors/qml.Hamiltonian objects.\"\"\"\n if isinstance(other, (Observable, Tensor, qml.Hamiltonian)):\n return self.__add__(other.__mul__(-1))\n raise ValueError(f\"Cannot subtract {type(other)} from Observable\")\n\n def diagonalizing_gates(self):\n r\"\"\"Returns the list of operations such that they\n diagonalize the observable in the computational basis.\n\n Returns:\n list(qml.Operation): A list of gates that diagonalize\n the observable in the computational basis.\n \"\"\"\n raise NotImplementedError\n\n\nclass Tensor(Observable):\n \"\"\"Container class representing tensor products of observables.\n\n To create a tensor, simply initiate it like so:\n\n >>> T = Tensor(qml.PauliX(0), qml.Hermitian(A, [1, 2]))\n\n You can also create a tensor from other Tensors:\n\n >>> T = Tensor(T, qml.PauliZ(4))\n\n The ``@`` symbol can be used as a tensor product operation:\n\n >>> T = qml.PauliX(0) @ qml.Hadamard(2)\n \"\"\"\n\n # pylint: disable=abstract-method\n return_type = None\n tensor = True\n par_domain = None\n\n def __init__(self, *args): # pylint: disable=super-init-not-called\n\n self._eigvals_cache = None\n self.obs = []\n\n for o in args:\n if isinstance(o, Tensor):\n self.obs.extend(o.obs)\n elif isinstance(o, Observable):\n self.obs.append(o)\n else:\n raise ValueError(\"Can only perform tensor products between observables.\")\n\n def __copy__(self):\n cls = self.__class__\n copied_op = cls.__new__(cls)\n copied_op.obs = self.obs.copy()\n copied_op._eigvals_cache = self._eigvals_cache\n return copied_op\n\n def __repr__(self):\n \"\"\"Constructor-call-like representation.\"\"\"\n\n s = \" @ \".join([repr(o) for o in self.obs])\n\n if self.return_type is None:\n return s\n\n if self.return_type is Probability:\n return repr(self.return_type) + \"(wires={})\".format(self.wires.tolist())\n\n return repr(self.return_type) + \"(\" + s + \")\"\n\n @property\n def 
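# Illustration of the arithmetic dunders above (__add__, __mul__, __sub__):
# combining observables with scalars yields a qml.Hamiltonian.  A hedged
# sketch assuming qml.Hamiltonian exposes its terms via the usual `ops`
# attribute; exact coefficient ordering is an implementation detail, so only
# the type and the number of terms are checked.
import pennylane as qml

obs = 2.0 * qml.PauliX(0) + qml.PauliZ(1)

assert isinstance(obs, qml.Hamiltonian)
assert len(obs.ops) == 2      # two distinct terms survive simplification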
name(self):\n \"\"\"All constituent observable names making up the tensor product.\n\n Returns:\n list[str]: list containing all observable names\n \"\"\"\n return [o.name for o in self.obs]\n\n @property\n def num_wires(self):\n \"\"\"Number of wires the tensor product acts on.\n\n Returns:\n int: number of wires\n \"\"\"\n return len(self.wires)\n\n @property\n def wires(self):\n \"\"\"All wires in the system the tensor product acts on.\n\n Returns:\n Wires: wires addressed by the observables in the tensor product\n \"\"\"\n return Wires.all_wires([o.wires for o in self.obs])\n\n @property\n def data(self):\n \"\"\"Raw parameters of all constituent observables in the tensor product.\n\n Returns:\n list[Any]: flattened list containing all dependent parameters\n \"\"\"\n return [p for sublist in [o.data for o in self.obs] for p in sublist]\n\n @property\n def num_params(self):\n \"\"\"Raw parameters of all constituent observables in the tensor product.\n\n Returns:\n list[Any]: flattened list containing all dependent parameters\n \"\"\"\n return len(self.data)\n\n @property\n def parameters(self):\n \"\"\"Evaluated parameter values of all constituent observables in the tensor product.\n\n Returns:\n list[list[Any]]: nested list containing the parameters per observable\n in the tensor product\n \"\"\"\n return [o.parameters for o in self.obs]\n\n @property\n def non_identity_obs(self):\n \"\"\"Returns the non-identity observables contained in the tensor product.\n\n Returns:\n list[:class:`~.Observable`]: list containing the non-identity observables\n in the tensor product\n \"\"\"\n return [obs for obs in self.obs if not isinstance(obs, qml.Identity)]\n\n def __matmul__(self, other):\n if isinstance(other, Tensor):\n self.obs.extend(other.obs)\n return self\n\n if isinstance(other, Observable):\n self.obs.append(other)\n return self\n\n raise ValueError(\"Can only perform tensor products between observables.\")\n\n def __rmatmul__(self, other):\n if isinstance(other, Observable):\n self.obs[:0] = [other]\n return self\n\n raise ValueError(\"Can only perform tensor products between observables.\")\n\n __imatmul__ = __matmul__\n\n @property\n def eigvals(self):\n \"\"\"Return the eigenvalues of the specified tensor product observable.\n\n This method uses pre-stored eigenvalues for standard observables where\n possible.\n\n Returns:\n array[float]: array containing the eigenvalues of the tensor product\n observable\n \"\"\"\n if self._eigvals_cache is not None:\n return self._eigvals_cache\n\n standard_observables = {\"PauliX\", \"PauliY\", \"PauliZ\", \"Hadamard\"}\n\n # observable should be Z^{\\otimes n}\n self._eigvals_cache = pauli_eigs(len(self.wires))\n\n # Sort observables lexicographically by the strings of the wire labels\n # TODO: check for edge cases of the sorting, e.g. 
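# The `eigvals` logic above assembles the tensor-product spectrum as a
# Kronecker product of the constituent spectra.  A minimal numpy illustration
# for PauliZ(0) @ PauliZ(1): each factor has eigenvalues [1, -1], and their
# Kronecker product gives the eigenvalues in computational-basis order.
import numpy as np

z_eigs = np.array([1, -1])
tensor_eigs = np.kron(z_eigs, z_eigs)
assert np.array_equal(tensor_eigs, np.array([1, -1, -1, 1]))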
Tensor(Hermitian(obs, wires=[0, 2]),\n # Hermitian(obs, wires=[1, 3, 4])\n # Sorting the observables based on wires, so that the order of\n # the eigenvalues is correct\n obs_sorted = sorted(self.obs, key=lambda x: [str(l) for l in x.wires.labels])\n\n # check if there are any non-standard observables (such as Identity)\n if set(self.name) - standard_observables:\n # Tensor product of observables contains a mixture\n # of standard and non-standard observables\n self._eigvals_cache = np.array([1])\n for k, g in itertools.groupby(obs_sorted, lambda x: x.name in standard_observables):\n if k:\n # Subgroup g contains only standard observables.\n self._eigvals_cache = np.kron(self._eigvals_cache, pauli_eigs(len(list(g))))\n else:\n # Subgroup g contains only non-standard observables.\n for ns_ob in g:\n # loop through all non-standard observables\n self._eigvals_cache = np.kron(self._eigvals_cache, ns_ob.eigvals)\n\n return self._eigvals_cache\n\n def diagonalizing_gates(self):\n \"\"\"Return the gate set that diagonalizes a circuit according to the\n specified tensor observable.\n\n This method uses pre-stored eigenvalues for standard observables where\n possible and stores the corresponding eigenvectors from the eigendecomposition.\n\n Returns:\n list: list containing the gates diagonalizing the tensor observable\n \"\"\"\n diag_gates = []\n for o in self.obs:\n diag_gates.extend(o.diagonalizing_gates())\n\n return diag_gates\n\n @property\n def matrix(self):\n r\"\"\"Matrix representation of the tensor operator\n in the computational basis.\n\n **Example:**\n\n Note that the returned matrix *only includes explicitly\n declared observables* making up the tensor product;\n that is, it only returns the matrix for the specified\n subsystem it is defined for.\n\n >>> O = qml.PauliZ(0) @ qml.PauliZ(2)\n >>> O.matrix\n array([[ 1, 0, 0, 0],\n [ 0, -1, 0, 0],\n [ 0, 0, -1, 0],\n [ 0, 0, 0, 1]])\n\n To get the full :math:`2^3\\times 2^3` Hermitian matrix\n acting on the 3-qubit system, the identity on wire 1\n must be explicitly included:\n\n >>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)\n >>> O.matrix\n array([[ 1., 0., 0., 0., 0., 0., 0., 0.],\n [ 0., -1., 0., -0., 0., -0., 0., -0.],\n [ 0., 0., 1., 0., 0., 0., 0., 0.],\n [ 0., -0., 0., -1., 0., -0., 0., -0.],\n [ 0., 0., 0., 0., -1., -0., -0., -0.],\n [ 0., -0., 0., -0., -0., 1., -0., 0.],\n [ 0., 0., 0., 0., -0., -0., -1., -0.],\n [ 0., -0., 0., -0., -0., 0., -0., 1.]])\n\n Returns:\n array: matrix representation\n \"\"\"\n # group the observables based on what wires they act on\n U_list = []\n for _, g in itertools.groupby(self.obs, lambda x: x.wires.labels):\n # extract the matrices of each diagonalizing gate\n mats = [i.matrix for i in g]\n\n if len(mats) > 1:\n # multiply all unitaries together before appending\n mats = [multi_dot(mats)]\n\n # append diagonalizing unitary for specific wire to U_list\n U_list.append(mats[0])\n\n # Return the Hermitian matrix representing the observable\n # over the defined wires.\n return functools.reduce(np.kron, U_list)\n\n def prune(self):\n \"\"\"Returns a pruned tensor product of observables by removing :class:`~.Identity` instances from\n the observables building up the :class:`~.Tensor`.\n\n The ``return_type`` attribute is preserved while pruning.\n\n If the tensor product only contains one observable, then this observable instance is\n returned.\n\n Note that, as a result, this method can return observables that are not a :class:`~.Tensor`\n instance.\n\n **Example:**\n\n Pruning that 
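# Numpy check of the 4x4 matrix shown in the docstring above: the tensor
# product's matrix is the Kronecker product of the declared factors (here
# PauliZ on wires 0 and 2, with no identity inserted for wire 1, exactly as
# the docstring warns).
import functools
import numpy as np

Z = np.diag([1, -1])
O_matrix = functools.reduce(np.kron, [Z, Z])
expected = np.diag([1, -1, -1, 1])
assert np.array_equal(O_matrix, expected)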
returns a :class:`~.Tensor`:\n\n >>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)\n >>> O.prune()\n <pennylane.operation.Tensor at 0x7fc1642d1590\n >>> [(o.name, o.wires) for o in O.prune().obs]\n [('PauliZ', [0]), ('PauliZ', [2])]\n\n Pruning that returns a single observable:\n\n >>> O = qml.PauliZ(0) @ qml.Identity(1)\n >>> O_pruned = O.prune()\n >>> (O_pruned.name, O_pruned.wires)\n ('PauliZ', [0])\n\n Returns:\n ~.Observable: the pruned tensor product of observables\n \"\"\"\n if len(self.non_identity_obs) == 0:\n # Return a single Identity as the tensor only contains Identities\n obs = qml.Identity(self.wires[0])\n elif len(self.non_identity_obs) == 1:\n obs = self.non_identity_obs[0]\n else:\n obs = Tensor(*self.non_identity_obs)\n\n obs.return_type = self.return_type\n return obs\n\n\n# =============================================================================\n# CV Operations and observables\n# =============================================================================\n\n\nclass CV:\n \"\"\"A mixin base class denoting a continuous-variable operation.\"\"\"\n\n # pylint: disable=no-member\n\n def heisenberg_expand(self, U, wires):\n \"\"\"Expand the given local Heisenberg-picture array into a full-system one.\n\n Args:\n U (array[float]): array to expand (expected to be of the dimension ``1+2*self.num_wires``)\n wires (Wires): wires on the device the array ``U`` should be expanded\n to apply to\n\n Raises:\n ValueError: if the size of the input matrix is invalid or `num_wires` is incorrect\n\n Returns:\n array[float]: expanded array, dimension ``1+2*num_wires``\n \"\"\"\n\n U_dim = len(U)\n nw = len(self.wires)\n\n if U.ndim > 2:\n raise ValueError(\"Only order-1 and order-2 arrays supported.\")\n\n if U_dim != 1 + 2 * nw:\n raise ValueError(\"{}: Heisenberg matrix is the wrong size {}.\".format(self.name, U_dim))\n\n if len(wires) == 0 or len(self.wires) == len(wires):\n # no expansion necessary (U is a full-system matrix in the correct order)\n return U\n\n if not wires.contains_wires(self.wires):\n raise ValueError(\n \"{}: Some observable wires {} do not exist on this device with wires {}\".format(\n self.name, self.wires, wires\n )\n )\n\n # get the indices that the operation's wires have on the device\n wire_indices = wires.indices(self.wires)\n\n # expand U into the I, x_0, p_0, x_1, p_1, ... 
basis\n dim = 1 + len(wires) * 2\n\n def loc(w):\n \"Returns the slice denoting the location of (x_w, p_w) in the basis.\"\n ind = 2 * w + 1\n return slice(ind, ind + 2)\n\n if U.ndim == 1:\n W = np.zeros(dim)\n W[0] = U[0]\n for k, w in enumerate(wire_indices):\n W[loc(w)] = U[loc(k)]\n elif U.ndim == 2:\n if isinstance(self, Observable):\n W = np.zeros((dim, dim))\n else:\n W = np.eye(dim)\n\n W[0, 0] = U[0, 0]\n\n for k1, w1 in enumerate(wire_indices):\n s1 = loc(k1)\n d1 = loc(w1)\n\n # first column\n W[d1, 0] = U[s1, 0]\n # first row (for gates, the first row is always (1, 0, 0, ...), but not for observables!)\n W[0, d1] = U[0, s1]\n\n for k2, w2 in enumerate(wire_indices):\n W[d1, loc(w2)] = U[s1, loc(k2)] # block k1, k2 in U goes to w1, w2 in W.\n return W\n\n @staticmethod\n def _heisenberg_rep(p):\n r\"\"\"Heisenberg picture representation of the operation.\n\n * For Gaussian CV gates, this method returns the matrix of the linear\n transformation carried out by the gate for the given parameter values.\n The method is not defined for non-Gaussian gates.\n\n **The existence of this method is equivalent to setting** ``grad_method = 'A'``.\n\n * For observables, returns a real vector (first-order observables) or\n symmetric matrix (second-order observables) of expansion coefficients\n of the observable.\n\n For single-mode Operations we use the basis :math:`\\mathbf{r} = (\\I, \\x, \\p)`.\n For multi-mode Operations we use the basis :math:`\\mathbf{r} = (\\I, \\x_0, \\p_0, \\x_1, \\p_1, \\ldots)`.\n\n .. note::\n\n For gates, we assume that the inverse transformation is obtained\n by negating the first parameter.\n\n Args:\n p (Sequence[float]): parameter values for the transformation\n\n Returns:\n array[float]: :math:`\\tilde{U}` or :math:`q`\n \"\"\"\n # pylint: disable=unused-argument\n return None\n\n @classproperty\n def supports_heisenberg(self):\n \"\"\"Returns True iff the CV Operation has overridden the :meth:`~.CV._heisenberg_rep`\n static method, thereby indicating that it is Gaussian and does not block the use\n of the parameter-shift differentiation method if found between the differentiated gate\n and an observable.\n \"\"\"\n return CV._heisenberg_rep != self._heisenberg_rep\n\n\nclass CVOperation(CV, Operation):\n \"\"\"Base class for continuous-variable quantum operations.\"\"\"\n\n # pylint: disable=abstract-method\n\n @classproperty\n def supports_parameter_shift(self):\n \"\"\"Returns True iff the CV Operation supports the parameter-shift differentiation method.\n This means that it has ``grad_method='A'`` and\n has overridden the :meth:`~.CV._heisenberg_rep` static method.\n \"\"\"\n return self.grad_method == \"A\" and self.supports_heisenberg\n\n def heisenberg_pd(self, idx):\n \"\"\"Partial derivative of the Heisenberg picture transform matrix.\n\n Computed using grad_recipe.\n\n Args:\n idx (int): index of the parameter with respect to which the\n partial derivative is computed.\n Returns:\n array[float]: partial derivative\n \"\"\"\n # get the gradient recipe for this parameter\n recipe = self.grad_recipe[idx]\n\n # Default values\n multiplier = 0.5\n a = 1\n shift = np.pi / 2\n\n # We set the default recipe to as follows:\n # ∂f(x) = c*f(x+s) - c*f(x-s)\n default_param_shift = [[multiplier, a, shift], [-multiplier, a, -shift]]\n param_shift = default_param_shift if recipe is None else recipe\n\n pd = None # partial derivative of the transformation\n\n p = self.parameters\n\n original_p_idx = p[idx]\n for c, _a, s in param_shift:\n # evaluate the transform 
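# Stand-alone sketch of the first-order (vector) branch of `heisenberg_expand`
# above: a local vector in the (I, x, p) basis of one mode is embedded into
# the (I, x_0, p_0, x_1, p_1, ...) basis of the full device.  `wire_index`
# plays the role of the device index obtained from `wires.indices(self.wires)`.
import numpy as np

def loc(w):
    """Slice selecting (x_w, p_w) in the full-system basis."""
    ind = 2 * w + 1
    return slice(ind, ind + 2)

def expand_vector(U_local, wire_index, num_device_wires):
    dim = 1 + 2 * num_device_wires
    W = np.zeros(dim)
    W[0] = U_local[0]                       # identity component
    W[loc(wire_index)] = U_local[loc(0)]    # (x, p) block of the single mode
    return W

# A local observable x on one mode, placed on device wire 1 of a 3-wire device:
x_local = np.array([0.0, 1.0, 0.0])         # q = (0, 1, 0) means Q = x
print(expand_vector(x_local, wire_index=1, num_device_wires=3))
# -> [0. 0. 0. 1. 0. 0. 0.]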
at the shifted parameter values\n p[idx] = _a * original_p_idx + s\n U = self._heisenberg_rep(p) # pylint: disable=assignment-from-none\n\n if pd is None:\n pd = c * U\n else:\n pd += c * U\n\n return pd\n\n def heisenberg_tr(self, wires, inverse=False):\n r\"\"\"Heisenberg picture representation of the linear transformation carried\n out by the gate at current parameter values.\n\n Given a unitary quantum gate :math:`U`, we may consider its linear\n transformation in the Heisenberg picture, :math:`U^\\dagger(\\cdot) U`.\n\n If the gate is Gaussian, this linear transformation preserves the polynomial order\n of any observables that are polynomials in :math:`\\mathbf{r} = (\\I, \\x_0, \\p_0, \\x_1, \\p_1, \\ldots)`.\n This also means it maps :math:`\\text{span}(\\mathbf{r})` into itself:\n\n .. math:: U^\\dagger \\mathbf{r}_i U = \\sum_j \\tilde{U}_{ij} \\mathbf{r}_j\n\n For Gaussian CV gates, this method returns the transformation matrix for\n the current parameter values of the Operation. The method is not defined\n for non-Gaussian (and non-CV) gates.\n\n Args:\n wires (Wires): wires on the device that the observable gets applied to\n inverse (bool): if True, return the inverse transformation instead\n\n Raises:\n RuntimeError: if the specified operation is not Gaussian or is missing the `_heisenberg_rep` method\n\n Returns:\n array[float]: :math:`\\tilde{U}`, the Heisenberg picture representation of the linear transformation\n \"\"\"\n p = self.parameters\n if inverse:\n if self.par_domain == \"A\":\n # TODO: expand this for the new par domain class, for non-unitary matrices.\n p[0] = np.linalg.inv(p[0])\n else:\n p[0] = -p[0] # negate first parameter\n U = self._heisenberg_rep(p) # pylint: disable=assignment-from-none\n\n # not defined?\n if U is None:\n raise RuntimeError(\n \"{} is not a Gaussian operation, or is missing the _heisenberg_rep method.\".format(\n self.name\n )\n )\n\n return self.heisenberg_expand(U, wires)\n\n\nclass CVObservable(CV, Observable):\n r\"\"\"Base class for continuous-variable observables.\n\n The class attribute :attr:`~.ev_order` can be defined to indicate\n to PennyLane whether the corresponding CV observable is a polynomial in the\n quadrature operators. 
If so,\n\n * ``ev_order = 1`` indicates a first order polynomial in quadrature\n operators :math:`(\\x, \\p)`.\n\n * ``ev_order = 2`` indicates a second order polynomial in quadrature\n operators :math:`(\\x, \\p)`.\n\n If :attr:`~.ev_order` is not ``None``, then the Heisenberg representation\n of the observable should be defined in the static method :meth:`~.CV._heisenberg_rep`,\n returning an array of the correct dimension.\n \"\"\"\n # pylint: disable=abstract-method\n ev_order = None #: None, int: if not None, the observable is a polynomial of the given order in `(x, p)`.\n\n def heisenberg_obs(self, wires):\n r\"\"\"Representation of the observable in the position/momentum operator basis.\n\n Returns the expansion :math:`q` of the observable, :math:`Q`, in the\n basis :math:`\\mathbf{r} = (\\I, \\x_0, \\p_0, \\x_1, \\p_1, \\ldots)`.\n\n * For first-order observables returns a real vector such\n that :math:`Q = \\sum_i q_i \\mathbf{r}_i`.\n\n * For second-order observables returns a real symmetric matrix\n such that :math:`Q = \\sum_{ij} q_{ij} \\mathbf{r}_i \\mathbf{r}_j`.\n\n Args:\n wires (Wires): wires on the device that the observable gets applied to\n Returns:\n array[float]: :math:`q`\n \"\"\"\n p = self.parameters\n U = self._heisenberg_rep(p) # pylint: disable=assignment-from-none\n return self.heisenberg_expand(U, wires)\n\n\ndef operation_derivative(operation) -> np.ndarray:\n r\"\"\"Calculate the derivative of an operation.\n\n For an operation :math:`e^{i \\hat{H} \\phi t}`, this function returns the matrix representation\n in the standard basis of its derivative with respect to :math:`t`, i.e.,\n\n .. math:: \\frac{d \\, e^{i \\hat{H} \\phi t}}{dt} = i \\phi \\hat{H} e^{i \\hat{H} \\phi t},\n\n where :math:`\\phi` is a real constant.\n\n Args:\n operation (.Operation): The operation to be differentiated.\n\n Returns:\n array: the derivative of the operation as a matrix in the standard basis\n\n Raises:\n ValueError: if the operation does not have a generator or is not composed of a single\n trainable parameter\n \"\"\"\n generator, prefactor = operation.generator\n\n if generator is None:\n raise ValueError(f\"Operation {operation.name} does not have a generator\")\n if operation.num_params != 1:\n # Note, this case should already be caught by the previous raise since we haven't worked out\n # how to have an operator for multiple parameters. 
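# Finite-difference check of the identity used by `operation_derivative`
# above, d/dt exp(i*H*phi*t) = i*phi*H*exp(i*H*phi*t), for a Pauli-X generator
# with prefactor phi = 0.7.  The closed form exp(i*a*X) = cos(a)*I + i*sin(a)*X
# keeps the sketch numpy-only.
import numpy as np

X = np.array([[0, 1], [1, 0]], dtype=complex)
I2 = np.eye(2, dtype=complex)
phi, t, eps = 0.7, 0.3, 1e-6

def U(t):
    a = phi * t
    return np.cos(a) * I2 + 1j * np.sin(a) * X

analytic = 1j * phi * X @ U(t)
numeric = (U(t + eps) - U(t - eps)) / (2 * eps)
assert np.allclose(analytic, numeric, atol=1e-6)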
It is added here in case of a future\n # change\n raise ValueError(\n f\"Operation {operation.name} is not written in terms of a single parameter\"\n )\n\n if not isinstance(generator, np.ndarray):\n generator = generator.matrix\n\n if operation.inverse:\n prefactor *= -1\n generator = generator.conj().T\n\n return 1j * prefactor * generator @ operation.matrix\n", "# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUnit tests for the :mod:`pennylane` :class:`QubitDevice` class.\n\"\"\"\nimport pytest\nimport numpy as np\nfrom random import random\n\nimport pennylane as qml\nfrom pennylane import QubitDevice, DeviceError\nfrom pennylane.qnodes import QuantumFunctionError\nfrom pennylane.qnodes.base import BaseQNode\nfrom pennylane.operation import Sample, Variance, Expectation, Probability, State\nfrom pennylane.circuit_graph import CircuitGraph\nfrom pennylane.variable import Variable\nfrom pennylane.wires import Wires\nfrom pennylane.tape import QuantumTape\nfrom pennylane.tape.measure import state\n\nmock_qubit_device_paulis = [\"PauliX\", \"PauliY\", \"PauliZ\"]\nmock_qubit_device_rotations = [\"RX\", \"RY\", \"RZ\"]\n\n\n# pylint: disable=abstract-class-instantiated, no-self-use, redefined-outer-name, invalid-name\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_qubit_device(monkeypatch):\n \"\"\" A function to create a mock device that mocks most of the methods except for e.g. 
probability()\"\"\"\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"__abstractmethods__\", frozenset())\n m.setattr(QubitDevice, \"_capabilities\", mock_qubit_device_capabilities)\n m.setattr(QubitDevice, \"operations\", [\"PauliY\", \"RX\", \"Rot\"])\n m.setattr(QubitDevice, \"observables\", [\"PauliZ\"])\n m.setattr(QubitDevice, \"short_name\", \"MockDevice\")\n m.setattr(QubitDevice, \"expval\", lambda self, x: 0)\n m.setattr(QubitDevice, \"var\", lambda self, x: 0)\n m.setattr(QubitDevice, \"sample\", lambda self, x: 0)\n m.setattr(QubitDevice, \"apply\", lambda self, x: None)\n\n def get_qubit_device(wires=1):\n return QubitDevice(wires=wires)\n\n yield get_qubit_device\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_qubit_device_extract_stats(monkeypatch):\n \"\"\" A function to create a mock device that mocks the methods related to\n statistics (expval, var, sample, probability)\"\"\"\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"__abstractmethods__\", frozenset())\n m.setattr(QubitDevice, \"_capabilities\", mock_qubit_device_capabilities)\n m.setattr(QubitDevice, \"operations\", [\"PauliY\", \"RX\", \"Rot\"])\n m.setattr(QubitDevice, \"observables\", [\"PauliZ\"])\n m.setattr(QubitDevice, \"short_name\", \"MockDevice\")\n m.setattr(QubitDevice, \"expval\", lambda self, x: 0)\n m.setattr(QubitDevice, \"var\", lambda self, x: 0)\n m.setattr(QubitDevice, \"sample\", lambda self, x: 0)\n m.setattr(QubitDevice, \"state\", 0)\n m.setattr(QubitDevice, \"density_matrix\", lambda self, wires=None: 0)\n m.setattr(\n QubitDevice, \"probability\", lambda self, wires=None: 0\n )\n m.setattr(QubitDevice, \"apply\", lambda self, x: x)\n\n def get_qubit_device(wires=1):\n return QubitDevice(wires=wires)\n\n yield get_qubit_device\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_qubit_device_with_original_statistics(monkeypatch):\n \"\"\" A function to create a mock device that mocks only basis methods and uses the original\n statistics related methods\"\"\"\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"__abstractmethods__\", frozenset())\n m.setattr(QubitDevice, \"_capabilities\", mock_qubit_device_capabilities)\n m.setattr(QubitDevice, \"operations\", [\"PauliY\", \"RX\", \"Rot\"])\n m.setattr(QubitDevice, \"observables\", [\"PauliZ\"])\n m.setattr(QubitDevice, \"short_name\", \"MockDevice\")\n\n def get_qubit_device(wires=1):\n return QubitDevice(wires=wires)\n\n yield get_qubit_device\n\n\nmock_qubit_device_capabilities = {\n \"measurements\": \"everything\",\n \"returns_state\": True,\n \"noise_models\": [\"depolarizing\", \"bitflip\"],\n}\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_qubit_device_with_paulis_and_methods(monkeypatch):\n \"\"\"A function to create a mock device that supports Paulis in its capabilities\"\"\"\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"__abstractmethods__\", frozenset())\n m.setattr(QubitDevice, \"_capabilities\", mock_qubit_device_capabilities)\n m.setattr(QubitDevice, \"operations\", mock_qubit_device_paulis)\n m.setattr(QubitDevice, \"observables\", mock_qubit_device_paulis)\n m.setattr(QubitDevice, \"short_name\", \"MockDevice\")\n m.setattr(QubitDevice, \"expval\", lambda self, x: 0)\n m.setattr(QubitDevice, \"var\", lambda self, x: 0)\n m.setattr(QubitDevice, \"sample\", lambda self, x: 0)\n m.setattr(QubitDevice, \"apply\", lambda self, x, rotations: None)\n\n def get_qubit_device(wires=1):\n return QubitDevice(wires=wires)\n\n yield 
get_qubit_device\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_qubit_device_with_paulis_rotations_and_methods(monkeypatch):\n \"\"\"A function to create a mock device that supports Paulis in its capabilities\"\"\"\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"__abstractmethods__\", frozenset())\n m.setattr(QubitDevice, \"_capabilities\", mock_qubit_device_capabilities)\n m.setattr(QubitDevice, \"operations\", mock_qubit_device_paulis + mock_qubit_device_rotations)\n m.setattr(QubitDevice, \"observables\", mock_qubit_device_paulis)\n m.setattr(QubitDevice, \"short_name\", \"MockDevice\")\n m.setattr(QubitDevice, \"expval\", lambda self, x: 0)\n m.setattr(QubitDevice, \"var\", lambda self, x: 0)\n m.setattr(QubitDevice, \"sample\", lambda self, x: 0)\n m.setattr(QubitDevice, \"apply\", lambda self, x: None)\n\n def get_qubit_device(wires=1):\n return QubitDevice(wires=wires)\n\n yield get_qubit_device\n\n\nclass TestOperations:\n \"\"\"Tests the logic related to operations\"\"\"\n\n def test_op_queue_accessed_outside_execution_context(self, mock_qubit_device):\n \"\"\"Tests that a call to op_queue outside the execution context raises the correct error\"\"\"\n\n with pytest.raises(\n ValueError, match=\"Cannot access the operation queue outside of the execution context!\"\n ):\n dev = mock_qubit_device()\n dev.op_queue\n\n def test_op_queue_is_filled_during_execution(\n self, mock_qubit_device_with_paulis_and_methods, monkeypatch\n ):\n \"\"\"Tests that the op_queue is correctly filled when apply is called and that accessing\n op_queue raises no error\"\"\"\n queue = [qml.PauliX(wires=0), qml.PauliY(wires=1), qml.PauliZ(wires=2)]\n\n observables = [qml.expval(qml.PauliZ(0)), qml.var(qml.PauliZ(1)), qml.sample(qml.PauliZ(2))]\n\n circuit_graph = CircuitGraph(queue + observables, {}, Wires([0, 1, 2]))\n\n call_history = []\n\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"apply\",\n lambda self, x, **kwargs: call_history.extend(x + kwargs.get('rotations', [])))\n m.setattr(QubitDevice, \"analytic_probability\", lambda *args: None)\n dev = mock_qubit_device_with_paulis_and_methods()\n dev.execute(circuit_graph)\n\n assert call_history == queue\n\n assert len(call_history) == 3\n assert isinstance(call_history[0], qml.PauliX)\n assert call_history[0].wires == Wires([0])\n\n assert isinstance(call_history[1], qml.PauliY)\n assert call_history[1].wires == Wires([1])\n\n assert isinstance(call_history[2], qml.PauliZ)\n assert call_history[2].wires == Wires([2])\n\n def test_unsupported_operations_raise_error(self, mock_qubit_device_with_paulis_and_methods):\n \"\"\"Tests that the operations are properly applied and queued\"\"\"\n queue = [qml.PauliX(wires=0), qml.PauliY(wires=1), qml.Hadamard(wires=2)]\n\n observables = [qml.expval(qml.PauliZ(0)), qml.var(qml.PauliZ(1)), qml.sample(qml.PauliZ(2))]\n\n circuit_graph = CircuitGraph(queue + observables, {}, Wires([0, 1, 2]))\n\n with pytest.raises(DeviceError, match=\"Gate Hadamard not supported on device\"):\n dev = mock_qubit_device_with_paulis_and_methods()\n dev.execute(circuit_graph)\n\n numeric_queues = [\n [\n qml.RX(0.3, wires=[0])\n ],\n [\n qml.RX(0.3, wires=[0]),\n qml.RX(0.4, wires=[1]),\n qml.RX(0.5, wires=[2]),\n ]\n ]\n\n variable = Variable(1)\n symbolic_queue = [\n [qml.RX(variable, wires=[0])],\n ]\n\n observables = [\n [qml.PauliZ(0)],\n [qml.PauliX(0)],\n [qml.PauliY(0)]\n ]\n\n @pytest.mark.parametrize(\"observables\", observables)\n @pytest.mark.parametrize(\"queue\", numeric_queues + 
symbolic_queue)\n def test_passing_keyword_arguments_to_execute(self, mock_qubit_device_with_paulis_rotations_and_methods,\n monkeypatch, queue, observables):\n \"\"\"Tests that passing keyword arguments to execute propagates those kwargs to the apply()\n method\"\"\"\n circuit_graph = CircuitGraph(queue + observables, {}, Wires([0, 1, 2]))\n\n call_history = {}\n\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"apply\", lambda self, x, **kwargs: call_history.update(kwargs))\n dev = mock_qubit_device_with_paulis_rotations_and_methods()\n dev.execute(circuit_graph, hash=circuit_graph.hash)\n\n len(call_history.items()) == 1\n call_history[\"hash\"] = circuit_graph.hash\n\n\nclass TestObservables:\n \"\"\"Tests the logic related to observables\"\"\"\n\n # pylint: disable=no-self-use, redefined-outer-name\n\n def test_obs_queue_accessed_outside_execution_context(self, mock_qubit_device):\n \"\"\"Tests that a call to op_queue outside the execution context raises the correct error\"\"\"\n\n with pytest.raises(\n ValueError,\n match=\"Cannot access the observable value queue outside of the execution context!\",\n ):\n dev = mock_qubit_device()\n dev.obs_queue\n\n def test_unsupported_observables_raise_error(self, mock_qubit_device_with_paulis_and_methods):\n \"\"\"Tests that the operations are properly applied and queued\"\"\"\n queue = [qml.PauliX(wires=0), qml.PauliY(wires=1), qml.PauliZ(wires=2)]\n\n observables = [\n qml.expval(qml.Hadamard(0)),\n qml.var(qml.PauliZ(1)),\n qml.sample(qml.PauliZ(2)),\n ]\n\n circuit_graph = CircuitGraph(queue + observables, {}, Wires([0, 1, 2]))\n\n with pytest.raises(DeviceError, match=\"Observable Hadamard not supported on device\"):\n dev = mock_qubit_device_with_paulis_and_methods()\n dev.execute(circuit_graph)\n\n def test_unsupported_observable_return_type_raise_error(\n self, mock_qubit_device_with_paulis_and_methods, monkeypatch\n ):\n \"\"\"Check that an error is raised if the return type of an observable is unsupported\"\"\"\n\n queue = [qml.PauliX(wires=0)]\n\n # Make a observable without specifying a return operation upon measuring\n obs = qml.PauliZ(0)\n obs.return_type = \"SomeUnsupportedReturnType\"\n observables = [obs]\n\n circuit_graph = CircuitGraph(queue + observables, {}, Wires([0]))\n\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"apply\", lambda self, x, **kwargs: None)\n with pytest.raises(\n QuantumFunctionError, match=\"Unsupported return type specified for observable\"\n ):\n dev = mock_qubit_device_with_paulis_and_methods()\n dev.execute(circuit_graph)\n\n\nclass TestParameters:\n \"\"\"Test for checking device parameter mappings\"\"\"\n\n def test_parameters_accessed_outside_execution_context(self, mock_qubit_device):\n \"\"\"Tests that a call to parameters outside the execution context raises the correct error\"\"\"\n\n with pytest.raises(\n ValueError,\n match=\"Cannot access the free parameter mapping outside of the execution context!\",\n ):\n dev = mock_qubit_device()\n dev.parameters\n\n\nclass TestExtractStatistics:\n \"\"\"Test the statistics method\"\"\"\n\n @pytest.mark.parametrize(\"returntype\", [Expectation, Variance, Sample, Probability, State])\n def test_results_created(self, mock_qubit_device_extract_stats, monkeypatch, returntype):\n \"\"\"Tests that the statistics method simply builds a results list without any side-effects\"\"\"\n\n class SomeObservable(qml.operation.Observable):\n num_params = 0\n num_wires = 1\n par_domain = \"F\"\n return_type = returntype\n\n obs = 
SomeObservable(wires=0)\n\n with monkeypatch.context() as m:\n dev = mock_qubit_device_extract_stats()\n results = dev.statistics([obs])\n\n assert results == [0]\n\n def test_results_no_state(self, mock_qubit_device_extract_stats, monkeypatch):\n \"\"\"Tests that the statistics method raises an AttributeError when a State return type is\n requested when QubitDevice does not have a state attribute\"\"\"\n with monkeypatch.context():\n dev = mock_qubit_device_extract_stats()\n delattr(dev.__class__, \"state\")\n with pytest.raises(QuantumFunctionError, match=\"The state is not available in the current\"):\n dev.statistics([state()])\n\n @pytest.mark.parametrize(\"returntype\", [None])\n def test_results_created_empty(self, mock_qubit_device_extract_stats, monkeypatch, returntype):\n \"\"\"Tests that the statistics method returns an empty list if the return type is None\"\"\"\n\n class SomeObservable(qml.operation.Observable):\n num_params = 0\n num_wires = 1\n par_domain = \"F\"\n return_type = returntype\n\n obs = SomeObservable(wires=0)\n\n with monkeypatch.context() as m:\n dev = mock_qubit_device_extract_stats()\n results = dev.statistics([obs])\n\n assert results == []\n\n @pytest.mark.parametrize(\"returntype\", [\"not None\"])\n def test_error_return_type_none(self, mock_qubit_device_extract_stats, monkeypatch, returntype):\n \"\"\"Tests that the statistics method raises an error if the return type is not well-defined and is not None\"\"\"\n\n assert returntype not in [Expectation, Variance, Sample, Probability, State, None]\n\n class SomeObservable(qml.operation.Observable):\n num_params = 0\n num_wires = 1\n par_domain = \"F\"\n return_type = returntype\n\n obs = SomeObservable(wires=0)\n\n with pytest.raises(QuantumFunctionError, match=\"Unsupported return type\"):\n dev = mock_qubit_device_extract_stats()\n dev.statistics([obs])\n\n\nclass TestGenerateSamples:\n \"\"\"Test the generate_samples method\"\"\"\n\n def test_auxiliary_methods_called_correctly(self, mock_qubit_device, monkeypatch):\n \"\"\"Tests that the generate_samples method calls on its auxiliary methods correctly\"\"\"\n\n dev = mock_qubit_device()\n number_of_states = 2 ** dev.num_wires\n\n with monkeypatch.context() as m:\n # Mock the auxiliary methods such that they return the expected values\n m.setattr(QubitDevice, \"sample_basis_states\", lambda self, wires, b: wires)\n m.setattr(QubitDevice, \"states_to_binary\", lambda a, b: (a, b))\n m.setattr(QubitDevice, \"analytic_probability\", lambda *args: None)\n dev._samples = dev.generate_samples()\n\n assert dev._samples == (number_of_states, dev.num_wires)\n\n\nclass TestSampleBasisStates:\n \"\"\"Test the sample_basis_states method\"\"\"\n\n def test_sampling_with_correct_arguments(self, mock_qubit_device, monkeypatch):\n \"\"\"Tests that the sample_basis_states method samples with the correct arguments\"\"\"\n\n shots = 1000\n\n number_of_states = 4\n dev = mock_qubit_device()\n dev.shots = shots\n state_probs = [0.1, 0.2, 0.3, 0.4]\n\n with monkeypatch.context() as m:\n # Mock the numpy.random.choice method such that it returns the expected values\n m.setattr(\"numpy.random.choice\", lambda x, y, p: (x, y, p))\n res = dev.sample_basis_states(number_of_states, state_probs)\n\n assert np.array_equal(res[0], np.array([0, 1, 2, 3]))\n assert res[1] == shots\n assert res[2] == state_probs\n\n\nclass TestStatesToBinary:\n \"\"\"Test the states_to_binary method\"\"\"\n\n def test_correct_conversion_two_states(self, mock_qubit_device):\n \"\"\"Tests that the 
sample_basis_states method converts samples to binary correctly\"\"\"\n wires = 4\n shots = 10\n\n number_of_states = 2 ** wires\n basis_states = np.arange(number_of_states)\n samples = np.random.choice(basis_states, shots)\n\n dev = mock_qubit_device()\n res = dev.states_to_binary(samples, wires)\n\n format_smt = \"{{:0{}b}}\".format(wires)\n expected = np.array([[int(x) for x in list(format_smt.format(i))] for i in samples])\n\n assert np.all(res == expected)\n\n test_binary_conversion_data = [\n (np.array([2, 3, 2, 0, 0]), np.array([[1, 0], [1, 1], [1, 0], [0, 0], [0, 0]])),\n (np.array([2, 3, 1, 3, 1]), np.array([[1, 0], [1, 1], [0, 1], [1, 1], [0, 1]])),\n (\n np.array([7, 7, 1, 5, 2]),\n np.array([[1, 1, 1], [1, 1, 1], [0, 0, 1], [1, 0, 1], [0, 1, 0]]),\n ),\n ]\n\n @pytest.mark.parametrize(\"samples, binary_states\", test_binary_conversion_data)\n def test_correct_conversion(self, mock_qubit_device, samples, binary_states, tol):\n \"\"\"Tests that the states_to_binary method converts samples to binary correctly\"\"\"\n dev = mock_qubit_device()\n dev.shots = 5\n wires = binary_states.shape[1]\n res = dev.states_to_binary(samples, wires)\n assert np.allclose(res, binary_states, atol=tol, rtol=0)\n\n\nclass TestExpval:\n \"\"\"Test the expval method\"\"\"\n\n def test_analytic_expval(self, mock_qubit_device_with_original_statistics, monkeypatch):\n \"\"\"Tests that expval method when the analytic attribute is True\n\n Additional QubitDevice methods that are mocked:\n -rotate_basis\n -probability\n \"\"\"\n obs = qml.PauliX(0)\n probs = [0.5, 0.5]\n dev = mock_qubit_device_with_original_statistics()\n\n assert dev.analytic\n\n call_history = []\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"probability\", lambda self, wires=None: probs)\n res = dev.expval(obs)\n\n assert res == (obs.eigvals @ probs).real\n\n def test_non_analytic_expval(self, mock_qubit_device_with_original_statistics, monkeypatch):\n \"\"\"Tests that expval method when the analytic attribute is False\n\n Additional QubitDevice methods that are mocked:\n -rotate_basis\n -sample\n -numpy.mean\n \"\"\"\n obs = qml.PauliX(0)\n dev = mock_qubit_device_with_original_statistics()\n\n assert dev.analytic\n dev.analytic = False\n\n assert not dev.analytic\n\n call_history = []\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"sample\", lambda self, obs: obs)\n m.setattr(\"numpy.mean\", lambda obs: obs)\n res = dev.expval(obs)\n\n assert res == obs\n\n\nclass TestVar:\n \"\"\"Test the var method\"\"\"\n\n def test_analytic_var(self, mock_qubit_device_with_original_statistics, monkeypatch):\n \"\"\"Tests that var method when the analytic attribute is True\n\n Additional QubitDevice methods that are mocked:\n -rotate_basis\n -probability\n \"\"\"\n obs = qml.PauliX(0)\n probs = [0.5, 0.5]\n dev = mock_qubit_device_with_original_statistics()\n\n assert dev.analytic\n\n call_history = []\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"probability\", lambda self, wires=None: probs)\n res = dev.var(obs)\n\n assert res == (obs.eigvals ** 2) @ probs - (obs.eigvals @ probs).real ** 2\n\n def test_non_analytic_var(self, mock_qubit_device_with_original_statistics, monkeypatch):\n \"\"\"Tests that var method when the analytic attribute is False\n\n Additional QubitDevice methods that are mocked:\n -rotate_basis\n -sample\n -numpy.var\n \"\"\"\n obs = qml.PauliX(0)\n dev = mock_qubit_device_with_original_statistics()\n\n assert dev.analytic\n dev.analytic = False\n\n assert not dev.analytic\n\n 
call_history = []\n with monkeypatch.context() as m:\n m.setattr(QubitDevice, \"sample\", lambda self, obs: obs)\n m.setattr(\"numpy.var\", lambda obs: obs)\n res = dev.var(obs)\n\n assert res == obs\n\n\nclass TestSample:\n \"\"\"Test the sample method\"\"\"\n\n def test_only_ones_minus_ones(\n self, mock_qubit_device_with_original_statistics, monkeypatch, tol\n ):\n \"\"\"Test that pauli_eigvals_as_samples method only produces -1 and 1 samples\"\"\"\n obs = qml.PauliX(0)\n dev = mock_qubit_device_with_original_statistics()\n dev._samples = np.array([[1, 0], [0, 0]])\n\n with monkeypatch.context() as m:\n res = dev.sample(obs)\n\n assert np.allclose(res ** 2, 1, atol=tol, rtol=0)\n\n def test_correct_custom_eigenvalues(\n self, mock_qubit_device_with_original_statistics, monkeypatch, tol\n ):\n \"\"\"Test that pauli_eigvals_as_samples method only produces samples of eigenvalues\"\"\"\n obs = qml.PauliX(0) @ qml.PauliZ(1)\n dev = mock_qubit_device_with_original_statistics(wires=2)\n dev._samples = np.array([[1, 0], [0, 0]])\n\n with monkeypatch.context() as m:\n res = dev.sample(obs)\n\n assert np.array_equal(res, np.array([-1, 1]))\n\n\nclass TestEstimateProb:\n \"\"\"Test the estimate_probability method\"\"\"\n\n @pytest.mark.parametrize(\"wires, expected\", [([0], [0.5, 0.5]),\n (None, [0.5, 0, 0, 0.5]),\n ([0, 1], [0.5, 0, 0, 0.5])\n ])\n def test_estimate_probability(self, wires, expected, mock_qubit_device_with_original_statistics, monkeypatch):\n \"\"\"Tests probability method when the analytic attribute is True.\"\"\"\n dev = mock_qubit_device_with_original_statistics(wires=2)\n samples = np.array([[0, 0], [1, 1], [1, 1], [0, 0]])\n\n with monkeypatch.context() as m:\n m.setattr(dev, \"_samples\", samples)\n m.setattr(dev, \"shots\", 4)\n res = dev.estimate_probability(wires=wires)\n\n assert np.allclose(res, expected)\n\n\nclass TestMarginalProb:\n \"\"\"Test the marginal_prob method\"\"\"\n\n @pytest.mark.parametrize(\n \"wires, inactive_wires\",\n [\n ([0], [1, 2]),\n ([1], [0, 2]),\n ([2], [0, 1]),\n ([0, 1], [2]),\n ([0, 2], [1]),\n ([1, 2], [0]),\n ([0, 1, 2], []),\n (Wires([0]), [1, 2]),\n (Wires([0, 1]), [2]),\n (Wires([0, 1, 2]), []),\n ],\n )\n def test_correct_arguments_for_marginals(\n self, mock_qubit_device_with_original_statistics, mocker, wires, inactive_wires, tol\n ):\n \"\"\"Test that the correct arguments are passed to the marginal_prob method\"\"\"\n\n # Generate probabilities\n probs = np.array([random() for i in range(2 ** 3)])\n probs /= sum(probs)\n\n spy = mocker.spy(np, \"sum\")\n dev = mock_qubit_device_with_original_statistics(wires=3)\n res = dev.marginal_prob(probs, wires=wires)\n array_call = spy.call_args[0][0]\n axis_call = spy.call_args[1]['axis']\n\n assert np.allclose(array_call.flatten(), probs, atol=tol, rtol=0)\n assert axis_call == tuple(inactive_wires)\n\n marginal_test_data = [\n (np.array([0.1, 0.2, 0.3, 0.4]), np.array([0.4, 0.6]), [1]),\n (np.array([0.1, 0.2, 0.3, 0.4]), np.array([0.3, 0.7]), Wires([0])),\n (\n np.array(\n [\n 0.17794671,\n 0.06184147,\n 0.21909549,\n 0.04932204,\n 0.19595214,\n 0.19176834,\n 0.08495311,\n 0.0191207,\n ]\n ),\n np.array([0.3970422, 0.28090525, 0.11116351, 0.21088904]),\n [2, 0],\n ),\n ]\n\n @pytest.mark.parametrize(\"probs, marginals, wires\", marginal_test_data)\n def test_correct_marginals_returned(\n self, mock_qubit_device_with_original_statistics, probs, marginals, wires, tol\n ):\n \"\"\"Test that the correct marginals are returned by the marginal_prob method\"\"\"\n num_wires = 
int(np.log2(len(probs)))\n dev = mock_qubit_device_with_original_statistics(num_wires)\n res = dev.marginal_prob(probs, wires=wires)\n assert np.allclose(res, marginals, atol=tol, rtol=0)\n\n @pytest.mark.parametrize(\"probs, marginals, wires\", marginal_test_data)\n def test_correct_marginals_returned_wires_none(\n self, mock_qubit_device_with_original_statistics, probs, marginals, wires, tol\n ):\n \"\"\"Test that passing wires=None simply returns the original probability.\"\"\"\n num_wires = int(np.log2(len(probs)))\n dev = mock_qubit_device_with_original_statistics(wires=num_wires)\n dev.num_wires = num_wires\n\n res = dev.marginal_prob(probs, wires=None)\n assert np.allclose(res, probs, atol=tol, rtol=0)\n\n\nclass TestActiveWires:\n \"\"\"Test that the active_wires static method works as required.\"\"\"\n\n def test_active_wires_from_queue(self, mock_qubit_device):\n queue = [\n qml.CNOT(wires=[0, 2]),\n qml.RX(0.2, wires=0),\n qml.expval(qml.PauliX(wires=5))\n ]\n\n dev = mock_qubit_device(wires=6)\n res = dev.active_wires(queue)\n\n assert res == Wires([0, 2, 5])\n\n\nclass TestCapabilities:\n \"\"\"Test that a default qubit device defines capabilities that all devices inheriting\n from it will automatically have.\"\"\"\n\n def test_defines_correct_capabilities(self):\n \"\"\"Test that the device defines the right capabilities\"\"\"\n capabilities = {\"model\": \"qubit\",\n \"supports_finite_shots\": True,\n \"supports_tensor_observables\": True,\n \"returns_probs\": True,\n }\n assert capabilities == QubitDevice.capabilities()\n\n\nclass TestExecution:\n \"\"\"Tests for the execute method\"\"\"\n\n def test_device_executions(self):\n \"\"\"Test the number of times a qubit device is executed over a QNode's\n lifetime is tracked by `num_executions`\"\"\"\n\n dev_1 = qml.device(\"default.qubit\", wires=2)\n\n def circuit_1(x, y):\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\n\n node_1 = BaseQNode(circuit_1, dev_1)\n num_evals_1 = 10\n\n for _ in range(num_evals_1):\n node_1(0.432, 0.12)\n assert dev_1.num_executions == num_evals_1\n\n # test a second instance of a default qubit device\n dev_2 = qml.device(\"default.qubit\", wires=2)\n\n def circuit_2(x, y):\n qml.RX(x, wires=[0])\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\n\n node_2 = BaseQNode(circuit_2, dev_2)\n num_evals_2 = 5\n\n for _ in range(num_evals_2):\n node_2(0.432, 0.12)\n assert dev_2.num_executions == num_evals_2\n\n # test a new circuit on an existing instance of a qubit device\n def circuit_3(x, y):\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\n\n node_3 = BaseQNode(circuit_3, dev_1)\n num_evals_3 = 7\n\n for _ in range(num_evals_3):\n node_3(0.432, 0.12)\n assert dev_1.num_executions == num_evals_1 + num_evals_3\n\n def test_same_hash(self):\n \"\"\"Test that executing the same tape twice produces the same circuit\n hash.\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n\n with QuantumTape() as tape0:\n qml.RZ(0.3, wires=[0])\n qml.expval(qml.PauliX(0))\n\n tape0.execute(dev)\n orig_hash = dev.circuit_hash\n\n tape0.execute(dev)\n new_hash = dev.circuit_hash\n assert orig_hash == new_hash\n\n def test_different_hash(self):\n \"\"\"Test that executing different tapes affects the circuit hash.\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n\n with QuantumTape() as tape0:\n qml.RZ(0.3, wires=[0])\n qml.expval(qml.PauliX(0))\n\n 
tape0.execute(dev)\n orig_hash = dev.circuit_hash\n\n with QuantumTape() as tape1:\n qml.RY(1.3, wires=[0])\n qml.RX(0.9, wires=[0])\n qml.expval(qml.PauliX(0))\n\n tape1.execute(dev)\n new_hash = dev.circuit_hash\n assert orig_hash != new_hash\n\n\nclass TestBatchExecution:\n \"\"\"Tests for the batch_execute method.\"\"\"\n\n with qml.tape.QuantumTape() as tape1:\n qml.PauliX(wires=0)\n qml.expval(qml.PauliZ(wires=0)), qml.expval(qml.PauliZ(wires=1))\n\n with qml.tape.JacobianTape() as tape2:\n qml.PauliX(wires=0)\n qml.expval(qml.PauliZ(wires=0))\n\n @pytest.mark.parametrize(\"n_tapes\", [1, 2, 3])\n def test_calls_to_execute(self, n_tapes, mocker, mock_qubit_device_with_paulis_and_methods):\n \"\"\"Tests that the device's execute method is called the correct number of times.\"\"\"\n\n dev = mock_qubit_device_with_paulis_and_methods(wires=2)\n spy = mocker.spy(QubitDevice, \"execute\")\n\n tapes = [self.tape1] * n_tapes\n dev.batch_execute(tapes)\n\n assert spy.call_count == n_tapes\n\n @pytest.mark.parametrize(\"n_tapes\", [1, 2, 3])\n def test_calls_to_reset(self, n_tapes, mocker, mock_qubit_device_with_paulis_and_methods):\n \"\"\"Tests that the device's reset method is called the correct number of times.\"\"\"\n\n dev = mock_qubit_device_with_paulis_and_methods(wires=2)\n\n spy = mocker.spy(QubitDevice, \"reset\")\n\n tapes = [self.tape1] * n_tapes\n dev.batch_execute(tapes)\n\n assert spy.call_count == n_tapes\n\n def test_result(self, mock_qubit_device_with_paulis_and_methods, tol):\n \"\"\"Tests that the result has the correct shape and entry types.\"\"\"\n\n dev = mock_qubit_device_with_paulis_and_methods(wires=2)\n\n tapes = [self.tape1, self.tape2]\n res = dev.batch_execute(tapes)\n\n assert len(res) == 2\n assert np.allclose(res[0], dev.execute(self.tape1), rtol=tol, atol=0)\n assert np.allclose(res[1], dev.execute(self.tape2), rtol=tol, atol=0)\n\n def test_result_empty_tape(self, mock_qubit_device_with_paulis_and_methods, tol):\n \"\"\"Tests that the result has the correct shape and entry types for empty tapes.\"\"\"\n\n dev = mock_qubit_device_with_paulis_and_methods(wires=2)\n\n empty_tape = qml.tape.QuantumTape()\n tapes = [empty_tape] * 3\n res = dev.batch_execute(tapes)\n\n assert len(res) == 3\n assert np.allclose(res[0], dev.execute(empty_tape), rtol=tol, atol=0)\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.linalg.multi_dot", "numpy.zeros", "numpy.eye", "numpy.linalg.inv", "numpy.kron" ], [ "numpy.array", "numpy.random.choice", "numpy.allclose", "numpy.arange", "numpy.all" ] ]
syelman/DRDM-Count
[ "47b43b64d7f536995bf4cbc44318b3108a5f2aff" ]
[ "losses/bregman_pytorch.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nRewrite ot.bregman.sinkhorn in Python Optimal Transport (https://pythonot.github.io/_modules/ot/bregman.html#sinkhorn)\nusing pytorch operations.\nBregman projections for regularized OT (Sinkhorn distance).\n\"\"\"\n\nimport torch\n\nM_EPS = 1e-16\n\n\ndef sinkhorn(a, b, C, reg=1e-1, method='sinkhorn', maxIter=1000, tau=1e3,\n stopThr=1e-9, verbose=True, log=True, warm_start=None, eval_freq=10, print_freq=200, **kwargs):\n \"\"\"\n Solve the entropic regularization optimal transport\n The input should be PyTorch tensors\n The function solves the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. \\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n method : str\n method used for the solver either 'sinkhorn', 'greenkhorn', 'sinkhorn_stabilized' or\n 'sinkhorn_epsilon_scaling', see those function for specific parameters\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n See Also\n --------\n\n \"\"\"\n\n if method.lower() == 'sinkhorn':\n return sinkhorn_knopp(a, b, C, reg, maxIter=maxIter,\n stopThr=stopThr, verbose=verbose, log=log,\n warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,\n **kwargs)\n elif method.lower() == 'sinkhorn_stabilized':\n return sinkhorn_stabilized(a, b, C, reg, maxIter=maxIter, tau=tau,\n stopThr=stopThr, verbose=verbose, log=log,\n warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,\n **kwargs)\n elif method.lower() == 'sinkhorn_epsilon_scaling':\n return sinkhorn_epsilon_scaling(a, b, C, reg,\n maxIter=maxIter, maxInnerIter=100, tau=tau,\n scaling_base=0.75, scaling_coef=None, stopThr=stopThr,\n verbose=False, log=log, warm_start=warm_start, eval_freq=eval_freq,\n print_freq=print_freq, **kwargs)\n else:\n raise ValueError(\"Unknown method '%s'.\" % method)\n\n\ndef sinkhorn_knopp(a, b, C, reg=1e-1, maxIter=1000, stopThr=1e-9,\n verbose=True, log=True, warm_start=None, eval_freq=10, print_freq=200, **kwargs):\n \"\"\"\n Solve the entropic regularization optimal transport\n The input should be PyTorch tensors\n The function solves the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. 
\\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1].\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n See Also\n --------\n\n \"\"\"\n\n device = a.device\n na, nb = C.shape\n\n assert na >= 1 and nb >= 1, 'C needs to be 2d'\n assert na == a.shape[0] and nb == b.shape[0], \"Shape of a or b does't match that of C\"\n assert reg > 0, 'reg should be greater than 0'\n assert a.min() >= 0. and b.min() >= 0., 'Elements in a or b less than 0'\n # unnecessary check for our special case\n if log:\n log = {'err': []}\n\n if warm_start is not None:\n u = warm_start['u']\n v = warm_start['v']\n else:\n u = torch.ones(na, dtype=a.dtype).to(device) / na\n v = torch.ones(nb, dtype=b.dtype).to(device) / nb\n\n K = torch.empty(C.shape, dtype=C.dtype).to(device)\n torch.div(C, -reg, out=K)\n torch.exp(K, out=K)\n\n b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)\n\n it = 1\n err = 1\n\n # allocate memory beforehand\n KTu = torch.empty(v.shape, dtype=v.dtype).to(device)\n Kv = torch.empty(u.shape, dtype=u.dtype).to(device)\n\n while (err > stopThr and it <= maxIter):\n upre, vpre = u, v\n torch.matmul(u, K, out=KTu)\n v = torch.div(b, KTu + M_EPS)\n torch.matmul(K, v, out=Kv)\n u = torch.div(a, Kv + M_EPS)\n\n if torch.any(torch.isnan(u)) or torch.any(torch.isnan(v)) or \\\n torch.any(torch.isinf(u)) or torch.any(torch.isinf(v)):\n print('Warning: numerical errors at iteration', it)\n u, v = upre, vpre\n break\n\n if log and it % eval_freq == 0:\n # we can speed up the process by checking for the error only all\n # the eval_freq iterations\n # below is equivalent to:\n # b_hat = torch.sum(u.reshape(-1, 1) * K * v.reshape(1, -1), 0)\n # but with more memory efficient\n b_hat = torch.matmul(u, K) * v\n err = (b - b_hat).pow(2).sum().item()\n # err = (b - b_hat).abs().sum().item()\n log['err'].append(err)\n\n if verbose and it % print_freq == 0:\n print('iteration {:5d}, constraint error {:5e}'.format(it, err))\n\n it += 1\n\n if log:\n log['u'] = u\n log['v'] = v\n log['alpha'] = reg * torch.log(u + M_EPS)\n log['beta'] = reg * torch.log(v + M_EPS)\n\n # transport plan\n P = u.reshape(-1, 1) * K * v.reshape(1, -1)\n if log:\n return P, log\n else:\n return P\n\n\ndef sinkhorn_stabilized(a, b, C, reg=1e-1, maxIter=1000, tau=1e3, stopThr=1e-9,\n verbose=False, log=False, warm_start=None, eval_freq=10, print_freq=200, **kwargs):\n \"\"\"\n Solve the entropic regularization OT problem with log stabilization\n The function solves 
the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. \\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [1]\n but with the log stabilization proposed in [3] an defined in [2] (Algo 3.1)\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n tau : float\n thershold for max value in u or v for log scaling\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n [2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019\n [3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.\n\n See Also\n --------\n\n \"\"\"\n\n device = a.device\n na, nb = C.shape\n\n assert na >= 1 and nb >= 1, 'C needs to be 2d'\n assert na == a.shape[0] and nb == b.shape[0], \"Shape of a or b does't match that of C\"\n assert reg > 0, 'reg should be greater than 0'\n assert a.min() >= 0. 
and b.min() >= 0., 'Elements in a or b less than 0'\n\n if log:\n log = {'err': []}\n\n if warm_start is not None:\n alpha = warm_start['alpha']\n beta = warm_start['beta']\n else:\n alpha = torch.zeros(na, dtype=a.dtype).to(device)\n beta = torch.zeros(nb, dtype=b.dtype).to(device)\n\n u = torch.ones(na, dtype=a.dtype).to(device) / na\n v = torch.ones(nb, dtype=b.dtype).to(device) / nb\n\n def update_K(alpha, beta):\n \"\"\"log space computation\"\"\"\n \"\"\"memory efficient\"\"\"\n torch.add(alpha.reshape(-1, 1), beta.reshape(1, -1), out=K)\n torch.add(K, -C, out=K)\n torch.div(K, reg, out=K)\n torch.exp(K, out=K)\n\n def update_P(alpha, beta, u, v, ab_updated=False):\n \"\"\"log space P (gamma) computation\"\"\"\n torch.add(alpha.reshape(-1, 1), beta.reshape(1, -1), out=P)\n torch.add(P, -C, out=P)\n torch.div(P, reg, out=P)\n if not ab_updated:\n torch.add(P, torch.log(u + M_EPS).reshape(-1, 1), out=P)\n torch.add(P, torch.log(v + M_EPS).reshape(1, -1), out=P)\n torch.exp(P, out=P)\n\n K = torch.empty(C.shape, dtype=C.dtype).to(device)\n update_K(alpha, beta)\n\n b_hat = torch.empty(b.shape, dtype=C.dtype).to(device)\n\n it = 1\n err = 1\n ab_updated = False\n\n # allocate memory beforehand\n KTu = torch.empty(v.shape, dtype=v.dtype).to(device)\n Kv = torch.empty(u.shape, dtype=u.dtype).to(device)\n P = torch.empty(C.shape, dtype=C.dtype).to(device)\n\n while (err > stopThr and it <= maxIter):\n upre, vpre = u, v\n torch.matmul(u, K, out=KTu)\n v = torch.div(b, KTu + M_EPS)\n torch.matmul(K, v, out=Kv)\n u = torch.div(a, Kv + M_EPS)\n\n ab_updated = False\n # remove numerical problems and store them in K\n if u.abs().sum() > tau or v.abs().sum() > tau:\n alpha += reg * torch.log(u + M_EPS)\n beta += reg * torch.log(v + M_EPS)\n u.fill_(1. / na)\n v.fill_(1. / nb)\n update_K(alpha, beta)\n ab_updated = True\n\n if log and it % eval_freq == 0:\n # we can speed up the process by checking for the error only all\n # the eval_freq iterations\n update_P(alpha, beta, u, v, ab_updated)\n b_hat = torch.sum(P, 0)\n err = (b - b_hat).pow(2).sum().item()\n log['err'].append(err)\n\n if verbose and it % print_freq == 0:\n print('iteration {:5d}, constraint error {:5e}'.format(it, err))\n\n it += 1\n\n if log:\n log['u'] = u\n log['v'] = v\n log['alpha'] = alpha + reg * torch.log(u + M_EPS)\n log['beta'] = beta + reg * torch.log(v + M_EPS)\n\n # transport plan\n update_P(alpha, beta, u, v, False)\n\n if log:\n return P, log\n else:\n return P\n\n\ndef sinkhorn_epsilon_scaling(a, b, C, reg=1e-1, maxIter=100, maxInnerIter=100, tau=1e3, scaling_base=0.75,\n scaling_coef=None, stopThr=1e-9, verbose=False, log=False, warm_start=None, eval_freq=10,\n print_freq=200, **kwargs):\n \"\"\"\n Solve the entropic regularization OT problem with log stabilization\n The function solves the following optimization problem:\n\n .. math::\n \\gamma = arg\\min_\\gamma <\\gamma,C>_F + reg\\cdot\\Omega(\\gamma)\n s.t. 
\\gamma 1 = a\n \\gamma^T 1= b\n \\gamma\\geq 0\n where :\n - C is the (ns,nt) metric cost matrix\n - :math:`\\Omega` is the entropic regularization term :math:`\\Omega(\\gamma)=\\sum_{i,j} \\gamma_{i,j}\\log(\\gamma_{i,j})`\n - a and b are target and source measures (sum to 1)\n\n The algorithm used for solving the problem is the Sinkhorn-Knopp matrix\n scaling algorithm as proposed in [1] but with the log stabilization\n proposed in [3] and the log scaling proposed in [2] algorithm 3.2\n\n Parameters\n ----------\n a : torch.tensor (na,)\n samples measure in the target domain\n b : torch.tensor (nb,)\n samples in the source domain\n C : torch.tensor (na,nb)\n loss matrix\n reg : float\n Regularization term > 0\n tau : float\n thershold for max value in u or v for log scaling\n maxIter : int, optional\n Max number of iterations\n stopThr : float, optional\n Stop threshol on error ( > 0 )\n verbose : bool, optional\n Print information along iterations\n log : bool, optional\n record log if True\n\n Returns\n -------\n gamma : (na x nb) torch.tensor\n Optimal transportation matrix for the given parameters\n log : dict\n log dictionary return only if log==True in parameters\n\n References\n ----------\n [1] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013\n [2] Bernhard Schmitzer. Stabilized Sparse Scaling Algorithms for Entropy Regularized Transport Problems. SIAM Journal on Scientific Computing, 2019\n [3] Chizat, L., Peyré, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.\n\n See Also\n --------\n\n \"\"\"\n\n na, nb = C.shape\n\n assert na >= 1 and nb >= 1, 'C needs to be 2d'\n assert na == a.shape[0] and nb == b.shape[0], \"Shape of a or b does't match that of C\"\n assert reg > 0, 'reg should be greater than 0'\n assert a.min() >= 0. and b.min() >= 0., 'Elements in a or b less than 0'\n\n def get_reg(it, reg, pre_reg):\n if it == 1:\n return scaling_coef\n else:\n if (pre_reg - reg) * scaling_base < M_EPS:\n return reg\n else:\n return (pre_reg - reg) * scaling_base + reg\n\n if scaling_coef is None:\n scaling_coef = C.max() + reg\n\n it = 1\n err = 1\n running_reg = scaling_coef\n\n if log:\n log = {'err': []}\n\n warm_start = None\n\n while (err > stopThr and it <= maxIter):\n running_reg = get_reg(it, reg, running_reg)\n P, _log = sinkhorn_stabilized(a, b, C, running_reg, maxIter=maxInnerIter, tau=tau,\n stopThr=stopThr, verbose=False, log=True,\n warm_start=warm_start, eval_freq=eval_freq, print_freq=print_freq,\n **kwargs)\n\n warm_start = {}\n warm_start['alpha'] = _log['alpha']\n warm_start['beta'] = _log['beta']\n\n primal_val = (C * P).sum() + reg * (P * torch.log(P)).sum() - reg * P.sum()\n dual_val = (_log['alpha'] * a).sum() + (_log['beta'] * b).sum() - reg * P.sum()\n err = primal_val - dual_val\n log['err'].append(err)\n\n if verbose and it % print_freq == 0:\n print('iteration {:5d}, constraint error {:5e}'.format(it, err))\n\n it += 1\n\n if log:\n log['alpha'] = _log['alpha']\n log['beta'] = _log['beta']\n return P, log\n else:\n return P\n" ]
[ [ "torch.zeros", "torch.isnan", "torch.add", "torch.ones", "torch.isinf", "torch.div", "torch.log", "torch.empty", "torch.matmul", "torch.exp", "torch.sum" ] ]
chummels/yt
[ "19058425e92c2d310b7e767d3ca90fb3559b6a36" ]
[ "yt/data_objects/selection_objects/cut_region.py" ]
[ "import numpy as np\n\nfrom yt.data_objects.selection_objects.data_selection_objects import (\n YTSelectionContainer,\n YTSelectionContainer3D,\n)\nfrom yt.data_objects.static_output import Dataset\nfrom yt.funcs import ensure_list, validate_iterable, validate_object\nfrom yt.geometry.selection_routines import points_in_cells\nfrom yt.utilities.exceptions import YTIllDefinedCutRegion\nfrom yt.utilities.on_demand_imports import _scipy\n\n\nclass YTCutRegion(YTSelectionContainer3D):\n \"\"\"\n This is a data object designed to allow individuals to apply logical\n operations to fields and filter as a result of those cuts.\n\n Parameters\n ----------\n data_source : YTSelectionContainer3D\n The object to which cuts will be applied.\n conditionals : list of strings\n A list of conditionals that will be evaluated. In the namespace\n available, these conditionals will have access to 'obj' which is a data\n object of unknown shape, and they must generate a boolean array. For\n instance, conditionals = [\"obj['temperature'] < 1e3\"]\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> sp = ds.sphere(\"max\", (1.0, 'Mpc'))\n >>> cr = ds.cut_region(sp, [\"obj['temperature'] < 1e3\"])\n \"\"\"\n\n _type_name = \"cut_region\"\n _con_args = (\"base_object\", \"conditionals\")\n _derived_quantity_chunking = \"all\"\n\n def __init__(\n self,\n data_source,\n conditionals,\n ds=None,\n field_parameters=None,\n base_object=None,\n locals=None,\n ):\n if locals is None:\n locals = {}\n validate_object(data_source, YTSelectionContainer)\n validate_iterable(conditionals)\n for condition in conditionals:\n validate_object(condition, str)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(base_object, YTSelectionContainer)\n if base_object is not None:\n # passing base_object explicitly has been deprecated,\n # but we handle it here for backward compatibility\n if data_source is not None:\n raise RuntimeError(\"Cannot use both base_object and data_source\")\n data_source = base_object\n\n self.conditionals = ensure_list(conditionals)\n if isinstance(data_source, YTCutRegion):\n # If the source is also a cut region, add its conditionals\n # and set the source to be its source.\n # Preserve order of conditionals.\n self.conditionals = data_source.conditionals + self.conditionals\n data_source = data_source.base_object\n\n super(YTCutRegion, self).__init__(\n data_source.center, ds, field_parameters, data_source=data_source\n )\n self.base_object = data_source\n self.locals = locals\n self._selector = None\n # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,\n # ires and get_data\n\n def chunks(self, fields, chunking_style, **kwargs):\n # We actually want to chunk the sub-chunk, not ourselves. We have no\n # chunks to speak of, as we do not data IO.\n for chunk in self.index._chunk(self.base_object, chunking_style, **kwargs):\n with self.base_object._chunked_read(chunk):\n with self._chunked_read(chunk):\n self.get_data(fields)\n yield self\n\n def get_data(self, fields=None):\n fields = ensure_list(fields)\n self.base_object.get_data(fields)\n ind = self._cond_ind\n for field in fields:\n f = self.base_object[field]\n if f.shape != ind.shape:\n parent = getattr(self, \"parent\", self.base_object)\n self.field_data[field] = parent[field][self._part_ind(field[0])]\n else:\n self.field_data[field] = self.base_object[field][ind]\n\n @property\n def blocks(self):\n # We have to take a slightly different approach here. 
Note that all\n # that .blocks has to yield is a 3D array and a mask.\n for obj, m in self.base_object.blocks:\n m = m.copy()\n with obj._field_parameter_state(self.field_parameters):\n for cond in self.conditionals:\n ss = eval(cond)\n m = np.logical_and(m, ss, m)\n if not np.any(m):\n continue\n yield obj, m\n\n @property\n def _cond_ind(self):\n ind = None\n obj = self.base_object\n locals = self.locals.copy()\n if \"obj\" in locals:\n raise RuntimeError(\n '\"obj\" has been defined in the \"locals\" ; '\n \"this is not supported, please rename the variable.\"\n )\n locals[\"obj\"] = obj\n with obj._field_parameter_state(self.field_parameters):\n for cond in self.conditionals:\n res = eval(cond, locals)\n if ind is None:\n ind = res\n if ind.shape != res.shape:\n raise YTIllDefinedCutRegion(self.conditionals)\n np.logical_and(res, ind, ind)\n return ind\n\n def _part_ind_KDTree(self, ptype):\n \"\"\"Find the particles in cells using a KDTree approach.\"\"\"\n parent = getattr(self, \"parent\", self.base_object)\n units = \"code_length\"\n\n pos = np.stack(\n [\n self[(\"index\", \"x\")].to(units),\n self[(\"index\", \"y\")].to(units),\n self[(\"index\", \"z\")].to(units),\n ],\n axis=1,\n ).value\n dx = np.stack(\n [\n self[(\"index\", \"dx\")].to(units),\n self[(\"index\", \"dy\")].to(units),\n self[(\"index\", \"dz\")].to(units),\n ],\n axis=1,\n ).value\n ppos = np.stack(\n [\n parent[(ptype, \"particle_position_x\")],\n parent[(ptype, \"particle_position_y\")],\n parent[(ptype, \"particle_position_z\")],\n ],\n axis=1,\n ).value\n\n mask = np.zeros(ppos.shape[0], dtype=bool)\n levels = self[(\"index\", \"grid_level\")].astype(\"int32\").value\n if levels.size == 0:\n return mask\n\n levelmin = levels.min()\n levelmax = levels.max()\n\n for lvl in range(levelmax, levelmin - 1, -1):\n # Filter out cells not in the current level\n lvl_mask = levels == lvl\n dx_loc = dx[lvl_mask]\n pos_loc = pos[lvl_mask]\n\n grid_tree = _scipy.spatial.cKDTree(pos_loc, boxsize=1)\n\n # Compute closest cell for all remaining particles\n dist, icell = grid_tree.query(\n ppos[~mask], distance_upper_bound=dx_loc.max(), p=np.inf\n )\n mask_loc = np.isfinite(dist[:])\n\n # Check that particles within dx of a cell are in it\n i = icell[mask_loc]\n dist = np.abs(ppos[~mask][mask_loc, :] - pos_loc[i])\n tmp_mask = np.all(dist <= (dx_loc[i] / 2), axis=1)\n\n mask_loc[mask_loc] = tmp_mask\n\n # Update the particle mask with particles found at this level\n mask[~mask] |= mask_loc\n\n return mask\n\n def _part_ind_brute_force(self, ptype):\n parent = getattr(self, \"parent\", self.base_object)\n units = \"code_length\"\n mask = points_in_cells(\n self[(\"index\", \"x\")].to(units),\n self[(\"index\", \"y\")].to(units),\n self[(\"index\", \"z\")].to(units),\n self[(\"index\", \"dx\")].to(units),\n self[(\"index\", \"dy\")].to(units),\n self[(\"index\", \"dz\")].to(units),\n parent[(ptype, \"particle_position_x\")].to(units),\n parent[(ptype, \"particle_position_y\")].to(units),\n parent[(ptype, \"particle_position_z\")].to(units),\n )\n\n return mask\n\n def _part_ind(self, ptype):\n # If scipy is installed, use the fast KD tree\n # implementation. 
Else, fall back onto the direct\n # brute-force algorithm.\n try:\n _scipy.spatial.KDTree\n return self._part_ind_KDTree(ptype)\n except ImportError:\n return self._part_ind_brute_force(ptype)\n\n @property\n def icoords(self):\n return self.base_object.icoords[self._cond_ind, :]\n\n @property\n def fcoords(self):\n return self.base_object.fcoords[self._cond_ind, :]\n\n @property\n def ires(self):\n return self.base_object.ires[self._cond_ind]\n\n @property\n def fwidth(self):\n return self.base_object.fwidth[self._cond_ind, :]\n\n def _get_bbox(self):\n \"\"\"\n Get the bounding box for the cut region. Here we just use\n the bounding box for the source region.\n \"\"\"\n return self.base_object._get_bbox()\n" ]
[ [ "numpy.zeros", "numpy.logical_and", "numpy.any", "numpy.stack", "numpy.abs", "numpy.isfinite", "numpy.all" ] ]
slaclab/atlas-chess2
[ "2135a79e1b43bb404abc50aeabe50e577242aa45" ]
[ "software/scripts/SCurveNP_8hits_BL_json3.py" ]
[ "####import ROOT as R\nimport numpy as np\nimport matplotlib #.pyplot as plt\nimport sys\nimport time\nimport re\nimport logging\nimport pickle\nimport json\n#import cPickle as pickle\nimport scipy.io as sio\n# Generating log file\nclass timep:\n def __init__(self,pixel1,matrix1,index1,threshold1,time1):\n self.pixel=pixel1\n self.matrix=matrix1\n self.index=index1\n self.threshold=threshold1\n self.time=time1\n\ndef logfile(logfilename):\n logger=logging.getLogger()\n LOG_FILE=logfilename\n LOG_FORMAT=\"%(asctime)s : %(funcName)s: %(message)s\"\n logging.basicConfig(filename=LOG_FILE,level=logging.DEBUG, format=LOG_FORMAT)\n return logger\n\n#transfer the data\ndef load_chess2_data(filename):\n for i in [2]:\n file_data=open(filename,'r')\n for line in file_data.readlines():\n if ('Shape' in line):\n shape_hist=re.findall('\\d+',line)\n # print(len(shape_hist))\n break\n data_1d=np.loadtxt(sys.argv[1])\n hists=data_1d.reshape(int(shape_hist[0]),int(shape_hist[1]),int(shape_hist[2]),int(shape_hist[3]))\t\n return hists\ndef get_pixels(filename):\n file_data=open(filename,'r')\n pixels=[]\n for line in file_data.readlines():\n a=re.findall('\"pixel\":..........',line)\n #a=re.findall('\"pixel\"',line)\n b=len(a)\n a1_i=0\n a1=[]\n for b_i in range(b):\n if b_i==0:\n a1.append(a[b_i])\n else:\n if a[b_i]!=a1[-1]:\n a1.append(a[b_i])\n for i in range(len(a1)):\n pixel_i=re.findall(r\"\\d+\\:?\\d*\",a1[i])\n p_2=(int(pixel_i[0]),int(pixel_i[1]))\n pixels.append(p_2)\n print(pixels)\n return pixels\ndef get_values(filename):\n file_data=open(sys.argv[1],'r')\n line_count=0\n start=False\n for line in file_data.readlines():\n line_count+=1\n if ('thresholds (raw)' in line):\n thresholds=re.findall('\\d+',line)\n start_line=line_count\n start=True\n if (start):\n if (line_count>start_line):\n if (not (']' in line)):\n thresholds1=re.findall('\\d+',line)\n thresholds.extend(thresholds1)\n else: \n thresholds1=re.findall('\\d+',line)\n thresholds.extend(thresholds1)\n break\n file_data=open(sys.argv[1],'r')\n for line in file_data.readlines():\n if ('PulseDelay:' in line):\n PulseDelay=re.findall('\\d+',line)\n break\n file_data=open(sys.argv[1],'r')\n for line in file_data.readlines():\n if ('PulseWidth:' in line):\n PulseWidth=re.findall('\\d+',line)\n break\n return thresholds,PulseDelay[0],PulseWidth[0]\n \ndef makeSCurve(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\"):\n nColumns = 32\n nRows = 128\n allHists = []\n logging.info(\"Using makeCurve......\")\n ####R.TH1.AddDirectory(R.kFALSE)\n# thresholdCuts = [0x7ce]\n # system.root.readConfig(\"chess2_config.yml\") --- should be in the driver script\n #####tf = R.TFile(histFileName, \"recreate\")\n # Turn on one pixel at a time\n print(\"Disable all pixels\")\n system.feb.Chess2Ctrl0.writeAllPixels(enable=0,chargeInj=0) #chargeInj should be 1 in this line and following 2 lines\n system.feb.Chess2Ctrl1.writeAllPixels(enable=0,chargeInj=0)\n system.feb.Chess2Ctrl2.writeAllPixels(enable=0,chargeInj=0)\n pixels = pixels if (pixels!=None) else [ (row,col) for row in range(nRows) for col in range(nColumns) ]\n for (row,col) in pixels:\n print(\"Pixel: (%i,%i)\"%(row,col))\n system.feb.Chess2Ctrl0.writePixel(enable=1, chargeInj=1, col=col, row=row, trimI= 15) #chargeInj should be 0 in these 3 lines\n system.feb.Chess2Ctrl1.writePixel(enable=1, chargeInj=1, col=col, row=row, trimI= 15)\n system.feb.Chess2Ctrl2.writePixel(enable=1, chargeInj=1, col=col, row=row, trimI= 15)\n ####hists_row = [ 
R.TH1F(\"row_%i_%i_%i\"%(i_asic,row,col),\"\",128,0,128) for i_asic in range(3) ]\n ####hists_col = [ R.TH1F(\"col_%i_%i_%i\"%(i_asic,row,col),\"\",32,0,32) for i_asic in range(3) ]\n hists_row = [[], [], []]\n hists_col = [[], [], []]\n for threshold in thresholdCuts:\n ####hists = [ R.TH1F(\"deltaT_%i_%i_%i_%s\"%(i_asic,row,col,hex(threshold)),\"\",100,0,1000) for i_asic in range(3) ] # deltaT in ns\n print(\"Thresholds (system.feb.dac.dacBLRRaw): \", hex(threshold))\n hists = [[], [], []]\n# system.feb.dac.dacPIXTHRaw.set(threshold)\n #system.feb.dac.dacBLRaw.set(threshold+608)\n system.feb.dac.dacBLRRaw.set(threshold)\n #system.feb.dac.dacBLRaw.set(threshold)\n # this delay seems to be very important to enable the comparitor inside the asic to settle. (smaller values tend to make this \n # tests to report wrong times\n time.sleep(2.0)\n system.readAll()\n for cnt in range(nCounts):\n #time.sleep(0.1)\n # start charge injection\n system.feb.memReg.chargInjStartEventReg.set(0)\n time.sleep(0.1)\n #system.feb.chargeInj.calPulseVar.set(1)\n system.readAll()\n if system.feb.chargeInj.hitDetValid0._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow0._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol0._rawGet())\n ####hists_row[0].Fill(row_det)\n ####hists_col[0].Fill(col_det)\n hists_row[0].append(row_det)\n hists_col[0].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[0].Fill(float(system.feb.chargeInj.hitDetTime0._rawGet()))\n hists[0].append(float(system.feb.chargeInj.hitDetTime0._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime0: \", float(system.feb.chargeInj.hitDetTime0._rawGet()))\n else:\n hists[0].append(-1.0)\n if system.feb.chargeInj.hitDetValid1._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow1._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol1._rawGet())\n ####hists_row[1].Fill(row_det)\n ####hists_col[1].Fill(col_det)\n hists_row[1].append(row_det)\n hists_col[1].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[1].Fill(float(system.feb.chargeInj.hitDetTime1._rawGet()))\n hists[1].append(float(system.feb.chargeInj.hitDetTime1._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime1: \", float(system.feb.chargeInj.hitDetTime1._rawGet()))\n else:\n hists[1].append(-1.0)\n if system.feb.chargeInj.hitDetValid2._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow2._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol2._rawGet())\n ####hists_row[2].Fill(row_det)\n ####hists_col[2].Fill(col_det)\n hists_row[2].append(row_det)\n hists_col[2].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[2].Fill(float(system.feb.chargeInj.hitDetTime2._rawGet()))\n hists[2].append(float(system.feb.chargeInj.hitDetTime2._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime2: \", float(system.feb.chargeInj.hitDetTime2._rawGet()))\n else:\n hists[2].append(-1.0)\n allHists.append(hists)\n ####[ hist.Write() for hist in hists ]\n# for i in range(3):\n# fig = matplotlib.figure()\n# ax = fig.add_subplot(1, 1, 1)\n# n, bins, patches = ax.hist(hists[i], bins=100, range=(0, 1000))\n# ax.set_xlabel('Delta T in ns')\n# ax.set_ylabel('Frequency')\n# fig.savefig(\"plotDir/deltaT_%i_%i_%i_%s\"%(i,row,col,hex(threshold)))\n# fig.clf()\n ####[ print(\"... ASIC%i %f\"%(i_h,hist.GetEntries())) for (i_h,hist) in enumerate(hists) ]\n# [ print(\"... 
ASIC%i %f\"%(i_h,len(hist))) for (i_h,hist) in enumerate(hists) ]\n\n\n ####[ hist.Write() for hist in hists_row ]\n ####[ hist.Write() for hist in hists_col ]\n# for i in range(3):\n# fig = matplotlib.figure()\n# ax1 = fig.add_subplot(2, 1, 1)\n# ax2 = fig.add_subplot(2, 1, 2)\n# n, bins, patches = ax1.hist(hists_row[i], bins=128, range=(0, 128))\n# ax1.set_xlabel('Row')\n# ax1.set_ylabel('Frequency')\n# n, bins, patches = ax2.hist(hists_col[i], bins=32, range=(0,32))\n# ax2.set_xlabel('Column')\n# ax2.set_ylabel('Frequency')\n# fig.savefig(\"plotDir/asic_row_col_%i_%i_%i.png\"%(i,row,col))\n# fig.clf()\n\n# system.feb.Chess2Ctrl0.writePixel(enable=0, chargeInj=0, col=col, row=row) \n# system.feb.Chess2Ctrl1.writePixel(enable=0, chargeInj=0, col=col, row=row) \n# system.feb.Chess2Ctrl2.writePixel(enable=0, chargeInj=0, col=col, row=row)\n\n return allHists\n\n# tf.Close()\n\"\"\" The following test enables to test a set of pixels for all trim values. \n The makeCalibCurveLoop function is called to implement the inner loops\n for the set of pixels and for the thresholdCuts\"\"\"\ndef makeCalibCurve(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\"):\n allHists = []\n\n pixEnableLogic = 1\n chargeInjLogic = 0\n logging.info(\"Using makeCalibCurve......\")\n print(\"Disable all pixels\")\n system.feb.Chess2Ctrl0.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n system.feb.Chess2Ctrl1.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n system.feb.Chess2Ctrl2.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n\n #for trim in range(0,16,2):\n for trim in range(7,8):\n# pixEnableLogic = 1\n# chargeInjLogic = 1\n# print(\"Trim, pixEnableLogic, chargeInjLogic: (%i,%i, %i)\"%(trim, pixEnableLogic, chargeInjLogic))\n# hists = makeCalibCurveLoop(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnableLogic, chargeInjLogic = chargeInjLogic, pixTrimI = trim)\n# allHists.append(hists)\n\n pixEnableLogic = 1\n chargeInjLogic = 0\n print(\"Trim, pixEnableLogic, chargeInjLogic: (%i,%i,%i)\"%(trim, pixEnableLogic, chargeInjLogic))\n hists = makeCalibCurveLoop(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnableLogic, chargeInjLogic = chargeInjLogic, pixTrimI = trim)\n allHists.append(hists)\n\n# pixEnableLogic = 0\n# chargeInjLogic = 1\n# print(\"Trim, pixEnableLogic, chargeInjLogic: (%i,%i, %i)\"%(trim, pixEnableLogic, chargeInjLogic))\n# hists = makeCalibCurveLoop(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnableLogic, chargeInjLogic = chargeInjLogic, pixTrimI = trim)\n# allHists.append(hists)\n\n# pixEnableLogic = 0\n# chargeInjLogic = 0\n# print(\"Trim, pixEnableLogic, chargeInjLogic: (%i,%i, %i)\"%(trim, pixEnableLogic, chargeInjLogic))\n# hists = makeCalibCurveLoop(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnableLogic, chargeInjLogic = chargeInjLogic, pixTrimI = trim)\n# allHists.append(hists)\n return allHists\n\n\"\"\" The following test specifies a single pixel memory configuration (pixEnableLogic,\n chargeInjLogic and trim). 
The makeCalibCurveLoop function is called to implement \n the inner loops for the set of pixels and for the thresholdCuts\"\"\"\n\ndef makeCalibCurve2(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\"):\n allHists = []\n logging.info(\"Using makeCalibCurve2......\")\n\n pixEnableLogic = 1\n chargeInjLogic = 0\n trim = 15\n\n print(\"Disable all pixels\")\n system.feb.Chess2Ctrl0.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n system.feb.Chess2Ctrl1.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n system.feb.Chess2Ctrl2.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n\n\n print(\"Trim, pixEnableLogic, chargeInjLogic: (%i,%i,%i)\"%(trim, pixEnableLogic, chargeInjLogic))\n hists = makeCalibCurveLoop(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnableLogic, chargeInjLogic = chargeInjLogic, pixTrimI = trim)\n allHists.append(hists)\n\n\ndef makeCalibCurve3(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\"):\n allHists = []\n\n pixEnable = 1\n chargeInj = 0 # 0 - enable / 1 - disabled\n trim = 7\n\n #system.feb.chargeInj.pulseWidthRaw.set(0x7fff)\n\n print(\"Disable all pixels\")\n system.feb.Chess2Ctrl0.writeAllPixels(enable= 0,chargeInj= 1)\n system.feb.Chess2Ctrl1.writeAllPixels(enable= 0,chargeInj= 1)\n system.feb.Chess2Ctrl2.writeAllPixels(enable= 0,chargeInj= 1)\n\n\n print(\"Trim, pixEnable, chargeInj: (%i,%i,%i)\"%(trim, pixEnable, chargeInj))\n hists = makeCalibCurveLoopTH(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnable, chargeInjLogic = chargeInj, pixTrimI = trim)\n allHists.append(hists)\n\n return allHists\n\n\ndef configAsicsPreampTest(system = []):\n system.feb.Chess2Ctrl0.VNatt.set(0x1e) \n system.feb.Chess2Ctrl0.VNres.set(0x1)\n system.feb.Chess2Ctrl0.VPLoadatt.set(0x1c) \n system.feb.Chess2Ctrl0.VPLoadres.set(0x2) \n system.feb.Chess2Ctrl0.VNSFatt.set(0x1f) \n system.feb.Chess2Ctrl0.VNSFres.set(0x3)\n \n system.feb.Chess2Ctrl1.VNatt.set(0x1e)\n system.feb.Chess2Ctrl1.VNres.set(0x1)\n system.feb.Chess2Ctrl1.VPLoadatt.set(0x1c) \n system.feb.Chess2Ctrl1.VPLoadres.set(0x2) \n system.feb.Chess2Ctrl1.VNSFatt.set(0x1f) \n system.feb.Chess2Ctrl1.VNSFres.set(0x3)\n \n system.feb.Chess2Ctrl2.VNatt.set(0x1e)\n system.feb.Chess2Ctrl2.VNres.set(0x1)\n system.feb.Chess2Ctrl2.VPLoadatt.set(0x1c) \n system.feb.Chess2Ctrl2.VPLoadres.set(0x2) \n system.feb.Chess2Ctrl2.VNSFatt.set(0x1f) \n system.feb.Chess2Ctrl2.VNSFres.set(0x3)\n\ndef configAsicsPreampTestRestoreDefaultValues(system = []):\n system.feb.Chess2Ctrl0.VNatt.set(0x1F) \n system.feb.Chess2Ctrl0.VNres.set(0x0)\n system.feb.Chess2Ctrl0.VPLoadatt.set(0x1e) \n system.feb.Chess2Ctrl0.VPLoadres.set(0x1) \n system.feb.Chess2Ctrl0.VNSFatt.set(0x1b) \n system.feb.Chess2Ctrl0.VNSFres.set(0x0)\n \n system.feb.Chess2Ctrl1.VNatt.set(0x1F)\n system.feb.Chess2Ctrl1.VNres.set(0x0)\n system.feb.Chess2Ctrl1.VPLoadatt.set(0x1e) \n system.feb.Chess2Ctrl1.VPLoadres.set(0x1) \n system.feb.Chess2Ctrl1.VNSFatt.set(0x1b) \n system.feb.Chess2Ctrl1.VNSFres.set(0x0)\n \n system.feb.Chess2Ctrl2.VNatt.set(0x1F)\n system.feb.Chess2Ctrl2.VNres.set(0x0)\n system.feb.Chess2Ctrl2.VPLoadatt.set(0x1e) \n system.feb.Chess2Ctrl2.VPLoadres.set(0x1) \n system.feb.Chess2Ctrl2.VNSFatt.set(0x1b) \n system.feb.Chess2Ctrl2.VNSFres.set(0x0)\n\n\n\n\ndef makeCalibCurve4(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\", deltaBLToBLR = 608, chargeInjectionEnbled = 0, BL=0x26c):\n\n 
logging.info(\"Using makeCalibCurve4......\")\n #ASIC specific configuration selected depending on the test being run\n\n pixEnable = 1\n chargeInj1 = not chargeInjectionEnbled # 0 - enable / 1 - disabled\n trim = 7\n\n #system.feb.chargeInj.pulseWidthRaw.set(0x7fff)\n system.feb.chargeInj.calPulseInh.set(chargeInj1)\n\n print(\"Disable all pixels\")\n system.feb.Chess2Ctrl0.writeAllPixels(enable= 0,chargeInj= 1,trimI= trim)\n system.feb.Chess2Ctrl1.writeAllPixels(enable= 0,chargeInj= 1,trimI= trim)\n system.feb.Chess2Ctrl2.writeAllPixels(enable= 0,chargeInj= 1,trimI= trim)\n print(\"Trim, pixEnable, chargeInj: (%i,%i,%i)\"%(trim, pixEnable, chargeInj1))\n if pixels==None:\n hitmap0,hitmap1,hitmap2,hists = makeCalibCurveLoopBLx_hitmap(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnable, chargeInjLogic = chargeInj1, pixTrimI = trim, deltaBLToBLR = deltaBLToBLR,pixth=Pixth)\n else:\n hists = makeCalibCurveLoopBLx_8hits(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnable, chargeInjLogic = chargeInj1, pixTrimI = trim, deltaBLToBLR = deltaBLToBLR,BL_v=BL)\n if pixels==None:\n return hitmap0,hitmap1,hitmap2,allHists\n else:\n return hists\n\ndef makeCalibCurveLoop(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\", pixEnableLogic = 1, chargeInjLogic = 0, pixTrimI = 0):\n nColumns = 32\n nRows = 128\n allHists = []\n logging.info(\"Using makeCalibCurveLoop......\")\n\n\n # Turn on one pixel at a time\n# print(\"Disable all pixels\")\n# system.feb.Chess2Ctrl0.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n# system.feb.Chess2Ctrl1.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n# system.feb.Chess2Ctrl2.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n pixels = pixels if (pixels!=None) else [ (row,col) for row in range(nRows) for col in range(nColumns) ]\n for (row,col) in pixels:\n print(\"Pixel: (%i,%i)\"%(row,col))\n system.feb.Chess2Ctrl0.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n system.feb.Chess2Ctrl1.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n system.feb.Chess2Ctrl2.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n ####hists_row = [ R.TH1F(\"row_%i_%i_%i\"%(i_asic,row,col),\"\",128,0,128) for i_asic in range(3) ]\n ####hists_col = [ R.TH1F(\"col_%i_%i_%i\"%(i_asic,row,col),\"\",32,0,32) for i_asic in range(3) ]\n hists_row = [[], [], []]\n hists_col = [[], [], []]\n for threshold in thresholdCuts:\n ####hists = [ R.TH1F(\"deltaT_%i_%i_%i_%s\"%(i_asic,row,col,hex(threshold)),\"\",100,0,1000) for i_asic in range(3) ] # deltaT in ns\n hists = [[], [], []]\n# system.feb.dac.dacPIXTHRaw.set(threshold)\n #system.feb.dac.dacBLRaw.set(threshold+608)\n #print(\"Thresholds (system.feb.dac.dacBLRRaw): \", hex(threshold))\n #system.feb.dac.dacBLRRaw.set(threshold)\n print(\"Thresholds (system.feb.dac.dacBLRaw): \", hex(threshold))\n system.feb.dac.dacBLRaw.set(threshold)\n # this delay seems to be very important to enable the comparitor inside the asic to settle. 
(smaller values tend to make this \n # tests to report wrong times\n time.sleep(2.0)\n system.readAll()\n for cnt in range(nCounts):\n #time.sleep(0.1)\n # start charge injection\n #system.feb.memReg.chargInjStartEventReg.set(0)\n system.feb.chargeInj.calPulseVar.set(1)\n time.sleep(0.1) \n system.readAll()\n if system.feb.chargeInj.hitDetValid0._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow0._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol0._rawGet())\n ####hists_row[0].Fill(row_det)\n ####hists_col[0].Fill(col_det)\n hists_row[0].append(row_det)\n hists_col[0].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[0].Fill(float(system.feb.chargeInj.hitDetTime0._rawGet()))\n hists[0].append(float(system.feb.chargeInj.hitDetTime0._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime0: \", float(system.feb.chargeInj.hitDetTime0._rawGet()))\n else:\n hists[0].append(-1.0)\n if system.feb.chargeInj.hitDetValid1._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow1._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol1._rawGet())\n ####hists_row[1].Fill(row_det)\n ####hists_col[1].Fill(col_det)\n hists_row[1].append(row_det)\n hists_col[1].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[1].Fill(float(system.feb.chargeInj.hitDetTime1._rawGet()))\n hists[1].append(float(system.feb.chargeInj.hitDetTime1._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime1: \", float(system.feb.chargeInj.hitDetTime1._rawGet()))\n else:\n hists[1].append(-1.0)\n if system.feb.chargeInj.hitDetValid2._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow2._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol2._rawGet())\n ####hists_row[2].Fill(row_det)\n ####hists_col[2].Fill(col_det)\n hists_row[2].append(row_det)\n hists_col[2].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[2].Fill(float(system.feb.chargeInj.hitDetTime2._rawGet()))\n hists[2].append(float(system.feb.chargeInj.hitDetTime2._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime2: \", float(system.feb.chargeInj.hitDetTime2._rawGet()))\n else:\n hists[2].append(-1.0)\n allHists.append(hists)\n \n return allHists\n\ndef makeCalibCurveLoopBLx_hitmap(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\", pixEnableLogic = 1, chargeInjLogic = 0, pixTrimI = 0, deltaBLToBLR = 608, pixth=0x800):\n nColumns = 32\n nRows = 128\n logging.info(\" Using makeCalibCurveLoopBLx_hitmap......\")\n hitmap_mat0=[[0 for i in range(nColumns)] for j in range(nRows)]\n hitmap_mat1=[[0 for i in range(nColumns)] for j in range(nRows)]\n hitmap_mat2=[[0 for i in range(nColumns)] for j in range(nRows)]\n hist=[[],[],[]] \n pixels = pixels if (pixels!=None) else [ (row,col) for row in range(nRows) for col in range(nColumns) ]\n delt_Bl=[0x10,0x8,0x1] \n print(\"Thresholds (system.feb.dac.dacPIXTHRaw): \", hex(pixth))\n system.feb.dac.dacPIXTHRaw.set(pixth)\n for (row,col) in pixels:\n BL_up=0x800\n BL_bot=0x0\n act_pixel0=0\n act_pixel1=0\n act_pixel2=0\n print(\"Matrix0 -Pixel: (%i,%i)\"%(row,col))\n for Delt_BL in delt_Bl:\n if act_pixel0==2:\n break\n print(\"delt_BL is : \",Delt_BL)\n thresholdCuts= np.arange(BL_bot,BL_up,Delt_BL)\n for threshold in thresholdCuts:\n BLRValue = threshold + deltaBLToBLR\n system.feb.dac.dacBLRRaw.set(BLRValue)\n #print(\"Thresholds (system.feb.dac.dacBLRRaw): \", hex(BLRValue))\n 
system.feb.dac.dacBLRaw.set(threshold)\n print(\"Thresholds (system.feb.dac.dacBLRaw): \", hex(threshold))\n system.feb.Chess2Ctrl0.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n hit_num=0\n for cnt in range(nCounts):\n #time.sleep(1.0)\n system.feb.chargeInj.calPulseVar.set(1)\n #time.sleep(0.05) \n system.readAll()\n if system.feb.chargeInj.hitDetValid0._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow0._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol0._rawGet())\n if (row_det==row and col_det==col):\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime0: \", float(system.feb.chargeInj.hitDetTime0._rawGet()))\n hit_num+=1\n print(\"hit number:\",hit_num)\n if (hit_num>=0.5*nCounts):\n BL_bot=threshold-Delt_BL\n BL_up=threshold \n act_pixel0=1\n print(\"BL_bottom:\",BL_bot,\"_up:\",BL_up) \n #break\n break\n #continue\n #else: \n if (act_pixel0==0 and threshold==thresholdCuts[-1]):\n act_pixel0=2\n print(\"dead pixel\")\n hitmap_mat0[row][col]=BL_up\n hist[0].append(BL_up)\n print(\"The thresholds of Matrix0 (\",row,col,\"):\",BL_up) \n system.feb.Chess2Ctrl0.writePixel(enable=not pixEnableLogic, chargeInj=1, col=col, row=row)\n BL_up=0x800\n BL_bot=0x0\n print(\"Matrix1 -Pixel: (%i,%i)\"%(row,col))\n for Delt_BL in delt_Bl:\n if act_pixel1==2:\n break\n print(\"delt_BL: \",Delt_BL)\n thresholdCuts= np.arange(BL_bot,BL_up,Delt_BL)\n for threshold in thresholdCuts:\n BLRValue = threshold + deltaBLToBLR\n system.feb.dac.dacBLRRaw.set(BLRValue)\n #print(\"Thresholds (system.feb.dac.dacBLRRaw): \", hex(BLRValue))\n system.feb.dac.dacBLRaw.set(threshold)\n print(\"Thresholds (system.feb.dac.dacBLRaw): \", hex(threshold))\n system.feb.Chess2Ctrl1.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n hit_num=0\n for cnt in range(nCounts):\n #time.sleep(1.0)\n system.feb.chargeInj.calPulseVar.set(1)\n #time.sleep(0.05) \n system.readAll()\n if system.feb.chargeInj.hitDetValid1._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow1._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol1._rawGet())\n if (row_det==row and col_det==col):\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime1: \", float(system.feb.chargeInj.hitDetTime0._rawGet()))\n hit_num+=1\n print(\"hit number:\",hit_num)\n if (hit_num>=0.5*nCounts):\n BL_bot=threshold-Delt_BL\n BL_up=threshold\n act_pixel1=1 \n print(\"BL_bottom:\",BL_bot,\"_up:\",BL_up) \n #break\n break\n if (act_pixel1==0 and threshold==thresholdCuts[-1]):\n act_pixel1=2 \n print(\"dead pixel\")\n break\n \n\n hitmap_mat1[row][col]=BL_up\n hist[1].append(BL_up)\n print(\"find the thresholds of Matrix1 (\",row,col,\"):\",BL_up) \n system.feb.Chess2Ctrl1.writePixel(enable=not pixEnableLogic, chargeInj=1, col=col, row=row)\n BL_up=0x800\n BL_bot=0x0\n print(\"Matrix2 -Pixel: (%i,%i)\"%(row,col))\n for Delt_BL in delt_Bl:\n if act_pixel2==2:\n break\n print(\"delt_BL: \",Delt_BL)\n thresholdCuts= np.arange(BL_bot,BL_up,Delt_BL)\n for threshold in thresholdCuts:\n BLRValue = threshold + deltaBLToBLR\n system.feb.dac.dacBLRRaw.set(BLRValue)\n #print(\"Thresholds (system.feb.dac.dacBLRRaw): \", hex(BLRValue))\n system.feb.dac.dacBLRaw.set(threshold)\n print(\"Thresholds (system.feb.dac.dacBLRaw): \", hex(threshold))\n system.feb.Chess2Ctrl2.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n hit_num=0\n for cnt in range(nCounts):\n 
#time.sleep(1.0)\n system.feb.chargeInj.calPulseVar.set(1)\n #time.sleep(0.05) \n system.readAll()\n if system.feb.chargeInj.hitDetValid2._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow2._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol2._rawGet())\n if (row_det==row and col_det==col):\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime2: \", float(system.feb.chargeInj.hitDetTime0._rawGet()))\n hit_num+=1\n print(\"hit number:\",hit_num)\n if (hit_num>=0.5*nCounts):\n BL_bot=threshold-Delt_BL\n BL_up=threshold \n act_pixel2=1 \n print(\"BL_bottom:\",BL_bot,\"_up:\",BL_up) \n #break\n break\n if (act_pixel2==0 and threshold==thresholdCuts[-1]):\n print(\"dead pixel\")\n act_pixel2=2\n break\n break\n\n hitmap_mat2[row][col]=BL_up\n hist[2].append(BL_up)\n print(\"find the thresholds of Matrix2 (\",row,col,\"):\",BL_up) \n system.feb.Chess2Ctrl2.writePixel(enable=not pixEnableLogic, chargeInj=1, col=col, row=row)\n \n allHists.append(hists)\n return hitmap_mat0,hitmap_mat1,hitmap_mat2,allHists\n\n\ndef makeCalibCurveLoopBLx(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\", pixEnableLogic = 1, chargeInjLogic = 0, pixTrimI = 0, deltaBLToBLR = 608, BL_v=0x800):\n nColumns = 32\n nRows = 128\n allHists = []\n logging.info(\" Using makeCalibCurveLoopBLx......\")\n \n\n if pixels==None:\n hitmap_get=True\n hitmap_mat0=[[0 for i in range(nColumns)] for j in range(nRows)]\n hitmap_mat1=[[0 for i in range(nColumns)] for j in range(nRows)]\n hitmap_mat2=[[0 for i in range(nColumns)] for j in range(nRows)]\n print(\"hitmap_get: \",hitmap_get)\n else: \n hitmap_get=False\n pixels = pixels if (pixels!=None) else [ (row,col) for row in range(nRows) for col in range(nColumns) ]\n BLRValue = BL_v + deltaBLToBLR\n system.feb.dac.dacBLRRaw.set(BLRValue)\n print(\"Thresholds (system.feb.dac.dacBLRRaw): \", hex(BLRValue))\n system.feb.dac.dacBLRaw.set(BL_v)\n print(\"Thresholds (system.feb.dac.dacBLRaw): \", hex(BL_v))\n for (row,col) in pixels:\n system.feb.Chess2Ctrl0.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n system.feb.Chess2Ctrl1.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n system.feb.Chess2Ctrl2.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n print(\"enable Pixel: (%i,%i)\"%(row,col))\n hists_row = [[], [], []]\n hists_col = [[], [], []]\n for threshold in thresholdCuts:\n print(\"Thresholds (system.feb.dac.dacPIXTHRaw): \", hex(threshold))\n system.feb.dac.dacPIXTHRaw.set(threshold)\n hists=[[],[],[]]\n time.sleep(1.0)\n system.readAll()\n ####hists = [ R.TH1F(\"deltaT_%i_%i_%i_%s\"%(i_asic,row,col,hex(threshold)),\"\",100,0,1000) for i_asic in range(3) ] # deltaT in ns\n for cnt in range(nCounts):\n #time.sleep(1.0)\n #system.readAll()\n #system.feb.memReg.chargInjStartEventReg.set(0)\n system.feb.chargeInj.calPulseVar.set(1)\n time.sleep(0.05) \n system.readAll()\n if system.feb.chargeInj.hitDetValid0_0._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow0_0._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol0_0._rawGet())\n if hitmap_get:\n hitmap_mat0[row_det][col_det]+=1 \n if (row_det==row and col_det==col):\n hists_row[0].append(row_det)\n hists_col[0].append(col_det)\n hists[0].append(float(system.feb.chargeInj.hitDetTime0_0._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime0: \", 
float(system.feb.chargeInj.hitDetTime0_0._rawGet()))\n else:\n hists[0].append(-2.0)\n print(\"row_det: \",row_det, \":col_det:\", col_det, \":system.feb.chargeInj.hitDetTime0: \", float(-2),\" is\",float(system.feb.chargeInj.hitDetTime0_0._rawGet()))\n else:\n hists[0].append(-1.0)\n print(\"row_det: \",-1, \":col_det:\", -1, \":system.feb.chargeInj.hitDetTime0: \", float(-1))\n if system.feb.chargeInj.hitDetValid1_0._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow1_0._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol1_0._rawGet())\n if hitmap_get: \n hitmap_mat1[row_det][col_det]+=1 \n if (row_det==row and col_det==col):\n hists_row[1].append(row_det)\n hists_col[1].append(col_det)\n hists[1].append(float(system.feb.chargeInj.hitDetTime1_0._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime1: \", float(system.feb.chargeInj.hitDetTime1_0._rawGet()))\n else:\n hists[1].append(-2.0)\n print(\"row_det: \",row_det, \":col_det:\", col_det, \":system.feb.chargeInj.hitDetTime1: \", float(-2),\" is\",float(system.feb.chargeInj.hitDetTime1_0._rawGet()))\n else:\n hists[1].append(-1.0)\n print(\"row_det: \",-1, \":col_det:\", -1, \":system.feb.chargeInj.hitDetTime1: \", float(-1))\n if system.feb.chargeInj.hitDetValid2_0._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow2_0._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol2_0._rawGet())\n if hitmap_get:\n hitmap_mat2[row_det][col_det]+=1 \n if (row_det==row and col_det==col):\n hists_row[2].append(row_det)\n hists_col[2].append(col_det)\n hists[2].append(float(system.feb.chargeInj.hitDetTime2_0._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime2: \", float(system.feb.chargeInj.hitDetTime2_0._rawGet()))\n else:\n hists[2].append(-2.0)\n print(\"row_det: \",row_det, \":col_det:\", col_det, \":system.feb.chargeInj.hitDetTime2: \", float(-2),\" is\",float(system.feb.chargeInj.hitDetTime2_0._rawGet()))\n else:\n hists[2].append(-1.0)\n print(\"row_det: \",-1, \":col_det:\", -1, \":system.feb.chargeInj.hitDetTime2: \", float(-1))\n allHists.append(hists)\n if (hitmap_get):\n system.feb.Chess2Ctrl0.writePixel(enable=not pixEnableLogic, chargeInj=1, col=col, row=row)\n system.feb.Chess2Ctrl1.writePixel(enable=not pixEnableLogic, chargeInj=1, col=col, row=row)\n system.feb.Chess2Ctrl2.writePixel(enable=not pixEnableLogic, chargeInj=1, col=col, row=row)\n print(\"disabling pixel:\",row,col)\n if hitmap_get:\n return hitmap_mat0,hitmap_mat1,hitmap_mat2,allHists\n else:\n return allHists\n\ndef get_allHists(pixels,matrix,indexs,thresholdCuts):\n allHist={}.fromkeys(pixels)\n for pixel in pixels:\n allHist[pixel]={}.fromkeys(matrix)\n for matri in matrix:\n allHist[pixel][matri]={}.fromkeys(indexs)\n for index in indexs:\n allHist[pixel][matri][index]={}.fromkeys(thresholdCuts)\n for threshold in thresholdCuts:\n allHist[pixel][matri][index][threshold]=[]\n return allHist\n\ndef save_f(file_name,hists):\n with open(file_name,\"w\") as save_file:\n for key in hists:\n for key1 in hists[key]:\n for key2 in hists[key][key1]:\n if hists[key][key1][key2]:\n save_file.writelines(\"pixel:{0}\\nmatrix:{1}\\nthreshold:{2}\\nhit_time:{3}\\n\".format(key,key1,key2,hists[key][key1][key2]))\n save_file.close()\n\ndef save_f_pickle(file_name,hists):\n file_pickle=open(file_name,'wb')\n pickle.dump(hists,file_pickle,protocol=1)\n file_pickle.close() \n\ndef dic2timep(dic):\n return 
timep(dic['pixel'],dic['matrix'],dic['index'],dic['threshold'],dic['time'])\n\ndef timep2dic(timep):\n return {'pixel':timep.pixel,'matrix':timep.matrix,'index':timep.index,'threshold':timep.threshold,'time':timep.time}\n\ndef save_f_json(file_name,hists):\n with open(file_name+'.json','w',encoding='utf-8') as f:\n json.dump(hists,f,default=timep2dic)\n\ndef save_timep(hists):\n allhist=[]\n for key in hists: #pixel\n print(key)\n for key1 in hists[key]: #matrix\n for key2 in hists[key][key1]: #index\n for key3 in hists[key][key1][key2]: #threshold\n #if hists[key][key1][key2][key3]: \n one=timep(key,key1,key2,key3,hists[key][key1][key2][key3])\n allhist.append(one)\n return allhist\n\ndef print_f(file_name):\n with open(file_name,\"r\") as print_file:\n for line in print_file.readlines():\n print(line)\n \ndef makeCalibCurveLoopBLx_8hits(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\",pixEnableLogic=1,chargeInjLogic=0,pixTrimI=0,deltaBLToBLR=608,BL_v=0x800):\n nColumns = 32\n nRows = 128\n logging.info(\" Using makeCalibCurveLoopBLx_8hits......\")\n matrix=[0,1,2]\n hits=[0,1,2,3,4,5,6,7]\n allHists=get_allHists(pixels,matrix,hits,thresholdCuts)\n pixels = pixels if (pixels!=None) else [ (row,col) for row in range(nRows) for col in range(nColumns) ]\n BLRValue = BL_v + deltaBLToBLR\n system.feb.dac.dacBLRRaw.set(BLRValue)\n print(\"Thresholds (system.feb.dac.dacBLRRaw): \", hex(BLRValue))\n system.feb.dac.dacBLRaw.set(BL_v)\n print(\"Thresholds (system.feb.dac.dacBLRaw): \", hex(BL_v))\n for (row,col) in pixels:\n system.feb.Chess2Ctrl0.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n system.feb.Chess2Ctrl1.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n system.feb.Chess2Ctrl2.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n print(\"enable Pixel: (%i,%i)\"%(row,col))\n #a=thresholdCuts[1]\n for threshold in thresholdCuts:\n #threshold=a\n print(\"Thresholds (system.feb.dac.dacPIXTHRaw): \", hex(threshold))\n system.feb.dac.dacPIXTHRaw.set(threshold)\n time.sleep(1.0)\n system.readAll()\n for cnt in range(nCounts):\n system.feb.chargeInj.calPulseVar.set(1)\n time.sleep(0.05)\n system.readAll()\n for n in [1]:\n for hit in hits:\n matrix_i=0\n if eval(get_funct('Valid',matrix_i,hit)):\n row_det = int(eval(get_funct('row_det',matrix_i,hit)))\n col_det = int(eval(get_funct('col_det',matrix_i,hit)))\n if (row_det,col_det) in pixels:\n allHists[(row_det,col_det)][matrix_i][hit][threshold].append(float(eval(get_funct('time_det',matrix_i,hit))))\n print(\"row_det: \",row_det, \"col_det:\", col_det, \"system.feb.chargeInj.hitDetTime\"+str(matrix_i)+\"_\"+str(hit)+\":\", float(eval(get_funct('time_det',matrix_i,hit))))\n matrix_i=1\n if eval(get_funct('Valid',matrix_i,hit)):\n row_det = int(eval(get_funct('row_det',matrix_i,hit)))\n col_det = int(eval(get_funct('col_det',matrix_i,hit)))\n if (row_det,col_det) in pixels:\n allHists[(row_det,col_det)][matrix_i][hit][threshold].append(float(eval(get_funct('time_det',matrix_i,hit))))\n print(\"row_det: \",row_det, \"col_det:\", col_det, \"system.feb.chargeInj.hitDetTime\"+str(matrix_i)+\"_\"+str(hit)+\":\", float(eval(get_funct('time_det',matrix_i,hit))))\n matrix_i=2\n if eval(get_funct('Valid',matrix_i,hit)):\n row_det = int(eval(get_funct('row_det',matrix_i,hit)))\n col_det = int(eval(get_funct('col_det',matrix_i,hit)))\n if (row_det,col_det) in pixels:\n 
allHists[(row_det,col_det)][matrix_i][hit][threshold].append(float(eval(get_funct('time_det',matrix_i,hit))))\n print(\"row_det: \",row_det, \"col_det:\", col_det, \"system.feb.chargeInj.hitDetTime\"+str(matrix_i)+\"_\"+str(hit)+\":\", float(eval(get_funct('time_det',matrix_i,hit))))\n allhist=save_timep(allHists)\n return allhist\n\ndef get_funct(name,matrix,hit):\n name_d={'Valid':'system.feb.chargeInj.hitDetValid'+str(matrix)+'_'+str(hit)+'._rawGet','row_det':'system.feb.chargeInj.hitDetRow'+str(matrix)+'_'+str(hit)+'._rawGet()','col_det':'system.feb.chargeInj.hitDetCol'+str(matrix)+'_'+str(hit)+'._rawGet()','time_det':'system.feb.chargeInj.hitDetTime'+str(matrix)+'_'+str(hit)+'._rawGet()'}\n return name_d[name]\n\n\n\n\ndef makeCalibCurveLoopTH(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\", pixEnableLogic = 1, chargeInjLogic = 0, pixTrimI = 0):\n nColumns = 32\n nRows = 128\n allHists = []\n logging.info(\" Using makeCalibCurveLoopTH......\")\n\n\n # Turn on one pixel at a time\n# print(\"Disable all pixels\")\n# system.feb.Chess2Ctrl0.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n# system.feb.Chess2Ctrl1.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n# system.feb.Chess2Ctrl2.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)\n pixels = pixels if (pixels!=None) else [ (row,col) for row in range(nRows) for col in range(nColumns) ]\n for (row,col) in pixels:\n print(\"Pixel: (%i,%i)\"%(row,col))\n system.feb.Chess2Ctrl0.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n system.feb.Chess2Ctrl1.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n system.feb.Chess2Ctrl2.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n ####hists_row = [ R.TH1F(\"row_%i_%i_%i\"%(i_asic,row,col),\"\",128,0,128) for i_asic in range(3) ]\n ####hists_col = [ R.TH1F(\"col_%i_%i_%i\"%(i_asic,row,col),\"\",32,0,32) for i_asic in range(3) ]\n hists_row = [[], [], []]\n hists_col = [[], [], []]\n for threshold in thresholdCuts:\n ####hists = [ R.TH1F(\"deltaT_%i_%i_%i_%s\"%(i_asic,row,col,hex(threshold)),\"\",100,0,1000) for i_asic in range(3) ] # deltaT in ns\n hists = [[], [], []]\n print(\"Thresholds (system.feb.dac.dacPIXTHRaw): \", hex(threshold))\n system.feb.dac.dacPIXTHRaw.set(threshold)\n #system.feb.dac.dacBLRaw.set(threshold+608)\n #print(\"Thresholds (system.feb.dac.dacBLRRaw): \", hex(threshold))\n #system.feb.dac.dacBLRRaw.set(threshold)\n# print(\"Thresholds (system.feb.dac.dacBLRaw): \", hex(threshold))\n# system.feb.dac.dacBLRaw.set(threshold)\n # this delay seems to be very important to enable the comparitor inside the asic to settle. 
(smaller values tend to make this \n # tests to report wrong times\n time.sleep(2.0)\n system.readAll()\n for cnt in range(nCounts):\n #time.sleep(0.1)\n # start charge injection\n #system.feb.memReg.chargInjStartEventReg.set(0)\n system.feb.chargeInj.calPulseVar.set(1)\n time.sleep(0.1) \n system.readAll()\n if system.feb.chargeInj.hitDetValid0._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow0._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol0._rawGet())\n ####hists_row[0].Fill(row_det)\n ####hists_col[0].Fill(col_det)\n hists_row[0].append(row_det)\n hists_col[0].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[0].Fill(float(system.feb.chargeInj.hitDetTime0._rawGet()))\n hists[0].append(float(system.feb.chargeInj.hitDetTime0._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime0: \", float(system.feb.chargeInj.hitDetTime0._rawGet()))\n else:\n hists[0].append(-1.0)\n print(\"row_det: \",-1, \":col_det:\", -1, \":system.feb.chargeInj.hitDetTime0: \", float(-1))\n\n if system.feb.chargeInj.hitDetValid1._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow1._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol1._rawGet())\n ####hists_row[1].Fill(row_det)\n ####hists_col[1].Fill(col_det)\n hists_row[1].append(row_det)\n hists_col[1].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[1].Fill(float(system.feb.chargeInj.hitDetTime1._rawGet()))\n hists[1].append(float(system.feb.chargeInj.hitDetTime1._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime1: \", float(system.feb.chargeInj.hitDetTime1._rawGet()))\n else:\n hists[1].append(-1.0)\n print(\"row_det: \",-1, \":col_det:\", -1, \":system.feb.chargeInj.hitDetTime1: \", float(-1))\n\n if system.feb.chargeInj.hitDetValid2._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow2._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol2._rawGet())\n ####hists_row[2].Fill(row_det)\n ####hists_col[2].Fill(col_det)\n hists_row[2].append(row_det)\n hists_col[2].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[2].Fill(float(system.feb.chargeInj.hitDetTime2._rawGet()))\n hists[2].append(float(system.feb.chargeInj.hitDetTime2._rawGet()))\n print(\"row_det: \",row_det, \"col_det\", col_det, \"system.feb.chargeInj.hitDetTime2: \", float(system.feb.chargeInj.hitDetTime2._rawGet()))\n else:\n hists[2].append(-1.0)\n print(\"row_det: \",-1, \":col_det:\", -1, \":system.feb.chargeInj.hitDetTime2: \", float(-1))\n\n allHists.append(hists)\n \n return allHists\n\n\ndef swingTHvsBL(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\"):\n allHists = []\n logging.info(\"Using swingTHvsBL......\")\n\n pixEnable = 1\n chargeInj = 1\n trim = 15\n\n #\n system.feb.dac.dacPIXTHRaw.set(0x9ce)\n system.feb.dac.dacBLRRaw.set(0x5c2)\n system.feb.dac.dacBLRaw.set(0x5c2)\n # \n system.feb.memReg.initValueReg.set(0x0)\n system.feb.memReg.endValueReg.set(0xfff)\n system.feb.memReg.delayValueReg.set(0x5)\n\n print(\"Disable all pixels\")\n system.feb.Chess2Ctrl0.writeAllPixels(enable= 0,chargeInj= 1)\n system.feb.Chess2Ctrl1.writeAllPixels(enable= 0,chargeInj= 1)\n system.feb.Chess2Ctrl2.writeAllPixels(enable= 0,chargeInj= 1)\n\n\n print(\"Trim, pixEnable, chargeInj: (%i,%i,%i)\"%(trim, pixEnable, chargeInj))\n hists = SwingThLoopBLx(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnable, chargeInjLogic = chargeInj, pixTrimI = trim, vs = 'BL')\n 
allHists.append(hists)\n\n return allHists\n\n\ndef swingTHvsBLR(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\"):\n allHists = []\n\n pixEnable = 1\n chargeInj = 1\n trim = 15\n\n #\n system.feb.dac.dacPIXTHRaw.set(0x9ce)\n system.feb.dac.dacBLRRaw.set(0x5c2)\n system.feb.dac.dacBLRaw.set(0x5c2)\n # \n system.feb.memReg.initValueReg.set(0x0)\n system.feb.memReg.endValueReg.set(0xfff)\n system.feb.memReg.delayValueReg.set(0x5)\n\n print(\"Disable all pixels\")\n system.feb.Chess2Ctrl0.writeAllPixels(enable= 0,chargeInj= 1)\n system.feb.Chess2Ctrl1.writeAllPixels(enable= 0,chargeInj= 1)\n system.feb.Chess2Ctrl2.writeAllPixels(enable= 0,chargeInj= 1)\n\n\n print(\"Trim, pixEnable, chargeInj: (%i,%i,%i)\"%(trim, pixEnable, chargeInj))\n hists = SwingThLoopBLx(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnable, chargeInjLogic = chargeInj, pixTrimI = trim, vs = 'BLR')\n allHists.append(hists)\n\n return allHists\n\ndef SwingThLoopBLx(system,nCounts,thresholdCuts,pixels=None,histFileName=\"scurve.root\", pixEnableLogic = 1, chargeInjLogic = 0, pixTrimI = 0, vs = 'BL'):\n nColumns = 32\n nRows = 128\n allHists = []\n logging.info(\"Using SwingThLoopBLx......\")\n\n pixels = pixels if (pixels!=None) else [ (row,col) for row in range(nRows) for col in range(nColumns) ]\n for (row,col) in pixels:\n print(\"Pixel: (%i,%i)\"%(row,col))\n system.feb.Chess2Ctrl0.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n system.feb.Chess2Ctrl1.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n system.feb.Chess2Ctrl2.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI= pixTrimI)\n\n hists_row = [[], [], []]\n hists_col = [[], [], []]\n for threshold in thresholdCuts:\n ####hists = [ R.TH1F(\"deltaT_%i_%i_%i_%s\"%(i_asic,row,col,hex(threshold)),\"\",100,0,1000) for i_asic in range(3) ] # deltaT in ns\n hists = [[], [], []]\n #print(\"Thresholds (system.feb.dac.dacPIXTHRaw): \", hex(threshold))\n #system.feb.dac.dacPIXTHRaw.set(threshold)\n #system.feb.dac.dacBLRaw.set(threshold+608)\n if (vs == 'BL'):\n system.feb.dac.dacBLRaw.set(threshold)\n print(\"Thresholds (system.feb.dac.dacBLRaw): \", hex(threshold), ':system.feb.dac.dacBL:', system.feb.dac.dacBL._rawGet())\n\n else:\n print(\"Thresholds (system.feb.dac.dacBLRRaw): \", hex(threshold), ':system.feb.dac.dacBLR:', system.feb.dac.dacBLR._rawGet())\n system.feb.dac.dacBLRRaw.set(threshold)\n\n # this delay seems to be very important to enable the comparitor inside the asic to settle. 
(smaller values tend to make this \n # tests to report wrong times\n time.sleep(2.0)\n system.readAll()\n for cnt in range(nCounts):\n #time.sleep(0.1)\n # start charge injection\n system.feb.memReg.chargInjStartEventReg.set(0)\n #system.feb.chargeInj.calPulseVar.set(1)\n time.sleep(0.1) \n system.readAll()\n if system.feb.chargeInj.hitDetValid0._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow0._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol0._rawGet())\n ####hists_row[0].Fill(row_det)\n ####hists_col[0].Fill(col_det)\n hists_row[0].append(row_det)\n hists_col[0].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[0].Fill(float(system.feb.chargeInj.hitDetTime0._rawGet()))\n hists[0].append(float(system.feb.chargeInj.hitDetTime0._rawGet()))\n print(\"row_det: \",row_det, \":col_det:\", col_det, \":system.feb.chargeInj.hitDetTime0: \", float(system.feb.chargeInj.hitDetTime0._rawGet()))\n else:\n hists[0].append(-1.0)\n print(\"row_det: \",-1, \":col_det:\", -1, \":system.feb.chargeInj.hitDetTime0: \", float(-1))\n\n if system.feb.chargeInj.hitDetValid1._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow1._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol1._rawGet())\n ####hists_row[1].Fill(row_det)\n ####hists_col[1].Fill(col_det)\n hists_row[1].append(row_det)\n hists_col[1].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[1].Fill(float(system.feb.chargeInj.hitDetTime1._rawGet()))\n hists[1].append(float(system.feb.chargeInj.hitDetTime1._rawGet()))\n print(\"row_det: \",row_det, \":col_det:\", col_det, \":system.feb.chargeInj.hitDetTime1: \", float(system.feb.chargeInj.hitDetTime1._rawGet()))\n else:\n hists[1].append(-1.0)\n print(\"row_det: \",-1, \":col_det:\", -1, \":system.feb.chargeInj.hitDetTime1: \", float(-1))\n\n if system.feb.chargeInj.hitDetValid2._rawGet():\n row_det = int(system.feb.chargeInj.hitDetRow2._rawGet())\n col_det = int(system.feb.chargeInj.hitDetCol2._rawGet())\n ####hists_row[2].Fill(row_det)\n ####hists_col[2].Fill(col_det)\n hists_row[2].append(row_det)\n hists_col[2].append(col_det)\n #if (row == row_det) and (col == col_det):\n ####hists[2].Fill(float(system.feb.chargeInj.hitDetTime2._rawGet()))\n hists[2].append(float(system.feb.chargeInj.hitDetTime2._rawGet()))\n print(\"row_det: \",row_det, \":col_det:\", col_det, \":system.feb.chargeInj.hitDetTime2: \", float(system.feb.chargeInj.hitDetTime2._rawGet()))\n else:\n hists[2].append(-1.0)\n print(\"row_det: \",-1, \":col_det:\", -1, \":system.feb.chargeInj.hitDetTime2: \", float(-1))\n\n allHists.append(hists)\n \n return allHists\n\n" ]
[ [ "numpy.loadtxt", "numpy.arange" ] ]
adriaan16/brainstorm
[ "568603cb99f412476a3712d127dc3bc95f08fa35" ]
[ "ext/neuron/neuron/generators.py" ]
[ "\"\"\" generators for the neuron project \"\"\"\n\n# general imports\nimport sys\nimport os\nimport zipfile\n\n# third party imports\nimport numpy as np\nimport nibabel as nib\nimport scipy\nimport keras\nfrom keras.utils import np_utils \nfrom keras.models import Model\n\n# local packages\nimport pynd.ndutils as nd\nimport pytools.patchlib as pl\nimport pytools.timer as timer\n\n# reload patchlib (it's often updated right now...)\nfrom imp import reload\nreload(pl)\n\n# other neuron (this project) packages\nfrom . import dataproc as nrn_proc\nfrom . import models as nrn_models\n\n\nclass Vol(object):\n \n def __init__(self, \n volpath,\n ext='.npz',\n nb_restart_cycle=None, # number of files to restart after\n name='single_vol', # name\n fixed_vol_size=True, # assumes each volume is fixed size\n ):\n\n # get filenames at given paths\n volfiles = _get_file_list(volpath, ext, vol_rand_seed)\n nb_files = len(volfiles)\n assert nb_files > 0, \"Could not find any files at %s with extension %s\" % (volpath, ext)\n\n # set up restart cycle for volume files --\n # i.e. after how many volumes do we restart\n if nb_restart_cycle is None:\n nb_restart_cycle = nb_files\n\n # compute subvolume split\n vol_data = _load_medical_volume(os.path.join(volpath, volfiles[0]), ext)\n # process volume\n if data_proc_fn is not None:\n vol_data = data_proc_fn(vol_data)\n [f for f in _npz_headers(npz, namelist=['vol_data.npy'])][0][1]\n\n nb_patches_per_vol = 1\n if fixed_vol_size and (patch_size is not None) and all(f is not None for f in patch_size):\n nb_patches_per_vol = np.prod(pl.gridsize(vol_data.shape, patch_size, patch_stride))\n\n assert nb_restart_cycle <= (nb_files * nb_patches_per_vol), \\\n '%s restart cycle (%s) too big (%s) in %s' % \\\n (name, nb_restart_cycle, nb_files * nb_patches_per_vol, volpath)\n\n\ndef vol(volpath,\n ext='.npz',\n batch_size=1,\n expected_nb_files=-1,\n expected_files=None,\n data_proc_fn=None, # processing function that takes in one arg (the volume)\n relabel=None, # relabeling array\n nb_labels_reshape=0, # reshape to categorial format for keras, need # labels\n keep_vol_size=False, # whether to keep the volume size on categorical resizing\n name='single_vol', # name, optional\n nb_restart_cycle=None, # number of files to restart after\n patch_size=None, # split the volume in patches? if so, get patch_size\n patch_stride=1, # split the volume in patches? 
if so, get patch_stride\n collapse_2d=None,\n extract_slice=None,\n force_binary=False,\n nb_feats=1,\n patch_rand=False,\n patch_rand_seed=None,\n vol_rand_seed=None,\n binary=False,\n yield_incomplete_final_batch=True,\n verbose=False):\n \"\"\"\n generator for single volume (or volume patches) from a list of files\n\n simple volume generator that loads a volume (via npy/mgz/nii/niigz), processes it,\n and prepares it for keras model formats\n\n if a patch size is passed, breaks the volume into patches and generates those\n \"\"\"\n\n # get filenames at given paths\n volfiles = _get_file_list(volpath, ext, vol_rand_seed)\n nb_files = len(volfiles)\n assert nb_files > 0, \"Could not find any files at %s with extension %s\" % (volpath, ext)\n\n # compute subvolume split\n vol_data = _load_medical_volume(os.path.join(volpath, volfiles[0]), ext)\n\n # process volume\n if data_proc_fn is not None:\n vol_data = data_proc_fn(vol_data)\n\n nb_patches_per_vol = 1\n if patch_size is not None and all(f is not None for f in patch_size):\n if relabel is None and len(patch_size) == (len(vol_data.shape) - 1):\n tmp_patch_size = [f for f in patch_size]\n patch_size = [*patch_size, vol_data.shape[-1]]\n patch_stride = [f for f in patch_stride]\n patch_stride = [*patch_stride, vol_data.shape[-1]]\n assert len(vol_data.shape) == len(patch_size), \"Vol dims %d are not equal to patch dims %d\" % (len(vol_data.shape), len(patch_size))\n nb_patches_per_vol = np.prod(pl.gridsize(vol_data.shape, patch_size, patch_stride))\n if nb_restart_cycle is None:\n print(\"setting restart cycle to\", nb_files)\n nb_restart_cycle = nb_files\n \n assert nb_restart_cycle <= (nb_files * nb_patches_per_vol), \\\n '%s restart cycle (%s) too big (%s) in %s' % \\\n (name, nb_restart_cycle, nb_files * nb_patches_per_vol, volpath)\n\n # check the number of files matches expected (if passed)\n if expected_nb_files >= 0:\n assert nb_files == expected_nb_files, \\\n \"number of files do not match: %d, %d\" % (nb_files, expected_nb_files)\n if expected_files is not None:\n if not (volfiles == expected_files):\n print('file lists did not match. You should probably stop execution.', file=sys.stderr)\n print(len(volfiles), len(expected_files))\n\n if verbose:\n print('nb_restart_cycle:', nb_restart_cycle)\n\n # iterate through files\n fileidx = -1\n batch_idx = -1\n feat_idx = 0\n batch_shape = None\n while 1:\n fileidx = np.mod(fileidx + 1, nb_restart_cycle)\n if verbose and fileidx == 0:\n print('starting %s cycle' % name)\n\n # read next file (circular)\n \n try:\n if verbose:\n print('opening %s' % os.path.join(volpath, volfiles[fileidx]))\n file_name = os.path.join(volpath, volfiles[fileidx])\n vol_data = _load_medical_volume(file_name, ext, verbose)\n # print(file_name, \" was loaded\", vol_data.shape)\n except:\n debug_error_msg = \"#files: %d, fileidx: %d, nb_restart_cycle: %d. error: %s\"\n print(debug_error_msg % (len(volfiles), fileidx, nb_restart_cycle, sys.exc_info()[0]))\n raise\n\n # process volume\n if data_proc_fn is not None:\n vol_data = data_proc_fn(vol_data)\n\n # the original segmentation files have non-sequential relabel (i.e. 
some relabel are\n # missing to avoid exploding our model, we only care about the relabel that exist.\n if relabel is not None:\n vol_data = _relabel(vol_data, relabel)\n\n # split volume into patches if necessary and yield\n if patch_size is None:\n this_patch_size = vol_data.shape\n patch_stride = [1 for f in this_patch_size]\n \n else:\n this_patch_size = [f for f in patch_size]\n for pi, p in enumerate(this_patch_size):\n if p is None:\n this_patch_size[pi] = vol_data.shape[pi]\n patch_stride[pi] = 1\n\n assert ~np.any(np.isnan(vol_data)), \"Found a nan for %s\" % volfiles[fileidx]\n assert np.all(np.isfinite(vol_data)), \"Found a inf for %s\" % volfiles[fileidx]\n\n patch_gen = patch(vol_data, this_patch_size,\n patch_stride=patch_stride,\n nb_labels_reshape=nb_labels_reshape,\n batch_size=1,\n infinite=False,\n collapse_2d=collapse_2d,\n patch_rand=patch_rand,\n patch_rand_seed=patch_rand_seed,\n keep_vol_size=keep_vol_size)\n\n empty_gen = True\n patch_idx = -1\n for lpatch in patch_gen:\n empty_gen = False\n patch_idx += 1\n\n # add to feature\n if np.mod(feat_idx, nb_feats) == 0:\n vol_data_feats = lpatch\n \n else:\n vol_data_feats = np.concatenate([vol_data_feats, lpatch], np.ndim(lpatch)-1)\n feat_idx += 1\n\n if binary:\n vol_data_feats = vol_data_feats.astype(bool)\n\n if np.mod(feat_idx, nb_feats) == 0:\n feats_shape = vol_data_feats[1:]\n\n # yield previous batch if the new volume has different patch sizes\n if batch_shape is not None and (feats_shape != batch_shape):\n batch_idx = -1\n batch_shape = None\n print('switching patch sizes')\n yield np.vstack(vol_data_batch)\n\n # add to batch of volume data, unless the batch is currently empty\n if batch_idx == -1:\n vol_data_batch = [vol_data_feats]\n batch_shape = vol_data_feats[1:]\n else:\n vol_data_batch = [*vol_data_batch, vol_data_feats]\n\n # yield patch\n batch_idx += 1\n batch_done = batch_idx == batch_size - 1\n files_done = np.mod(fileidx + 1, nb_restart_cycle) == 0\n final_batch = yield_incomplete_final_batch and files_done and patch_idx == (nb_patches_per_vol-1)\n if final_batch: # verbose and \n print('last batch in %s cycle %d. nb_batch:%d' % (name, fileidx, len(vol_data_batch)))\n\n if batch_done or final_batch:\n batch_idx = -1\n q = np.vstack(vol_data_batch)\n yield q\n\n if empty_gen:\n raise ValueError('Patch generator was empty for file %s', volfiles[fileidx])\n\n\ndef patch(vol_data, # the volume\n patch_size, # patch size\n patch_stride=1, # patch stride (spacing)\n nb_labels_reshape=1, # number of labels for categorical resizing. 0 if no resizing\n keep_vol_size=False, # whether to keep the volume size on categorical resizing\n batch_size=1, # batch size\n collapse_2d=None,\n patch_rand=False,\n patch_rand_seed=None,\n variable_batch_size=False,\n infinite=False): # whether the generator should continue (re)-generating patches\n \"\"\"\n generate patches from volume for keras package\n\n Yields:\n patch: nd array of shape [batch_size, *patch_size], unless resized via nb_labels_reshape\n \"\"\"\n\n # some parameter setup\n assert batch_size >= 1, \"batch_size should be at least 1\"\n if patch_size is None:\n patch_size = vol_data.shape\n for pi,p in enumerate(patch_size):\n if p is None:\n patch_size[pi] = vol_data.shape[pi]\n batch_idx = -1\n if variable_batch_size:\n batch_size = yield\n\n\n # do while. 
if not infinite, will break at the end\n while True:\n # create patch generator\n gen = pl.patch_gen(vol_data, patch_size,\n stride=patch_stride,\n rand=patch_rand,\n rand_seed=patch_rand_seed)\n\n # go through the patch generator\n empty_gen = True\n for lpatch in gen:\n\n empty_gen = False\n # reshape output layer as categorical and prep proper size\n # print(lpatch.shape, nb_labels_reshape, keep_vol_size, patch_size)\n lpatch = _categorical_prep(lpatch, nb_labels_reshape, keep_vol_size, patch_size)\n\n if collapse_2d is not None:\n lpatch = np.squeeze(lpatch, collapse_2d + 1) # +1 due to batch in first dim\n\n # add this patch to the stack\n if batch_idx == -1:\n if batch_size == 1:\n patch_data_batch = lpatch\n else:\n patch_data_batch = np.zeros([batch_size, *lpatch.shape[1:]])\n patch_data_batch[0, :] = lpatch\n\n else:\n patch_data_batch[batch_idx+1, :] = lpatch\n\n # yield patch\n batch_idx += 1\n if batch_idx == batch_size - 1:\n batch_idx = -1\n batch_size_y = yield patch_data_batch\n if variable_batch_size:\n batch_size = batch_size_y\n\n assert not empty_gen, 'generator was empty. vol size was %s' % ''.join(['%d '%d for d in vol_data.shape])\n\n # if not infinite generation, yield the last batch and break the while\n if not infinite:\n if batch_idx >= 0:\n patch_data_batch = patch_data_batch[:(batch_idx+1), :]\n yield patch_data_batch\n break\n\n\ndef vol_seg(volpath,\n segpath,\n proc_vol_fn=None,\n proc_seg_fn=None,\n verbose=False,\n name='vol_seg', # name, optional\n ext='.npz',\n nb_restart_cycle=None, # number of files to restart after\n nb_labels_reshape=-1,\n collapse_2d=None,\n force_binary=False,\n nb_input_feats=1,\n relabel=None,\n vol_rand_seed=None,\n seg_binary=False,\n vol_subname='norm', # subname of volume\n seg_subname='aseg', # subname of segmentation\n **kwargs):\n \"\"\"\n generator with (volume, segmentation)\n\n verbose is passed down to the base generators.py primitive generator (e.g. vol, here)\n\n ** kwargs are any named arguments for vol(...),\n except verbose, data_proc_fn, ext, nb_labels_reshape and name\n (which this function will control when calling vol())\n \"\"\"\n\n # get vol generator\n vol_gen = vol(volpath, **kwargs, ext=ext,\n nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=False,\n relabel=None, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=name+' vol',\n verbose=verbose, nb_feats=nb_input_feats, vol_rand_seed=vol_rand_seed)\n\n # get seg generator, matching nb_files\n # vol_files = [f.replace('norm', 'aseg') for f in _get_file_list(volpath, ext)]\n # vol_files = [f.replace('orig', 'aseg') for f in vol_files]\n vol_files = [f.replace(vol_subname, seg_subname) for f in _get_file_list(volpath, ext, vol_rand_seed)]\n seg_gen = vol(segpath, **kwargs, ext=ext, nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d,\n force_binary=force_binary, relabel=relabel, vol_rand_seed=vol_rand_seed,\n data_proc_fn=proc_seg_fn, nb_labels_reshape=nb_labels_reshape, keep_vol_size=True,\n expected_files=vol_files, name=name+' seg', binary=seg_binary, verbose=False)\n\n # on next (while):\n while 1:\n # get input and output (seg) vols\n input_vol = next(vol_gen).astype('float16')\n output_vol = next(seg_gen).astype('float16') # was int8. Why? 
need float possibility...\n\n # output input and output\n yield (input_vol, output_vol)\n\n\ndef vol_cat(volpaths, # expect two folders in here\n crop=None, resize_shape=None, rescale=None, # processing parameters\n verbose=False,\n name='vol_cat', # name, optional\n ext='.npz',\n nb_labels_reshape=-1,\n vol_rand_seed=None,\n **kwargs): # named arguments for vol(...), except verbose, data_proc_fn, ext, nb_labels_reshape and name (which this function will control when calling vol()) \n \"\"\"\n generator with (volume, binary_bit) (random order)\n ONLY works with abtch size of 1 for now\n\n verbose is passed down to the base generators.py primitive generator (e.g. vol, here)\n \"\"\"\n\n folders = [f for f in sorted(os.listdir(volpaths))]\n\n # compute processing function\n proc_vol_fn = lambda x: nrn_proc.vol_proc(x, crop=crop, resize_shape=resize_shape,\n interp_order=2, rescale=rescale)\n\n # get vol generators\n generators = ()\n generators_len = ()\n for folder in folders:\n vol_gen = vol(os.path.join(volpaths, folder), **kwargs, ext=ext, vol_rand_seed=vol_rand_seed,\n data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=folder, verbose=False)\n generators_len += (len(_get_file_list(os.path.join(volpaths, folder), '.npz')), )\n generators += (vol_gen, )\n\n bake_data_test = False\n if bake_data_test:\n print('fake_data_test', file=sys.stderr)\n\n # on next (while):\n while 1:\n # build the random order stack\n order = np.hstack((np.zeros(generators_len[0]), np.ones(generators_len[1]))).astype('int')\n np.random.shuffle(order) # shuffle\n for idx in order:\n gen = generators[idx]\n\n # for idx, gen in enumerate(generators):\n z = np.zeros([1, 2]) #1,1,2 for categorical binary style\n z[0,idx] = 1 #\n # z[0,0,0] = idx\n\n data = next(gen).astype('float32')\n if bake_data_test and idx == 0:\n # data = data*idx\n data = -data\n\n yield (data, z)\n\n\ndef add_prior(gen,\n proc_vol_fn=None,\n proc_seg_fn=None,\n prior_type='location', # file-static, file-gen, location\n prior_file=None, # prior filename\n prior_feed='input', # input or output\n patch_stride=1,\n patch_size=None,\n batch_size=1,\n collapse_2d=None,\n extract_slice=None,\n force_binary=False,\n verbose=False,\n patch_rand=False,\n patch_rand_seed=None):\n \"\"\"\n #\n # add a prior generator to a given generator\n # with the number of patches in batch matching output of gen\n \"\"\"\n\n # get prior\n if prior_type == 'location':\n prior_vol = nd.volsize2ndgrid(vol_size)\n prior_vol = np.transpose(prior_vol, [1, 2, 3, 0])\n prior_vol = np.expand_dims(prior_vol, axis=0) # reshape for model\n\n elif prior_type == 'file': # assumes a npz filename passed in prior_file\n with timer.Timer('loading prior', True):\n data = np.load(prior_file)\n prior_vol = data['prior'].astype('float16')\n\n else: # assumes a volume\n with timer.Timer('loading prior', True):\n prior_vol = prior_file.astype('float16')\n\n if force_binary:\n nb_labels = prior_vol.shape[-1]\n prior_vol[:, :, :, 1] = np.sum(prior_vol[:, :, :, 1:nb_labels], 3)\n prior_vol = np.delete(prior_vol, range(2, nb_labels), 3)\n\n nb_channels = prior_vol.shape[-1]\n\n if extract_slice is not None:\n if isinstance(extract_slice, int):\n prior_vol = prior_vol[:, :, extract_slice, np.newaxis, :]\n else: # assume slices\n prior_vol = prior_vol[:, :, extract_slice, :]\n\n # get the prior to have the right volume [x, y, z, nb_channels]\n assert np.ndim(prior_vol) == 4 or np.ndim(prior_vol) == 3, \"prior is the wrong size\"\n\n # prior generator\n if patch_size is None:\n patch_size = 
prior_vol.shape[0:3]\n assert len(patch_size) == len(patch_stride)\n prior_gen = patch(prior_vol, [*patch_size, nb_channels],\n patch_stride=[*patch_stride, nb_channels],\n batch_size=batch_size,\n collapse_2d=collapse_2d,\n keep_vol_size=True,\n infinite=True,\n patch_rand=patch_rand,\n patch_rand_seed=patch_rand_seed,\n variable_batch_size=True,\n nb_labels_reshape=0)\n assert next(prior_gen) is None, \"bad prior gen setup\"\n\n # generator loop\n while 1:\n\n # generate input and output volumes\n gen_sample = next(gen)\n\n # generate prior batch\n gs_sample = _get_shape(gen_sample)\n prior_batch = prior_gen.send(gs_sample)\n\n yield (gen_sample, prior_batch)\n\n\ndef vol_prior(*args,\n proc_vol_fn=None,\n proc_seg_fn=None,\n prior_type='location', # file-static, file-gen, location\n prior_file=None, # prior filename\n prior_feed='input', # input or output\n patch_stride=1,\n patch_size=None,\n batch_size=1,\n collapse_2d=None,\n extract_slice=None,\n force_binary=False,\n nb_input_feats=1,\n verbose=False,\n vol_rand_seed=None,\n patch_rand=False,\n **kwargs): # anything else you'd like to pass to vol()\n \"\"\"\n generator that appends prior to (volume, segmentation) depending on input\n e.g. could be ((volume, prior), segmentation)\n \"\"\"\n\n patch_rand_seed = None\n if patch_rand:\n patch_rand_seed = np.random.random()\n\n\n # prepare the vol_seg\n vol_gen = vol(*args,\n **kwargs,\n collapse_2d=collapse_2d,\n force_binary=False,\n verbose=verbose,\n vol_rand_seed=vol_rand_seed)\n gen = vol(*args, **kwargs,\n proc_vol_fn=None,\n proc_seg_fn=None,\n collapse_2d=collapse_2d,\n extract_slice=extract_slice,\n force_binary=force_binary,\n verbose=verbose,\n patch_size=patch_size,\n patch_stride=patch_stride,\n batch_size=batch_size,\n vol_rand_seed=vol_rand_seed,\n patch_rand=patch_rand,\n patch_rand_seed=patch_rand_seed,\n nb_input_feats=nb_input_feats)\n\n # add prior to output\n pgen = add_prior(gen,\n proc_vol_fn=proc_vol_fn,\n proc_seg_fn=proc_seg_fn,\n prior_type=prior_type,\n prior_file=prior_file,\n prior_feed=prior_feed,\n patch_stride=patch_stride,\n patch_size=patch_size,\n batch_size=batch_size,\n collapse_2d=collapse_2d,\n extract_slice=extract_slice,\n force_binary=force_binary,\n verbose=verbose,\n patch_rand=patch_rand,\n patch_rand_seed=patch_rand_seed,\n vol_rand_seed=vol_rand_seed)\n\n # generator loop\n while 1:\n\n gen_sample, prior_batch = next(pgen)\n input_vol, output_vol = gen_sample\n\n if prior_feed == 'input':\n yield ([input_vol, prior_batch], output_vol)\n else:\n assert prior_feed == 'output'\n yield (input_vol, [output_vol, prior_batch])\n\n\ndef vol_seg_prior(*args,\n proc_vol_fn=None,\n proc_seg_fn=None,\n prior_type='location', # file-static, file-gen, location\n prior_file=None, # prior filename\n prior_feed='input', # input or output\n patch_stride=1,\n patch_size=None,\n batch_size=1,\n collapse_2d=None,\n extract_slice=None,\n force_binary=False,\n nb_input_feats=1,\n verbose=False,\n vol_rand_seed=None,\n patch_rand=None,\n **kwargs):\n \"\"\"\n generator that appends prior to (volume, segmentation) depending on input\n e.g. 
could be ((volume, prior), segmentation)\n \"\"\"\n\n\n patch_rand_seed = None\n if patch_rand:\n patch_rand_seed = np.random.random()\n\n # prepare the vol_seg\n gen = vol_seg(*args, **kwargs,\n proc_vol_fn=None,\n proc_seg_fn=None,\n collapse_2d=collapse_2d,\n extract_slice=extract_slice,\n force_binary=force_binary,\n verbose=verbose,\n patch_size=patch_size,\n patch_stride=patch_stride,\n batch_size=batch_size,\n vol_rand_seed=vol_rand_seed,\n patch_rand=patch_rand,\n patch_rand_seed=patch_rand_seed,\n nb_input_feats=nb_input_feats)\n\n # add prior to output\n pgen = add_prior(gen,\n proc_vol_fn=proc_vol_fn,\n proc_seg_fn=proc_seg_fn,\n prior_type=prior_type,\n prior_file=prior_file,\n prior_feed=prior_feed,\n patch_stride=patch_stride,\n patch_size=patch_size,\n batch_size=batch_size,\n collapse_2d=collapse_2d,\n extract_slice=extract_slice,\n force_binary=force_binary,\n verbose=verbose,\n patch_rand=patch_rand,\n patch_rand_seed=patch_rand_seed)\n\n # generator loop\n while 1:\n\n gen_sample, prior_batch = next(pgen)\n input_vol, output_vol = gen_sample\n\n if prior_feed == 'input':\n yield ([input_vol, prior_batch], output_vol)\n else:\n assert prior_feed == 'output'\n yield (input_vol, [output_vol, prior_batch])\n\n\ndef vol_prior_hack(*args,\n proc_vol_fn=None,\n proc_seg_fn=None,\n prior_type='location', # file-static, file-gen, location\n prior_file=None, # prior filename\n prior_feed='input', # input or output\n patch_stride=1,\n patch_size=None,\n batch_size=1,\n collapse_2d=None,\n extract_slice=None,\n force_binary=False,\n nb_input_feats=1,\n verbose=False,\n vol_rand_seed=None,\n **kwargs):\n \"\"\"\n \n \"\"\"\n # prepare the vol_seg\n gen = vol_seg_hack(*args, **kwargs,\n proc_vol_fn=None,\n proc_seg_fn=None,\n collapse_2d=collapse_2d,\n extract_slice=extract_slice,\n force_binary=force_binary,\n verbose=verbose,\n patch_size=patch_size,\n patch_stride=patch_stride,\n batch_size=batch_size,\n vol_rand_seed=vol_rand_seed,\n nb_input_feats=nb_input_feats)\n\n # get prior\n if prior_type == 'location':\n prior_vol = nd.volsize2ndgrid(vol_size)\n prior_vol = np.transpose(prior_vol, [1, 2, 3, 0])\n prior_vol = np.expand_dims(prior_vol, axis=0) # reshape for model\n\n elif prior_type == 'file': # assumes a npz filename passed in prior_file\n with timer.Timer('loading prior', True):\n data = np.load(prior_file)\n prior_vol = data['prior'].astype('float16')\n else : # assumes a volume\n with timer.Timer('astyping prior', verbose):\n prior_vol = prior_file\n if not (prior_vol.dtype == 'float16'):\n prior_vol = prior_vol.astype('float16')\n\n if force_binary:\n nb_labels = prior_vol.shape[-1]\n prior_vol[:, :, :, 1] = np.sum(prior_vol[:, :, :, 1:nb_labels], 3)\n prior_vol = np.delete(prior_vol, range(2, nb_labels), 3)\n\n nb_channels = prior_vol.shape[-1]\n\n if extract_slice is not None:\n if isinstance(extract_slice, int):\n prior_vol = prior_vol[:, :, extract_slice, np.newaxis, :]\n else: # assume slices\n prior_vol = prior_vol[:, :, extract_slice, :]\n\n # get the prior to have the right volume [x, y, z, nb_channels]\n assert np.ndim(prior_vol) == 4 or np.ndim(prior_vol) == 3, \"prior is the wrong size\"\n\n # prior generator\n if patch_size is None:\n patch_size = prior_vol.shape[0:3]\n assert len(patch_size) == len(patch_stride)\n prior_gen = patch(prior_vol, [*patch_size, nb_channels],\n patch_stride=[*patch_stride, nb_channels],\n batch_size=batch_size,\n collapse_2d=collapse_2d,\n keep_vol_size=True,\n infinite=True,\n #variable_batch_size=True, # this\n 
nb_labels_reshape=0)\n # assert next(prior_gen) is None, \"bad prior gen setup\"\n\n # generator loop\n while 1:\n\n # generate input and output volumes\n input_vol = next(gen)\n\n if verbose and np.all(input_vol.flat == 0):\n print(\"all entries are 0\")\n\n # generate prior batch\n # with timer.Timer(\"with send?\"):\n # prior_batch = prior_gen.send(input_vol.shape[0])\n prior_batch = next(prior_gen)\n\n if prior_feed == 'input':\n yield ([input_vol, prior_batch], input_vol)\n else:\n assert prior_feed == 'output'\n yield (input_vol, [input_vol, prior_batch])\n\n\ndef vol_seg_hack(volpath,\n segpath,\n proc_vol_fn=None,\n proc_seg_fn=None,\n verbose=False,\n name='vol_seg', # name, optional\n ext='.npz',\n nb_restart_cycle=None, # number of files to restart after\n nb_labels_reshape=-1,\n collapse_2d=None,\n force_binary=False,\n nb_input_feats=1,\n relabel=None,\n vol_rand_seed=None,\n seg_binary=False,\n vol_subname='norm', # subname of volume\n seg_subname='aseg', # subname of segmentation\n **kwargs):\n \"\"\"\n generator with (volume, segmentation)\n\n verbose is passed down to the base generators.py primitive generator (e.g. vol, here)\n\n ** kwargs are any named arguments for vol(...),\n except verbose, data_proc_fn, ext, nb_labels_reshape and name\n (which this function will control when calling vol())\n \"\"\"\n\n # get vol generator\n vol_gen = vol(volpath, **kwargs, ext=ext,\n nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=False,\n relabel=None, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=name+' vol',\n verbose=verbose, nb_feats=nb_input_feats, vol_rand_seed=vol_rand_seed)\n \n\n # on next (while):\n while 1:\n # get input and output (seg) vols\n input_vol = next(vol_gen).astype('float16')\n\n # output input and output\n yield input_vol\n\n\ndef vol_sr_slices(volpath,\n nb_input_slices,\n nb_slice_spacing,\n batch_size=1,\n ext='.npz',\n vol_rand_seed=None,\n nb_restart_cycle=None,\n name='vol_sr_slices',\n rand_slices=True, # randomize init slice order (i.e. 
across entries per batch) given a volume\n simulate_whole_sparse_vol=False,\n verbose=False\n ):\n \"\"\"\n default generator for slice-wise super resolution\n \"\"\"\n\n def indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing):\n idx = start_indices[0]\n output_batch = np.expand_dims(vol_data[:,:,idx:idx+nb_slices_in_subvol], 0)\n input_batch = np.expand_dims(vol_data[:,:,idx:(idx+nb_slices_in_subvol):(nb_slice_spacing+1)], 0)\n \n for idx in start_indices[1:]:\n out_sel = np.expand_dims(vol_data[:,:,idx:idx+nb_slices_in_subvol], 0)\n output_batch = np.vstack([output_batch, out_sel])\n input_batch = np.vstack([input_batch, np.expand_dims(vol_data[:,:,idx:(idx+nb_slices_in_subvol):(nb_slice_spacing+1)], 0)])\n output_batch = np.reshape(output_batch, [batch_size, -1, output_batch.shape[-1]])\n \n return (input_batch, output_batch)\n\n\n print('vol_sr_slices: SHOULD PROPERLY RANDOMIZE accross different subjects', file=sys.stderr)\n \n volfiles = _get_file_list(volpath, ext, vol_rand_seed)\n nb_files = len(volfiles)\n\n if nb_restart_cycle is None:\n nb_restart_cycle = nb_files\n\n # compute the number of slices we'll need in a subvolume\n nb_slices_in_subvol = (nb_input_slices - 1) * (nb_slice_spacing + 1) + 1\n\n # iterate through files\n fileidx = -1\n while 1:\n fileidx = np.mod(fileidx + 1, nb_restart_cycle)\n if verbose and fileidx == 0:\n print('starting %s cycle' % name)\n\n\n try:\n vol_data = _load_medical_volume(os.path.join(volpath, volfiles[fileidx]), ext, verbose)\n except:\n debug_error_msg = \"#files: %d, fileidx: %d, nb_restart_cycle: %d. error: %s\"\n print(debug_error_msg % (len(volfiles), fileidx, nb_restart_cycle, sys.exc_info()[0]))\n raise\n\n # compute some random slice\n nb_slices = vol_data.shape[2]\n nb_start_slices = nb_slices - nb_slices_in_subvol + 1\n\n # prepare batches\n if simulate_whole_sparse_vol: # if essentially simulate a whole sparse volume for consistent inputs, and yield slices like that:\n init_slice = 0\n if rand_slices:\n init_slice = np.random.randint(0, high=nb_start_slices-1)\n\n all_start_indices = list(range(init_slice, nb_start_slices, nb_slice_spacing+1))\n\n for batch_start in range(0, len(all_start_indices), batch_size*(nb_input_slices-1)):\n start_indices = [all_start_indices[s] for s in range(batch_start, batch_start + batch_size)]\n input_batch, output_batch = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)\n yield (input_batch, output_batch)\n \n # if just random slices, get a batch of random starts from this volume and that's it.\n elif rand_slices:\n assert not simulate_whole_sparse_vol\n start_indices = np.random.choice(range(nb_start_slices), size=batch_size, replace=False)\n input_batch, output_batch = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)\n yield (input_batch, output_batch)\n\n # go slice by slice (overlapping regions)\n else:\n for batch_start in range(0, nb_start_slices, batch_size):\n start_indices = list(range(batch_start, batch_start + batch_size))\n input_batch, output_batch = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)\n yield (input_batch, output_batch)\n \n\ndef img_seg(volpath,\n segpath,\n batch_size=1,\n verbose=False,\n nb_restart_cycle=None,\n name='img_seg', # name, optional\n ext='.png',\n vol_rand_seed=None,\n **kwargs):\n \"\"\"\n generator for (image, segmentation)\n \"\"\"\n\n def imggen(path, ext, nb_restart_cycle=None):\n \"\"\"\n TODO: should really use the volume 
generators for this\n \"\"\"\n files = _get_file_list(path, ext, vol_rand_seed)\n if nb_restart_cycle is None:\n nb_restart_cycle = len(files)\n\n idx = -1\n while 1:\n idx = np.mod(idx+1, nb_restart_cycle)\n im = scipy.misc.imread(os.path.join(path, files[idx]))[:, :, 0]\n yield im.reshape((1,) + im.shape)\n\n img_gen = imggen(volpath, ext, nb_restart_cycle)\n seg_gen = imggen(segpath, ext)\n\n # on next (while):\n while 1:\n input_vol = np.vstack([next(img_gen).astype('float16')/255 for i in range(batch_size)])\n input_vol = np.expand_dims(input_vol, axis=-1)\n\n output_vols = [np_utils.to_categorical(next(seg_gen).astype('int8'), num_classes=2) for i in range(batch_size)]\n output_vol = np.vstack([np.expand_dims(f, axis=0) for f in output_vols])\n\n # output input and output\n yield (input_vol, output_vol)\n\n\n# Some internal use functions\n\ndef _get_file_list(volpath, ext=None, vol_rand_seed=None):\n \"\"\"\n get a list of files at the given path with the given extension\n \"\"\"\n files = [f for f in sorted(os.listdir(volpath)) if ext is None or f.endswith(ext)]\n if vol_rand_seed is not None:\n np.random.seed(vol_rand_seed)\n files = np.random.permutation(files).tolist()\n return files\n\n\ndef _load_medical_volume(filename, ext, verbose=False):\n \"\"\"\n load a medical volume from one of a number of file types\n \"\"\"\n with timer.Timer('load_vol', verbose >= 2):\n if ext == '.npz':\n vol_file = np.load(filename)\n vol_data = vol_file['vol_data']\n elif ext == 'npy':\n vol_data = np.load(filename)\n elif ext == '.mgz' or ext == '.nii' or ext == '.nii.gz':\n vol_med = nib.load(filename)\n vol_data = vol_med.get_data()\n else:\n raise ValueError(\"Unexpected extension %s\" % ext)\n\n return vol_data\n\n\ndef _categorical_prep(vol_data, nb_labels_reshape, keep_vol_size, patch_size):\n\n if nb_labels_reshape > 1:\n \n lpatch = _to_categorical(vol_data, nb_labels_reshape, keep_vol_size)\n # if keep_vol_size:\n # lpatch = np.reshape(lpatch, [*patch_size, nb_labels_reshape])\n elif nb_labels_reshape == 1:\n lpatch = np.expand_dims(vol_data, axis=-1)\n else:\n assert nb_labels_reshape == 0\n lpatch = vol_data\n lpatch = np.expand_dims(lpatch, axis=0)\n\n return lpatch\n\n\n\ndef _to_categorical(y, num_classes=None, reshape=True):\n \"\"\"\n # copy of keras.utils.np_utils.to_categorical, but with a boolean matrix instead of float\n\n Converts a class vector (integers) to binary class matrix.\n\n E.g. 
for use with categorical_crossentropy.\n\n # Arguments\n y: class vector to be converted into a matrix\n (integers from 0 to num_classes).\n num_classes: total number of classes.\n\n # Returns\n A binary matrix representation of the input.\n \"\"\"\n oshape = y.shape\n y = np.array(y, dtype='int').ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes), bool)\n categorical[np.arange(n), y] = 1\n \n if reshape:\n categorical = np.reshape(categorical, [*oshape, num_classes])\n \n return categorical\n\ndef _relabel(vol_data, labels, forcecheck=False):\n \n if forcecheck:\n vd = np.unique(vol_data.flat)\n assert len(vd) == len(labels), \"number of given labels does not match number of actual labels\"\n \n # by doing zeros, any label not in labels gets left to 0\n new_vol_data = np.zeros(vol_data.shape, vol_data.dtype)\n for idx, val in np.ndenumerate(labels):\n new_vol_data[vol_data == val] = idx\n \n return new_vol_data\n\n\ndef _npz_headers(npz, namelist=None):\n \"\"\"\n taken from https://stackoverflow.com/a/43223420\n\n Takes a path to an .npz file, which is a Zip archive of .npy files.\n Generates a sequence of (name, shape, np.dtype).\n\n namelist is a list with variable names, ending in '.npy'. \n e.g. if variable 'var' is in the file, namelist could be ['var.npy']\n \"\"\"\n with zipfile.ZipFile(npz) as archive:\n if namelist is None:\n namelist = archive.namelist()\n\n for name in namelist:\n if not name.endswith('.npy'):\n continue\n\n npy = archive.open(name)\n version = np.lib.format.read_magic(npy)\n shape, fortran, dtype = np.lib.format._read_array_header(npy, version)\n yield name[:-4], shape, dtype\n\ndef _get_shape(x):\n if isinstance(x, (list, tuple)):\n return _get_shape(x[0])\n else:\n return x.shape[0]\n" ]
[ [ "numpy.load", "numpy.random.random", "numpy.max", "numpy.lib.format.read_magic", "numpy.transpose", "numpy.random.randint", "numpy.isfinite", "numpy.ndim", "numpy.arange", "numpy.expand_dims", "numpy.vstack", "numpy.mod", "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.lib.format._read_array_header", "numpy.random.shuffle", "numpy.squeeze", "numpy.ndenumerate", "numpy.isnan", "numpy.random.seed", "numpy.sum", "numpy.random.permutation", "numpy.ones", "numpy.all", "numpy.unique" ] ]
136823xuewei/gait-recognition
[ "03e2c8d5660018897a5fa277470b839bdd9df4be" ]
[ "classification/casiab_performance.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom matplotlib import pyplot as plt\nimport tensorflow as tf\nimport seaborn as sb\nimport pandas as pd\nimport numpy as np\nimport math\nimport time\nimport cv2\nimport os\n\n\ntf.reset_default_graph()\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.85)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n# tip: if you run into problems with TensorBoard\n# clear the contents of this directory, re-run this script\n# then restart TensorBoard to see the result\n# LOGDIR = './graphs' \nmodel_frames = 64\n\nNUM_CLASSES = 74\nNUM_PIXELS = 88 * 128\n\nTRAIN_STEPS = 0\nBATCH_SIZE = 1 << 5\n\nMODEL_ANGLE_DICT = {'000': True, '018': False, '036': False, '054': False, '072': False, '090': False, '108': False, '126': False, '144': False, '162': False, '180': False}\nTEST_ANGLE_DICT = {'000': False, '018': False, '036': False, '054': True, '072': False, '090': False, '108': False, '126': False, '144': False, '162': False, '180': False}\n\nLEARNING_RATE = 1e-4\n\nDATA_PATH = 'Generated_full_data_GEI'\nstart_time = time.time()\n\nkeep_prob = 0.5 #dropout (keep probability)\n\n\ndef del_files(path):\n for root, dirs, files in os.walk(path):\n for name in files:\n if name.startswith(\".\"):\n os.remove(os.path.join(root, name))\n print(\"Delete File: \" + os.path.join(root, name))\n\n\ndef get_label(_index, num_classes):\n # label = np.zeros(shape=[num_classes], dtype='float32')\n # label[int(_index) - 1] = 1\n # return label\n return (int(_index) - 1)\n \n\ndef load_images_from_folder(folder, model_angle_dict, test_angle_dict):\n train_frames = []\n train_labels = []\n probe_frames = []\n probe_labels = []\n\n for i in xrange(11):\n train_frames.append([])\n\n for i in xrange(11):\n train_labels.append([])\n\n for i in xrange(11):\n probe_frames.append([])\n\n for i in xrange(11):\n probe_labels.append([])\n\n\n for human_id in os.listdir(os.path.join(folder, 'train')):\n if int(human_id) < 74:\n continue\n \n for angle in os.listdir(os.path.join(folder, 'train', human_id)):\n # if not model_angle_dict[angle]:\n # continue\n\n for _type in os.listdir(os.path.join(folder, 'train', human_id, angle)):\n img = cv2.imread(os.path.join(folder, 'train', human_id, angle, _type), 0)\n if img is not None:\n train_frames[int(angle) // 18].append(img.flatten())\n train_labels[int(angle) // 18].append(get_label(human_id, 124))\n \n for human_id in os.listdir(os.path.join(folder, 'test')):\n for angle in os.listdir(os.path.join(folder, 'test', human_id)):\n # if not test_angle_dict[angle]:\n # continue\n\n for _type in os.listdir(os.path.join(folder, 'test', human_id, angle)):\n img = cv2.imread(os.path.join(folder, 'test', human_id, angle, _type), 0)\n if img is not None:\n probe_frames[int(angle) // 18].append(img.flatten())\n probe_labels[int(angle) // 18].append(get_label(human_id, 124))\n \n return (train_frames, train_labels, probe_frames, probe_labels)\n\n\ndel_files(DATA_PATH)\n(train_frames, train_labels, probe_frames, probe_labels) = load_images_from_folder(DATA_PATH, MODEL_ANGLE_DICT, TEST_ANGLE_DICT)\n\n# Define inputs\nwith tf.name_scope('input'):\n images = tf.placeholder(tf.float32, [None, NUM_PIXELS], name=\"pixels\")\n labels = tf.placeholder(tf.float32, [None, NUM_CLASSES], name=\"labels\")\n\n# dropout_prob = tf.placeholder_with_default(1.0, shape=())\n \n# Create some wrappers for simplicity\ndef conv2d(x, W, b, strides=1):\n # Conv2D wrapper, with bias 
and relu activation\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\n x = tf.nn.bias_add(x, b)\n return tf.nn.relu(x)\n\n\ndef maxpool2d(x, k=2):\n # MaxPool2D wrapper\n return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, 2, 2, 1],\n padding='SAME')\n\n\n# Create model\ndef conv_net(x, weights, biases, dropout):\n # Reshape input picture\n x = tf.reshape(x, shape=[-1, 128, 88, 1])\n\n # Convolution Layer\n conv1 = conv2d(x, weights['wc1'], biases['bc1'])\n # Max Pooling (down-sampling)\n conv1 = maxpool2d(conv1, k=2)\n conv1 = tf.contrib.layers.batch_norm(conv1)\n\n # Convolution Layer\n conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\n # Max Pooling (down-sampling)\n conv2 = maxpool2d(conv2, k=3)\n conv2 = tf.contrib.layers.batch_norm(conv2)\n\n # Fully connected layer\n # Reshape conv2 output to fit fully connected layer input\n fc3 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])\n fc3 = tf.add(tf.matmul(fc3, weights['wd1']), biases['bd1'])\n fc3 = tf.nn.relu(fc3)\n # Apply Dropout\n # fc1 = tf.nn.dropout(fc1, dropout)\n # fc3 = tf.nn.dropout(fc3, dropout_prob)\n\n # # Output, class prediction\n fc4 = tf.add(tf.matmul(fc3, weights['fc4']), biases['fc4'])\n return fc3\n\n# Store layers weight & bias\ninitializer = tf.contrib.layers.xavier_initializer()\nweights = {\n # 7x7 conv, 1 input, 18 outputs\n 'wc1': tf.Variable(initializer([7, 7, 1, 18])),\n # 5x5 conv, 18 inputs, 45 outputs\n 'wc2': tf.Variable(initializer([5, 5, 18, 45])),\n # fully connected, 7*7*64 inputs, 1024 outputs\n 'wd1': tf.Variable(initializer([32*22*45, 1024])),\n # # 1024 inputs, 10 outputs (class prediction)\n 'fc4': tf.Variable(initializer([1024, NUM_CLASSES]))\n}\n\nbiases = {\n 'bc1': tf.Variable(tf.random_normal([18])),\n 'bc2': tf.Variable(tf.random_normal([45])),\n 'bd1': tf.Variable(tf.random_normal([1024])),\n 'fc4': tf.Variable(tf.random_normal([NUM_CLASSES]))\n}\n \ny = conv_net(images, weights, biases, keep_prob)\n\nsess.run(tf.global_variables_initializer())\n\nsaver = tf.train.Saver()\n\nsaver.restore(sess, \"./full_tri_model/model.ckpt\")\nprint(\"%d frames model restored.\"%model_frames)\n\nprint(' ', end=',')\n\nfor i in xrange(11):\n print('%4d'%(i * 18), end=',')\n\nprint_map = np.zeros(shape=(11, 11), dtype=np.float32)\ngallery_encoding = []\nprobe_encoding = []\n\nfor a in range(11):\n gallery_encoding.append(sess.run(y, feed_dict={images: train_frames[a]}))\n\nfor a in range(11):\n probe_encoding.append(sess.run(y, feed_dict={images: probe_frames[a]}))\n\nfor a in range(11):\n print('')\n print('%3d'%(a * 18), end=',')\n\n for b in range(11): \n simlarity = np.zeros(shape=[len(probe_encoding[b]), len(gallery_encoding[a])], dtype=np.float32)\n pred_label = np.zeros(shape=[len(probe_encoding[b])], dtype=np.int)\n \n for i in range(len(probe_encoding[b])):\n for j in range(len(gallery_encoding[a])):\n simlarity[i][j] = np.exp(-(((probe_encoding[b][i] - gallery_encoding[a][j])/1024.0)**2).sum())\n\n # import pdb\n\n # pdb.set_trace()\n \n tmp_index = simlarity[i].argmax()\n pred_label[i] = train_labels[a][tmp_index]\n # if not (pred_label[i] == probe_labels[i]):\n # print(str((pred_label[i] == probe_labels[i])) + ' ' + str(pred_label[i]) + ' ' + str(probe_labels[i]))\n \n acc = np.sum(pred_label[:] == probe_labels[b][:])\n # print_map[b][10 - a] = 100.0 * acc/(len(probe_labels[b])*1.0)\n print_map[b][a] = 100.0 * acc/(len(probe_labels[b])*1.0)\n print('%.2f'%(100.0 * acc/(len(probe_labels[b])*1.0)), 
end=',')\nprint(print_map)\n\n\ngrid_visualization = np.array(print_map.transpose())\ngrid_visualization.shape = (11, 11)\nsb.heatmap(grid_visualization, cmap='Oranges')\nplt.xticks(np.arange(11) + 0.5, xrange(0, 181, 18))\nplt.yticks(np.arange(11) + 0.5, xrange(180, -1, -18))\nplt.xlabel('Gallery Angle')\nplt.ylabel('Probe Angle')\n\nplt.show()" ]
[ [ "tensorflow.contrib.layers.batch_norm", "tensorflow.nn.conv2d", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.global_variables_initializer", "tensorflow.random_normal", "tensorflow.train.Saver", "tensorflow.ConfigProto", "numpy.arange", "tensorflow.nn.bias_add", "tensorflow.nn.max_pool", "tensorflow.nn.relu", "numpy.zeros", "tensorflow.placeholder", "tensorflow.name_scope", "matplotlib.pyplot.show", "matplotlib.pyplot.xlabel", "numpy.sum", "tensorflow.reset_default_graph", "matplotlib.pyplot.ylabel", "tensorflow.GPUOptions" ] ]
WildMeOrg/wildbook-ia
[ "ac433d4f2a47b1d905c421a36c497f787003afc3" ]
[ "wbia/expt/test_result.py" ]
[ "# -*- coding: utf-8 -*-\n# TODO: find unused functions and kill them\nimport logging\nimport copy\nimport operator\nimport utool as ut\nimport vtool as vt\nimport numpy as np\nimport itertools as it\nfrom functools import partial, reduce\nfrom wbia.expt import cfghelpers\nfrom wbia.expt import experiment_helpers\n\nprint, rrr, profile = ut.inject2(__name__)\nlogger = logging.getLogger('wbia')\n\n\ndef build_cmsinfo(cm_list, qreq_):\n r\"\"\"\n Helper function to report results over multiple queries (chip matches).\n Basically given a group of queries of the same name, we only care if one of\n them is correct. This emulates encounters.\n\n Runs queries of a specific configuration returns the best rank of each\n query.\n\n Args:\n cm_list (list): list of chip matches\n qreq_ (QueryRequest): request that computed the chip matches.\n\n Returns:\n dict: cmsinfo - info about multiple chip matches cm_list\n\n CommandLine:\n python -m wbia get_query_result_info\n python -m wbia get_query_result_info:0 --db lynx \\\n -a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1\n python -m wbia get_query_result_info:0 --db lynx \\\n -a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1 --cmd\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> qreq_ = wbia.main_helpers.testdata_qreq_(a=[':qindex=0:3,dindex=0:5'])\n >>> cm_list = qreq_.execute()\n >>> cmsinfo = build_cmsinfo(cm_list, qreq_)\n >>> print(ut.repr2(cmsinfo))\n\n Ignore:\n wbia -e rank_cmc --db humpbacks -a :has_any=hasnotch,mingt=2 \\\n -t :proot=BC_DTW --show --nocache-big\n\n wbia -e rank_cmc --db humpbacks -a :is_known=True,mingt=2 \\\n -t :pipeline_root=BC_DTW\n\n wbia -e rank_cmc --db humpbacks -a :is_known=True \\\n -t :pipeline_root=BC_DTW \\\n --qaid=1,9,15,16,18 --daid-override=1,9,15,16,18,21,22 \\\n --show --debug-depc\n\n --clear-all-depcache\n \"\"\"\n ibs = qreq_.ibs\n\n qaids = qreq_.qaids\n daids = qreq_.daids\n # Get the groundtruth ranks and accuracy measures\n qx2_cminfo = []\n for cm in cm_list:\n if hasattr(cm, 'extend_results'):\n cminfo = cm.extend_results(qreq_).summarize(qreq_)\n else:\n cminfo = cm.summarize(qreq_)\n qx2_cminfo.append(cminfo)\n cmsinfo = ut.dict_stack(qx2_cminfo, 'qx2_')\n cmsinfo['qx2_gt_rank'] = ut.replace_nones(cmsinfo['qx2_gt_rank'], -1)\n\n if False:\n qx2_gtaids = ibs.get_annot_groundtruth(qaids, daid_list=daids)\n qx2_avepercision = np.array(\n [\n cm.get_average_percision(ibs=ibs, gt_aids=gt_aids)\n for (cm, gt_aids) in zip(cm_list, qx2_gtaids)\n ]\n )\n cmsinfo['qx2_avepercision'] = qx2_avepercision\n\n # Compute mAP score # TODO: use mAP score\n # (Actually map score doesn't make much sense if using name scoring\n # mAP = qx2_avepercision[~np.isnan(qx2_avepercision)].mean() # NOQA\n\n qaids = qreq_.qaids\n # qaids2 = [cm.qaid for cm in cm_list]\n # qnids = qreq_.get_qreq_annot_nids(qaids) # TODO: use new nid getter\n qnids = ibs.get_annot_nids(qaids)\n\n unique_dnids = np.unique(ibs.get_annot_nids(qreq_.daids))\n unique_qnids, groupxs = ut.group_indices(qnids)\n cm_group_list = ut.apply_grouping(cm_list, groupxs)\n qnid2_aggnamescores = {}\n\n qnx2_nameres_info = []\n\n # Ranked list aggregation over groups of query annots\n nameres_info_list = []\n for qnid, cm_group in zip(unique_qnids, cm_group_list):\n nid2_name_score_group = [\n dict([(nid, cm.name_score_list[nidx]) for nid, nidx in cm.nid2_nidx.items()])\n for cm in cm_group\n ]\n aligned_name_scores = np.array(\n [\n ut.dict_take(nid_to_name_score, 
unique_dnids.tolist(), -np.inf)\n for nid_to_name_score in nid2_name_score_group\n ]\n ).T\n name_score_list = np.nanmax(aligned_name_scores, axis=1)\n qnid2_aggnamescores[qnid] = name_score_list\n # sort\n sortx = name_score_list.argsort()[::-1]\n sorted_namescores = name_score_list[sortx]\n sorted_dnids = unique_dnids[sortx]\n\n # infer agg name results\n success = sorted_dnids == qnid\n failure = np.logical_and(~success, sorted_dnids > 0)\n gt_name_rank = None if not np.any(success) else np.where(success)[0][0]\n gf_name_rank = None if not np.any(failure) else np.nonzero(failure)[0][0]\n\n gt_nid = sorted_dnids[gt_name_rank]\n gf_nid = sorted_dnids[gf_name_rank]\n gt_name_score = sorted_namescores[gt_name_rank]\n gf_name_score = sorted_namescores[gf_name_rank]\n\n if gt_name_score <= 0:\n # ensure failure cases are loud give them the worst possible rank\n # instead of a random one.\n if hasattr(qreq_, 'dnids'):\n gt_name_rank = len(qreq_.dnids) + 1\n else:\n dnids = list(set(ibs.get_annot_nids(qreq_.daids)))\n gt_name_rank = len(dnids) + 1\n qnx2_nameres_info = {}\n qnx2_nameres_info['qnid'] = qnid\n qnx2_nameres_info['gt_nid'] = gt_nid\n qnx2_nameres_info['gf_nid'] = gf_nid\n qnx2_nameres_info['gt_name_rank'] = gt_name_rank\n qnx2_nameres_info['gf_name_rank'] = gf_name_rank\n qnx2_nameres_info['gt_name_score'] = gt_name_score\n qnx2_nameres_info['gf_name_score'] = gf_name_score\n\n nameres_info_list.append(qnx2_nameres_info)\n nameres_info = ut.dict_stack(nameres_info_list, 'qnx2_')\n cmsinfo.update(nameres_info)\n\n return cmsinfo\n\n\ndef combine_testres_list(ibs, testres_list):\n \"\"\"\n combine test results over multiple annot configs\n\n The combination of pipeline and annotation config is indexed by cfgx.\n A cfgx corresponds to a unique query request\n\n CommandLine:\n python -m wbia --tf combine_testres_list\n\n python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show\n python -m wbia --tf -draw_rank_cmc --db PZ_Master1 --show\n python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default\n python -m wbia --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default\n\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.expt import harness\n >>> ibs, testres = harness.testdata_expts('PZ_MTEST', ['varysize'])\n \"\"\"\n import copy\n from wbia.expt import annotation_configs\n\n acfg_list = [tr.acfg for tr in testres_list]\n acfg_lbl_list = annotation_configs.get_varied_acfg_labels(acfg_list)\n\n flat_acfg_list = annotation_configs.flatten_acfg_list(acfg_list)\n nonvaried_acfg, varied_acfg_list = ut.partition_varied_cfg_list(flat_acfg_list)\n\n def combine_lbls(lbl, acfg_lbl):\n if len(lbl) == 0:\n return acfg_lbl\n if len(acfg_lbl) == 0:\n return lbl\n return lbl + '+' + acfg_lbl\n\n # TODO: depcirate cfg_dict list for pcfg_list (I think)\n\n agg_cfg_list = ut.flatten([tr.cfg_list for tr in testres_list])\n agg_cfgx2_qreq_ = ut.flatten([tr.cfgx2_qreq_ for tr in testres_list])\n agg_cfgdict_list = ut.flatten([tr.cfgdict_list for tr in testres_list])\n agg_cfgx2_cmsinfo = ut.flatten([tr.cfgx2_cmsinfo for tr in testres_list])\n agg_varied_acfg_list = ut.flatten(\n [[acfg] * len(tr.cfg_list) for tr, acfg in zip(testres_list, varied_acfg_list)]\n )\n agg_cfgx2_lbls = ut.flatten(\n [\n [combine_lbls(lbl, acfg_lbl) for lbl in tr.cfgx2_lbl]\n for tr, acfg_lbl in zip(testres_list, acfg_lbl_list)\n ]\n )\n\n agg_cfgx2_acfg = ut.flatten(\n [\n [copy.deepcopy(acfg)] * len(tr.cfg_list)\n for tr, acfg in zip(testres_list, acfg_list)\n 
]\n )\n\n big_testres = TestResult(\n agg_cfg_list, agg_cfgx2_lbls, agg_cfgx2_cmsinfo, agg_cfgx2_qreq_\n )\n\n # Give the big test result an acfg that is common between everything\n big_testres.acfg = annotation_configs.unflatten_acfgdict(nonvaried_acfg)\n # TODO: cfgdict_list -> pcfg_list\n big_testres.cfgdict_list = agg_cfgdict_list # TODO: depricate\n\n big_testres.common_acfg = annotation_configs.compress_aidcfg(big_testres.acfg)\n big_testres.common_cfgdict = reduce(ut.dict_intersection, big_testres.cfgdict_list)\n big_testres.varied_acfg_list = agg_varied_acfg_list\n big_testres.nonvaried_acfg = nonvaried_acfg\n big_testres.varied_cfg_list = [\n ut.delete_dict_keys(cfgdict.copy(), list(big_testres.common_cfgdict.keys()))\n for cfgdict in big_testres.cfgdict_list\n ]\n big_testres.acfg_list = acfg_list\n big_testres.cfgx2_acfg = agg_cfgx2_acfg\n big_testres.cfgx2_pcfg = agg_cfgdict_list\n\n assert len(agg_cfgdict_list) == len(agg_cfgx2_acfg)\n\n # big_testres.acfg\n testres = big_testres\n # big_testres = testres\n return testres\n\n\n@ut.reloadable_class\nclass TestResult(ut.NiceRepr):\n \"\"\"\n CommandLine:\n export SMK_PIPE=\"smk:nwords=[64000],sv=[False]\"\n wbia TestResult --db PZ_MTEST -a ctrl -p $SMK_PIPE\n wbia TestResult --db Oxford -a oxford -p $SMK_PIPE\n\n Example:\n >>> # Script\n >>> from wbia.init import main_helpers\n >>> import utool as ut\n >>> ibs, testres = main_helpers.testdata_expts()\n >>> testres.help()\n >>> actions = testres.get_actions()\n >>> testres.map_score()\n >>> ut.qtensure()\n >>> prompt = ut.InteractivePrompt(actions)\n >>> prompt.loop()\n \"\"\"\n\n def __init__(testres, cfg_list, cfgx2_lbl, cfgx2_cmsinfo, cfgx2_qreq_):\n assert len(cfg_list) == len(cfgx2_lbl), 'bad lengths1: %r != %r' % (\n len(cfg_list),\n len(cfgx2_lbl),\n )\n assert len(cfgx2_qreq_) == len(cfgx2_lbl), 'bad lengths2: %r != %r' % (\n len(cfgx2_qreq_),\n len(cfgx2_lbl),\n )\n assert len(cfgx2_cmsinfo) == len(cfgx2_lbl), 'bad lengths3: %r != %r' % (\n len(cfgx2_cmsinfo),\n len(cfgx2_lbl),\n )\n # TODO rename cfg_list to pcfg_list\n testres.cfg_list = cfg_list\n testres.cfgx2_lbl = cfgx2_lbl\n testres.cfgx2_cmsinfo = cfgx2_cmsinfo\n testres.cfgx2_qreq_ = cfgx2_qreq_\n # TODO: uncomment\n # testres.cfgx2_acfg\n # testres.cfgx2_qcfg\n # testres.acfg_list = None #\n testres.lbl = None\n testres.testnameid = None\n\n @classmethod\n def from_cms(cls, cm_list, qreq_):\n cfg_list = [qreq_.qparams] # should actually be the specified dict\n cfgx2_lbl = ['unspecified']\n cmsinfo = build_cmsinfo(cm_list, qreq_)\n cfgx2_cmsinfo = [cmsinfo]\n cfgx2_qreq_ = [qreq_]\n testres = cls(cfg_list, cfgx2_lbl, cfgx2_cmsinfo, cfgx2_qreq_)\n return testres\n\n def __str__(testres):\n return testres.reconstruct_test_flags()\n\n # def __repr__(testres):\n # return testres._custom_str()\n\n def __nice__(testres):\n dbname = None if testres.ibs is None else testres.ibs.get_dbname()\n # hashkw = dict(_new=True, pathsafe=False)\n infostr_ = 'nCfg=%s' % testres.nConfig\n if testres.nConfig == 1:\n qreq_ = testres.cfgx2_qreq_[0]\n infostr_ += ' nQ=%s, nD=%s %s' % (\n len(qreq_.qaids),\n len(qreq_.daids),\n qreq_.get_pipe_hashid(),\n )\n # nD=%s %s' % (, len(testres.daids), testres.get_pipe_hashid())\n nice = '%s %s' % (dbname, infostr_)\n return nice\n\n @property\n def ibs(testres):\n ibs_list = []\n for qreq_ in testres.cfgx2_qreq_:\n try:\n ibs_list.append(qreq_.ibs)\n except AttributeError:\n ibs_list.append(qreq_.depc.controller)\n ibs = ibs_list[0]\n for ibs_ in ibs_list:\n assert (\n ibs.get_dbdir() == 
ibs_.get_dbdir()\n ), 'all requests must use the same database'\n return ibs\n\n @property\n def qaids(testres):\n assert (\n testres.has_constant_qaids()\n ), 'must have constant qaids to use this property'\n return testres.cfgx2_qaids[0]\n # return testres._qaids\n\n @property\n def nConfig(testres):\n # FIXME: this is the number of requests not the number of\n # pipeline configurations\n return len(testres.cfg_list)\n\n @property\n def unique_pcfgs(testres):\n unique_idxs = ut.unique_indices(map(id, testres.cfgx2_pcfg))\n return ut.take(testres.cfgx2_pcfg, unique_idxs)\n\n @property\n def nQuery(testres):\n return len(testres.qaids)\n\n @property\n def rank_mat(testres):\n return testres.get_rank_mat()\n\n @property\n def cfgx2_daids(testres):\n daids_list = [qreq_.daids for qreq_ in testres.cfgx2_qreq_]\n return daids_list\n\n @property\n def cfgx2_qaids(testres):\n qaids_list = [qreq_.qaids for qreq_ in testres.cfgx2_qreq_]\n return qaids_list\n\n def has_constant_daids(testres):\n return ut.allsame(testres.cfgx2_daids)\n\n def has_constant_qaids(testres):\n return ut.allsame(testres.cfgx2_qaids)\n\n def has_constant_length_daids(testres):\n return ut.allsame(list(map(len, testres.cfgx2_daids)))\n\n def has_constant_length_qaids(testres):\n return ut.allsame(list(map(len, testres.cfgx2_qaids)))\n\n def get_infoprop_list(testres, key, qaids=None):\n \"\"\"\n key = 'qx2_gt_rank'\n key = 'qx2_gt_rank'\n qaids = testres.get_test_qaids()\n \"\"\"\n if key == 'participant':\n # Get if qaids are part of the config\n cfgx2_infoprop = [np.in1d(qaids, aids_) for aids_ in testres.cfgx2_qaids]\n else:\n _tmp1_cfgx2_infoprop = ut.get_list_column(testres.cfgx2_cmsinfo, key)\n _tmp2_cfgx2_infoprop = list(\n map(np.array, ut.util_list.replace_nones(_tmp1_cfgx2_infoprop, np.nan))\n )\n if qaids is None:\n cfgx2_infoprop = _tmp2_cfgx2_infoprop\n else:\n # Use nan if the aid doesnt exist\n cfgx2_qaid2_qx = [\n dict(zip(aids_, range(len(aids_)))) for aids_ in testres.cfgx2_qaids\n ]\n qxs_list = [\n ut.dict_take(qaid2_qx, qaids, None) for qaid2_qx in cfgx2_qaid2_qx\n ]\n cfgx2_infoprop = [\n [np.nan if x is None else props[x] for x in qxs]\n for props, qxs in zip(_tmp2_cfgx2_infoprop, qxs_list)\n ]\n if key == 'qx2_gt_rank' or key.endswith('_rank'):\n # hack\n wpr = testres.get_worst_possible_rank()\n cfgx2_infoprop = [\n np.array([wpr if rank == -1 else rank for rank in infoprop])\n for infoprop in cfgx2_infoprop\n ]\n return cfgx2_infoprop\n\n def get_infoprop_mat(testres, key, qaids=None):\n \"\"\"\n key = 'qx2_gf_raw_score'\n key = 'qx2_gt_raw_score'\n \"\"\"\n cfgx2_infoprop = testres.get_infoprop_list(key, qaids)\n # concatenate each query rank across configs\n infoprop_mat = np.vstack(cfgx2_infoprop).T\n return infoprop_mat\n\n @ut.memoize\n def get_rank_mat(testres, qaids=None):\n # Ranks of Best Results\n rank_mat = testres.get_infoprop_mat(key='qx2_gt_rank', qaids=qaids)\n return rank_mat\n\n def get_worst_possible_rank(testres):\n # worst_possible_rank = max(9001, len(testres.daids) + 1)\n worst_possible_rank = max([len(qreq_.daids) for qreq_ in testres.cfgx2_qreq_]) + 1\n # worst_possible_rank = len(testres.daids) + 1\n return worst_possible_rank\n\n def get_rank_histograms(testres, bins=None, key=None, join_acfgs=False):\n \"\"\"\n Ignore:\n testres.get_infoprop_mat('qnx2_gt_name_rank')\n testres.get_infoprop_mat('qnx2_gf_name_rank')\n testres.get_infoprop_mat('qnx2_qnid')\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import 
main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('testdb1', a=['default'])\n >>> bins = 'dense'\n >>> key = 'qnx2_gt_name_rank'\n >>> config_hists = testres.get_rank_histograms(bins, key=key)\n \"\"\"\n if key is None:\n key = 'qx2_gt_rank'\n # key = 'qnx2_gt_name_rank'\n if bins is None:\n bins = testres.get_rank_histogram_bins()\n elif bins == 'dense':\n bins = np.arange(testres.get_worst_possible_rank() + 1)\n\n cfgx2_ranks = testres.get_infoprop_list(key=key)\n\n # Use numpy histogram repr\n cfgx2_hist = np.zeros((len(cfgx2_ranks), len(bins) - 1), dtype=np.int32)\n\n for cfgx, ranks in enumerate(cfgx2_ranks):\n freq = np.histogram(ranks, bins=bins)[0]\n cfgx2_hist[cfgx] = freq\n\n if join_acfgs:\n # Hack for turtles / general way of doing cross validation\n # however, we need to change the name\n groupxs = testres.get_cfgx_groupxs()\n cfgx2_hist = np.array(\n [\n np.sum(group, axis=0)\n for group in ut.apply_grouping(cfgx2_hist, groupxs)\n ]\n )\n\n return cfgx2_hist, bins\n\n def get_rank_percentage_cumhist(testres, bins='dense', key=None, join_acfgs=False):\n r\"\"\"\n Args:\n bins (unicode): (default = u'dense')\n key (None): (default = None)\n join_acfgs (bool): (default = False)\n\n Returns:\n tuple: (config_cdfs, edges)\n\n CommandLine:\n python -m wbia --tf TestResult.get_rank_percentage_cumhist\n python -m wbia --tf TestResult.get_rank_percentage_cumhist \\\n -t baseline -a unctrl ctrl\n\n python -m wbia --tf TestResult.get_rank_percentage_cumhist \\\n --db lynx \\\n -a default:qsame_imageset=True,been_adjusted=True,excluderef=True \\\n -t default:K=1 --show --cmd\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts(\n >>> 'testdb1', a=['default:num_names=1,name_offset=[0,1]'])\n >>> bins = u'dense'\n >>> key = None\n >>> (config_cdfs, edges) = testres.get_rank_percentage_cumhist(bins)\n >>> result = ('(config_cdfs, edges) = %s' % (str((config_cdfs, edges)),))\n >>> print(result)\n \"\"\"\n cfgx2_hist, edges = testres.get_rank_histograms(\n bins, key=key, join_acfgs=join_acfgs\n )\n cfgx2_cumhist = np.cumsum(cfgx2_hist, axis=1)\n cfgx2_cumhist_percent = 100 * cfgx2_cumhist / cfgx2_cumhist.T[-1].T[:, None]\n return cfgx2_cumhist_percent, edges\n\n def get_cfgx_groupxs(testres):\n r\"\"\"\n Returns the group indices of configurations specified to be joined.\n\n Ignore:\n a = [\n 'default:minqual=good,require_timestamp=True,view=left,crossval_enc=True,joinme=1',\n 'default:minqual=good,require_timestamp=True,view=right,crossval_enc=True,joinme=1',\n 'default:minqual=ok,require_timestamp=True,view=left,crossval_enc=True,joinme=2',\n 'default:minqual=ok,require_timestamp=True,view=right,crossval_enc=True,joinme=2',\n ]\n >>> a = [\n >>> 'default:minqual=good,require_timestamp=True,view=left,crossval_enc=True,joinme=1',\n >>> 'default:minqual=good,require_timestamp=True,view=right,crossval_enc=True,joinme=1',\n >>> 'default:minqual=ok,require_timestamp=True,view=left,crossval_enc=True,joinme=2',\n >>> 'default:minqual=ok,require_timestamp=True,view=right,crossval_enc=True,joinme=2',\n >>> ]\n >>> from wbia.init import main_helpers\n >>> #a = 'default:minqual=good,require_timestamp=True,crossval_enc=True,view=[right,left]'\n >>> t = 'default:K=[1]'\n >>> ibs, testres = main_helpers.testdata_expts('WWF_Lynx_Copy', a=a, t=t)\n >>> testres.get_cfgx_groupxs()\n\n ut.lmap(sum, ut.apply_grouping([len(ut.unique(ibs.annots(aids).nids)) for aids in 
testres.cfgx2_qaids], testres.get_cfgx_groupxs()))\n ut.lmap(sum, ut.apply_grouping([len(ut.unique(ibs.annots(aids))) for aids in testres.cfgx2_qaids], testres.get_cfgx_groupxs()))\n\n Example:\n >>> # xdoctest: +REQUIRES(--slow)\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts(\n >>> 'PZ_MTEST',\n >>> a=['default:qnum_names=1,qname_offset=[0,1],joinme=1,dpername=1',\n >>> 'default:qsize=1,dpername=[1,2]'],\n >>> t=['default:K=[1,2]'])\n >>> groupxs = testres.get_cfgx_groupxs()\n >>> result = groupxs\n >>> print(result)\n [[6], [4], [0, 2], [7], [5], [1, 3]]\n \"\"\"\n # Group-ids for annotations are determined by joinme labels\n # (used primarilly in cross-validation)\n acfg_joinid = [acfg['qcfg']['joinme'] for acfg in testres.cfgx2_acfg]\n # Anything that does not have a joinme groupid is standalone and must\n # be given a unique groupid\n gen_groupid = it.count(-1, step=-1)\n acfg_groupids = [\n next(gen_groupid) if grpid is None else grpid for grpid in acfg_joinid\n ]\n # Ensure that different pipeline configs are in different groups\n pcfg_groupids = ut.get_varied_cfg_lbls(testres.cfgx2_pcfg)\n cfg_groupids = list(zip(pcfg_groupids, acfg_groupids))\n groupxs = ut.group_indices(cfg_groupids)[1]\n return groupxs\n\n def get_rank_histogram_bins(testres):\n \"\"\"easy to see histogram bins\"\"\"\n worst_possible_rank = testres.get_worst_possible_rank()\n if worst_possible_rank > 50:\n bins = [0, 1, 5, 50, worst_possible_rank, worst_possible_rank + 1]\n elif worst_possible_rank > 5:\n bins = [0, 1, 5, worst_possible_rank, worst_possible_rank + 1]\n else:\n bins = [0, 1, 5]\n return bins\n\n def get_X_LIST(testres):\n \"\"\"DEPRICATE or refactor\"\"\"\n # X_LIST = ut.get_argval('--rank-lt-list', type_=list, default=[1])\n X_LIST = ut.get_argval('--rank-lt-list', type_=list, default=[1, 5])\n return X_LIST\n\n def get_nLessX_dict(testres):\n \"\"\"\n Build a (histogram) dictionary mapping X (as in #ranks < X) to a list\n of cfg scores\n \"\"\"\n X_LIST = testres.get_X_LIST()\n nLessX_dict = {int(X): np.zeros(testres.nConfig) for X in X_LIST}\n cfgx2_qx2_gt_rank = testres.get_infoprop_list('qx2_gt_rank')\n for X in X_LIST:\n cfgx2_lessX_mask = [\n np.logical_and(0 <= qx2_gt_ranks, qx2_gt_ranks < X)\n for qx2_gt_ranks in cfgx2_qx2_gt_rank\n ]\n cfgx2_nLessX = np.array([lessX_.sum(axis=0) for lessX_ in cfgx2_lessX_mask])\n nLessX_dict[int(X)] = cfgx2_nLessX\n return nLessX_dict\n\n def get_all_varied_params(testres):\n r\"\"\"\n Returns the parameters that were varied between different\n configurations in this test\n\n Returns:\n list: varied_params\n\n CommandLine:\n python -m wbia TestResult.get_all_varied_params\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> testres = wbia.testdata_expts(\n >>> 'PZ_MTEST', t='default:K=[1,2]')[1]\n >>> varied_params = sorted(testres.get_all_varied_params())\n >>> result = ('varied_params = %s' % (ut.repr2(varied_params),))\n >>> print(result)\n varied_params = ['K', '_cfgindex']\n \"\"\"\n # only for big results\n varied_cfg_params = list(\n set(ut.flatten([cfgdict.keys() for cfgdict in testres.varied_cfg_list]))\n )\n varied_acfg_params = list(\n set(ut.flatten([acfg.keys() for acfg in testres.varied_acfg_list]))\n )\n varied_params = varied_acfg_params + varied_cfg_params\n return varied_params\n\n def get_total_num_varied_params(testres):\n return 
len(testres.get_all_varied_params())\n\n def get_param_basis(testres, key):\n \"\"\"\n Returns what a param was varied between over all tests\n key = 'K'\n key = 'dcfg_sample_size'\n \"\"\"\n if key == 'len(daids)':\n basis = sorted(list(set([len(daids) for daids in testres.cfgx2_daids])))\n elif any([key in cfgdict for cfgdict in testres.varied_cfg_list]):\n basis = sorted(\n list(set([cfgdict[key] for cfgdict in testres.varied_cfg_list]))\n )\n elif any([key in cfgdict for cfgdict in testres.varied_acfg_list]):\n basis = sorted(list(set([acfg[key] for acfg in testres.varied_acfg_list])))\n else:\n # assert False, 'param is not varied'\n if key in testres.common_cfgdict:\n basis = [testres.common_cfgdict[key]]\n elif key in testres.nonvaried_acfg:\n basis = [testres.nonvaried_acfg[key]]\n else:\n assert False, 'param=%r doesnt exist' % (key,)\n return basis\n\n def get_param_val_from_cfgx(testres, cfgx, key):\n if key == 'len(daids)':\n return len(testres.cfgx2_daids[cfgx])\n # --- HACK - the keys are different in varied dict for some reason ---\n elif any([key in cfgdict for cfgdict in testres.varied_cfg_list]):\n return testres.varied_cfg_list[cfgx][key]\n elif any([key in cfgdict for cfgdict in testres.varied_acfg_list]):\n return testres.varied_acfg_list[cfgx][key]\n # --- / Hack\n elif any([key in cfgdict for cfgdict in testres.cfgx2_pcfg]):\n return testres.cfgx2_pcfg[cfgx][key]\n elif any([key in cfgdict for cfgdict in testres.cfgx2_acfg]):\n return testres.cfgx2_acfg[cfgx][key]\n else:\n assert False, 'param=%r doesnt exist' % (key,)\n\n def get_cfgx_with_param(testres, key, val):\n \"\"\"\n Gets configs where the given parameter is held constant\n \"\"\"\n if key == 'len(daids)':\n cfgx_list = [\n cfgx\n for cfgx, daids in enumerate(testres.cfgx2_daids)\n if len(daids) == val\n ]\n elif any([key in cfgdict for cfgdict in testres.varied_cfg_list]):\n cfgx_list = [\n cfgx\n for cfgx, cfgdict in enumerate(testres.varied_cfg_list)\n if cfgdict[key] == val\n ]\n elif any([key in cfgdict for cfgdict in testres.varied_acfg_list]):\n cfgx_list = [\n cfgx\n for cfgx, acfg in enumerate(testres.varied_acfg_list)\n if acfg[key] == val\n ]\n else:\n if key in testres.common_cfgdict:\n cfgx_list = list(range(testres.nConfig))\n elif key in testres.nonvaried_acfg:\n cfgx_list = list(range(testres.nConfig))\n else:\n assert False, 'param=%r doesnt exist' % (key,)\n # assert False, 'param is not varied'\n return cfgx_list\n\n def get_pipecfg_args(testres):\n if '_cfgstr' in testres.common_cfgdict:\n pipecfg_args = [testres.common_cfgdict['_cfgstr']]\n else:\n pipecfg_args = ut.unique_ordered(\n [cfg['_cfgstr'] for cfg in testres.varied_cfg_list]\n )\n return ' '.join(pipecfg_args)\n\n def get_annotcfg_args(testres):\n \"\"\"\n CommandLine:\n # TODO: More robust fix\n # To reproduce the error\n wbia -e rank_cmc --db humpbacks_fb -a default:mingt=2,qsize=10,dsize=100 default:qmingt=2,qsize=10,dsize=100 -t default:proot=BC_DTW,decision=max,crop_dim_size=500,crop_enabled=True,manual_extract=False,use_te_scorer=True,ignore_notch=True,te_score_weight=0.5 --show\n \"\"\"\n if '_cfgstr' in testres.common_acfg['common']:\n annotcfg_args = [testres.common_acfg['common']['_cfgstr']]\n else:\n try:\n annotcfg_args = ut.unique_ordered(\n [acfg['common']['_cfgstr'] for acfg in testres.varied_acfg_list]\n )\n except KeyError:\n # HACK FIX\n try:\n annotcfg_args = ut.unique_ordered(\n [acfg['_cfgstr'] for acfg in testres.varied_acfg_list]\n )\n except KeyError:\n annotcfg_args = ut.unique_ordered(\n 
[acfg['qcfg__cfgstr'] for acfg in testres.varied_acfg_list]\n )\n return ' '.join(annotcfg_args)\n\n def reconstruct_test_flags(testres):\n flagstr = ' '.join(\n [\n '-a ' + testres.get_annotcfg_args(),\n '-t ' + testres.get_pipecfg_args(),\n '--db ' + testres.ibs.get_dbname(),\n ]\n )\n return flagstr\n\n def get_full_cfgstr(testres, cfgx):\n \"\"\"both qannots and dannots included\"\"\"\n full_cfgstr = testres.cfgx2_qreq_[cfgx].get_full_cfgstr()\n return full_cfgstr\n\n @ut.memoize\n def get_cfgstr(testres, cfgx):\n \"\"\"just dannots and config_str\"\"\"\n cfgstr = testres.cfgx2_qreq_[cfgx].get_cfgstr()\n return cfgstr\n\n def _shorten_lbls(testres, lbl):\n \"\"\"\n hacky function\n \"\"\"\n import re\n\n repl_list = [\n ('candidacy_', ''),\n ('viewpoint_compare', 'viewpoint'),\n # ('custom', 'default'),\n # ('fg_on', 'FG'),\n # ('fg_on=True', 'FG'),\n # ('fg_on=False,?', ''),\n ('fg_on=True', 'FG=True'),\n ('fg_on=False,?', 'FG=False'),\n ('lnbnn_on=True', 'LNBNN'),\n ('lnbnn_on=False,?', ''),\n ('normonly_on=True', 'normonly'),\n ('normonly_on=False,?', ''),\n ('bar_l2_on=True', 'dist'),\n ('bar_l2_on=False,?', ''),\n (r'joinme=\\d+,?', ''),\n ('dcrossval_enc', 'denc_per_name'),\n ('sv_on', 'SV'),\n ('rotation_invariance', 'RI'),\n ('affine_invariance', 'AI'),\n ('query_rotation_heuristic', 'QRH'),\n ('nNameShortlistSVER', 'nRR'),\n #\n # ('sample_per_ref_name', 'per_ref_name'),\n ('sample_per_ref_name', 'per_gt_name'),\n ('require_timestamp=True', 'require_timestamp'),\n ('require_timestamp=False,?', ''),\n ('require_timestamp=None,?', ''),\n ('[_A-Za-z]*=None,?', ''),\n ('dpername=None,?', ''),\n # ???\n # ('sample_per_ref_name', 'per_gt_name'),\n # ('per_name', 'per_gf_name'), # Try to make labels clearer for paper\n # ----\n # ('prescore_method=\\'?csum\\'?,score_method=\\'?csum\\'?,?', 'amech'),\n # ('prescore_method=\\'?nsum\\'?,score_method=\\'?nsum\\'?,?', 'fmech'),\n (\"prescore_method='?csum'?,score_method='?csum'?,?\", 'mech=annot'),\n (\"prescore_method='?nsum'?,score_method='?nsum'?,?\", 'mech=name'),\n ('force_const_size=[^,]+,?', ''),\n (r'[dq]?_true_size=\\d+,?', ''),\n (r'[dq]?_orig_size=[^,]+,?', ''),\n # Hack\n (\n '[qd]?exclude_reference='\n + ut.regex_or(['True', 'False', 'None'])\n + r'\\,?',\n '',\n ),\n # ('=True', '=On'),\n # ('=False', '=Off'),\n ('=True', '=T'),\n ('=False', '=F'),\n (',$', ''),\n ]\n for ser, rep in repl_list:\n lbl = re.sub(ser, rep, lbl)\n return lbl\n\n def get_short_cfglbls(testres, join_acfgs=False):\n \"\"\"\n Labels for published tables\n\n cfg_lbls = ['baseline:nRR=200+default:', 'baseline:+default:']\n\n CommandLine:\n python -m wbia --tf TestResult.get_short_cfglbls\n\n Example:\n >>> # SLOW_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl:size=10'],\n >>> t=['default:dim_size=[450,550]'])\n >>> cfg_lbls = testres.get_short_cfglbls()\n >>> result = ('cfg_lbls = %s' % (ut.repr2(cfg_lbls),))\n >>> print(result)\n cfg_lbls = [\n 'default:dim_size=450+ctrl',\n 'default:dim_size=550+ctrl',\n ]\n \"\"\"\n from wbia.expt import annotation_configs\n\n if False:\n acfg_names = [acfg['qcfg']['_cfgstr'] for acfg in testres.cfgx2_acfg]\n pcfg_names = [pcfg['_cfgstr'] for pcfg in testres.cfgx2_pcfg]\n # Only vary the label settings within the cfgname\n acfg_hashes = np.array(list(map(hash, acfg_names)))\n unique_hashes, a_groupxs = vt.group_indices(acfg_hashes)\n a_label_groups = []\n for groupx in a_groupxs:\n acfg_list = ut.take(testres.cfgx2_acfg, 
groupx)\n varied_lbls = annotation_configs.get_varied_acfg_labels(\n acfg_list, mainkey='_cfgstr'\n )\n a_label_groups.append(varied_lbls)\n acfg_lbls = vt.invert_apply_grouping(a_label_groups, a_groupxs)\n\n pcfg_hashes = np.array(list(map(hash, pcfg_names)))\n unique_hashes, p_groupxs = vt.group_indices(pcfg_hashes)\n p_label_groups = []\n for groupx in p_groupxs:\n pcfg_list = ut.take(testres.cfgx2_pcfg, groupx)\n varied_lbls = ut.get_varied_cfg_lbls(pcfg_list, mainkey='_cfgstr')\n p_label_groups.append(varied_lbls)\n pcfg_lbls = vt.invert_apply_grouping(p_label_groups, p_groupxs)\n\n cfg_lbls = [albl + '+' + plbl for albl, plbl in zip(acfg_lbls, pcfg_lbls)]\n else:\n cfg_lbls_ = testres.cfgx2_lbl[:]\n cfg_lbls_ = [testres._shorten_lbls(lbl) for lbl in cfg_lbls_]\n # split configs up by param and annots\n pa_tups = [lbl.split('+') for lbl in cfg_lbls_]\n cfg_lbls = []\n for pa in pa_tups:\n new_parts = []\n for part in pa:\n _tup = part.split(ut.NAMEVARSEP)\n name, settings = _tup if len(_tup) > 1 else (_tup[0], '')\n new_parts.append(part if settings else name)\n if len(new_parts) == 2 and new_parts[1] == 'default':\n newlbl = new_parts[0]\n else:\n newlbl = '+'.join(new_parts)\n cfg_lbls.append(newlbl)\n if join_acfgs:\n groupxs = testres.get_cfgx_groupxs()\n group_lbls = []\n for group in ut.apply_grouping(cfg_lbls, groupxs):\n num_parts = 0\n part_dicts = []\n for lbl in group:\n parts = []\n for count, pa in enumerate(lbl.split('+')):\n num_parts = max(num_parts, count + 1)\n cfgdict = cfghelpers.parse_cfgstr_list2([pa], strict=False)[0][0]\n parts.append(cfgdict)\n part_dicts.append(parts)\n group_lbl_parts = []\n for px in range(num_parts):\n cfgs = ut.take_column(part_dicts, px)\n nonvaried_cfg = ut.partition_varied_cfg_list(cfgs)[0]\n group_lbl_parts.append(ut.get_cfg_lbl(nonvaried_cfg))\n # logger.info('nonvaried_lbl = %r' % (nonvaried_lbl,))\n group_lbl = '+'.join(group_lbl_parts)\n group_lbls.append(group_lbl)\n cfg_lbls = group_lbls\n return cfg_lbls\n\n def get_varied_labels(testres, shorten=False, join_acfgs=False, sep=''):\n \"\"\"\n Returns labels indicating only the parameters that have been varied between\n different annot/pipeline configurations.\n\n Helper for consistent figure titles\n\n CommandLine:\n python -m wbia --tf TestResult.make_figtitle --prefix \"Seperability \" --db GIRM_Master1 -a timectrl -t Ell:K=2 --hargv=scores\n python -m wbia --tf TestResult.make_figtitle\n python -m wbia TestResult.get_varied_labels\n\n Example:\n >>> # SLOW_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts(\n >>> 'PZ_MTEST', t='default:K=[1,2]',\n >>> #a=['timectrl:qsize=[1,2],dsize=[3,4]']\n >>> a=[\n >>> 'default:qsize=[1,2],dsize=2,joinme=1,view=left',\n >>> 'default:qsize=2,dsize=3,joinme=1,view=primary',\n >>> 'default:qsize=[3,2],dsize=4,joinme=2,view=left',\n >>> 'default:qsize=4,dsize=5,joinme=2,view=primary',\n >>> ]\n >>> )\n >>> # >>> ibs, testres = wbia.testdata_expts(\n >>> # >>> 'WWF_Lynx_Copy', t='default:K=1',\n >>> # >>> a=[\n >>> # >>> 'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=1,joinme=1',\n >>> # >>> 'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=2,joinme=2',\n >>> # >>> #'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=3,joinme=3',\n >>> # >>> 'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=1,joinme=1',\n >>> # >>> 'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=2,joinme=2',\n >>> # 
>>> #'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=3,joinme=3',\n >>> # >>> ]\n >>> # >>> )\n >>> varied_lbls = testres.get_varied_labels(shorten=False, join_acfgs=True)\n >>> result = ('varied_lbls = %s' % (ut.repr2(varied_lbls, strvals=True, nl=2),))\n >>> print(result)\n\n varied_lbls = [u'K=1+qsize=1', u'K=2+qsize=1', u'K=1+qsize=2', u'K=2+qsize=2']\n \"\"\"\n from wbia.expt import annotation_configs\n\n varied_acfgs = annotation_configs.get_varied_acfg_labels(\n testres.cfgx2_acfg, checkname=True\n )\n # logger.info('varied_acfgs = %s' % (ut.repr2(varied_acfgs, nl=2),))\n # logger.info('testres.cfgx2_acfg = %s' % (ut.repr3(testres.cfgx2_acfg),))\n varied_pcfgs = ut.get_varied_cfg_lbls(testres.cfgx2_pcfg, checkname=True)\n # varied_acfgs = ut.get_varied_cfg_lbls(testres.cfgx2_acfg, checkname=True)\n name_sep = ':'\n cfg_sep = '+'\n\n if join_acfgs:\n # Hack for the grouped config problem\n new_varied_acfgs = []\n groupxs = testres.get_cfgx_groupxs()\n grouped_acfgs = ut.apply_grouping(varied_acfgs, groupxs)\n grouped_pcfgs = ut.apply_grouping(varied_pcfgs, groupxs)\n for group in grouped_acfgs:\n group = [p if name_sep in p else name_sep + p for p in group]\n # Re-parse given back into dictionary form\n cfgdicts_ = cfghelpers.parse_cfgstr_list2(group, strict=False)\n # I forget why these are stored in a 2d-list\n cfgdicts = ut.take_column(cfgdicts_, 0)\n new_acfgs = ut.partition_varied_cfg_list(cfgdicts)\n # Hack, just taking the first one that has agreement between\n # joinme / crossvalidation runs\n new_acfg = new_acfgs[0]\n if True:\n # look at internal variance within xval runs\n internal_cfgs = new_acfgs[1]\n import pandas as pd\n\n intern_variations = pd.DataFrame.from_dict(internal_cfgs).to_dict(\n orient='list'\n )\n\n op_prefixes = {\n 'sum': (np.sum, 'Σ-', ''),\n 'mean': (np.mean, 'µ-', ''),\n 'set': (lambda x: '&'.join(set(map(str, x))), '', 's'),\n }\n known_modes = {\n 'dsize': 'mean',\n 'qsize': 'sum',\n 'view': 'set',\n }\n for key in intern_variations.keys():\n if key.startswith('_'):\n continue\n mode = known_modes.get(key, None)\n vals = intern_variations[key]\n if mode is None:\n mode = 'set'\n if key == 'crossval_idx':\n new_acfg['folds'] = len(intern_variations['crossval_idx'])\n else:\n op, pref, suff = op_prefixes[mode]\n c = op(vals)\n if isinstance(c, str):\n new_acfg[pref + key + suff] = c\n else:\n new_acfg[pref + key + suff] = ut.repr2(c, precision=2)\n # if 'dsize' in intern_variations:\n # new_acfg['µ-dsize'] = np.sum(intern_variations['dsize'])\n # if 'qsize' in intern_variations:\n # new_acfg['Σ-qsize'] = np.sum(intern_variations['qsize'])\n # if 'view' in intern_variations:\n # new_acfg['views'] = '&'.join(set(intern_variations['view']))\n # if 'crossval_idx' in intern_variations:\n # new_acfg['folds'] = len(intern_variations['crossval_idx'])\n new_varied_acfgs.append(new_acfg)\n\n # Do one more dup check to remove the duplicate summaries\n common_new_acfg = ut.partition_varied_cfg_list(new_varied_acfgs)[0]\n for key in common_new_acfg.keys():\n if not key.startswith('_'):\n for new_acfg in new_varied_acfgs:\n del new_acfg[key]\n\n varied_pcfgs = ut.take_column(grouped_pcfgs, 0)\n varied_acfgs = [\n ut.get_cfg_lbl(new_acfg_, with_name=False, sep=sep)\n for new_acfg_ in new_varied_acfgs\n ]\n\n def combo_lbls(lbla, lblp):\n parts = []\n if lbla != name_sep and lbla:\n parts.append(lbla)\n if lblp != name_sep and lblp:\n parts.append(lblp)\n return (sep + cfg_sep).join(parts)\n\n varied_lbls = [\n combo_lbls(lbla, lblp) for lblp, 
lbla in zip(varied_acfgs, varied_pcfgs)\n ]\n if shorten:\n varied_lbls = [testres._shorten_lbls(lbl) for lbl in varied_lbls]\n\n return varied_lbls\n\n def get_sorted_config_labels(testres):\n \"\"\"\n helper\n \"\"\"\n key = 'qx2_gt_rank'\n cfgx2_cumhist_percent, edges = testres.get_rank_percentage_cumhist(\n bins='dense', key=key\n )\n label_list = testres.get_short_cfglbls()\n label_list = [\n ('%6.2f%%' % (percent,))\n # ut.scalar_str(percent, precision=2)\n + ' - ' + label\n for percent, label in zip(cfgx2_cumhist_percent.T[0], label_list)\n ]\n sortx = cfgx2_cumhist_percent.T[0].argsort()[::-1]\n label_list = ut.take(label_list, sortx)\n return label_list\n\n def make_figtitle(testres, plotname='', filt_cfg=None):\n \"\"\"\n Helper for consistent figure titles\n\n CommandLine:\n python -m wbia --tf TestResult.make_figtitle --prefix \"Seperability \" --db GIRM_Master1 -a timectrl -t Ell:K=2 --hargv=scores\n python -m wbia --tf TestResult.make_figtitle\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_MTEST')\n >>> plotname = ''\n >>> figtitle = testres.make_figtitle(plotname)\n >>> result = ('figtitle = %r' % (figtitle,))\n >>> print(result)\n \"\"\"\n figtitle_prefix = ut.get_argval('--prefix', type_=str, default='')\n if figtitle_prefix != '':\n figtitle_prefix = figtitle_prefix.rstrip() + ' '\n figtitle = figtitle_prefix + plotname\n hasprefix = figtitle_prefix == ''\n if hasprefix:\n figtitle += '\\n'\n\n title_aug = testres.get_title_aug(friendly=True, with_cfg=hasprefix)\n figtitle += ' ' + title_aug\n\n if filt_cfg is not None:\n filt_cfgstr = ut.get_cfg_lbl(filt_cfg)\n if filt_cfgstr.strip() != ':':\n figtitle += ' ' + filt_cfgstr\n return figtitle\n\n def get_title_aug(\n testres, with_size=True, with_db=True, with_cfg=True, friendly=False\n ):\n r\"\"\"\n Args:\n with_size (bool): (default = True)\n\n Returns:\n str: title_aug\n\n CommandLine:\n python -m wbia --tf TestResult.get_title_aug --db PZ_Master1 -a timequalctrl::timectrl\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_MTEST')\n >>> with_size = True\n >>> title_aug = testres.get_title_aug(with_size)\n >>> res = u'title_aug = %s' % (title_aug,)\n >>> print(res)\n \"\"\"\n ibs = testres.ibs\n title_aug = ''\n if with_db:\n title_aug += 'db=' + (ibs.get_dbname())\n if with_cfg:\n try:\n if '_cfgname' in testres.common_acfg['common']:\n try:\n annot_cfgname = testres.common_acfg['common']['_cfgstr']\n except KeyError:\n annot_cfgname = testres.common_acfg['common']['_cfgname']\n else:\n cfgname_list = [\n cfg['dcfg__cfgname'] for cfg in testres.varied_acfg_list\n ]\n cfgname_list = ut.unique_ordered(cfgname_list)\n annot_cfgname = '[' + ','.join(cfgname_list) + ']'\n try:\n pipeline_cfgname = testres.common_cfgdict['_cfgstr']\n except KeyError:\n # pipeline_cfgname = testres.common_cfgdict['_cfgname']\n cfgstr_list = [cfg['_cfgstr'] for cfg in testres.varied_cfg_list]\n uniuqe_cfgstrs = ut.unique_ordered(cfgstr_list)\n pipeline_cfgname = '[' + ','.join(uniuqe_cfgstrs) + ']'\n\n annot_cfgname = testres._shorten_lbls(annot_cfgname)\n pipeline_cfgname = testres._shorten_lbls(pipeline_cfgname)\n # hack turn these off if too long\n if len(annot_cfgname) < 64:\n title_aug += ' a=' + annot_cfgname\n if len(pipeline_cfgname) < 64:\n title_aug += ' t=' + pipeline_cfgname\n except Exception as ex:\n 
logger.info(ut.repr2(testres.common_acfg))\n logger.info(ut.repr2(testres.common_cfgdict))\n ut.printex(ex)\n raise\n if with_size:\n if ut.get_argflag('--hack_size_nl'):\n title_aug += '\\n'\n if testres.has_constant_qaids():\n title_aug += ' #qaids=%r' % (len(testres.qaids),)\n elif testres.has_constant_length_qaids():\n title_aug += ' #qaids=%r*' % (len(testres.cfgx2_qaids[0]),)\n if testres.has_constant_daids():\n daids = testres.cfgx2_daids[0]\n title_aug += ' #daids=%r' % (len(testres.cfgx2_daids[0]),)\n if testres.has_constant_qaids():\n all_daid_per_name_stats = ut.get_stats(\n ibs.get_num_annots_per_name(daids)[0], use_nan=True\n )\n if all_daid_per_name_stats['std'] == 0:\n title_aug += ' dper_name=%s' % (\n ut.scalar_str(\n all_daid_per_name_stats['mean'], max_precision=2\n ),\n )\n else:\n title_aug += ' dper_name=%s±%s' % (\n ut.scalar_str(all_daid_per_name_stats['mean'], precision=2),\n ut.scalar_str(all_daid_per_name_stats['std'], precision=2),\n )\n elif testres.has_constant_length_daids():\n daids = testres.cfgx2_daids[0]\n title_aug += ' #daids=%r*' % (len(testres.cfgx2_daids[0]),)\n\n if friendly:\n # Hackiness for friendliness\n # title_aug = title_aug.replace('db=PZ_Master1', 'Plains Zebras')\n # title_aug = title_aug.replace('db=NNP_MasterGIRM_core', 'Masai Giraffes')\n # title_aug = title_aug.replace('db=GZ_ALL', 'Grevy\\'s Zebras')\n title_aug = ut.multi_replace(\n title_aug,\n list(ibs.const.DBNAME_ALIAS.keys()),\n list(ibs.const.DBNAME_ALIAS.values()),\n )\n # title_aug = title_aug.replace('db=PZ_Master1', 'db=PZ')\n # title_aug = title_aug.replace('db=NNP_MasterGIRM_core', 'Masai Giraffes')\n # title_aug = title_aug.replace('db=GZ_ALL', 'Grevy\\'s Zebras')\n return title_aug\n\n def get_fname_aug(testres, **kwargs):\n import re\n\n title_aug = testres.get_title_aug(**kwargs)\n valid_regex = '-a-zA-Z0-9_.() '\n valid_extra = '=,'\n valid_regex += valid_extra\n title_aug = title_aug.replace(' ', '_') # spaces suck\n fname_aug = re.sub('[^' + valid_regex + ']+', '', title_aug)\n fname_aug = fname_aug.strip('_')\n return fname_aug\n\n def print_pcfg_info(testres):\n \"\"\"\n Prints verbose information about each pipeline configuration\n\n >>> from wbia.expt.test_result import * # NOQA\n \"\"\"\n # TODO: Rectify with other printers\n # for pcfgx, (pipecfg, lbl) in enumerate(zip(pipecfg_list, pipecfg_lbls)):\n # logger.info('+--- %d / %d ===' % (pcfgx, (len(pipecfg_list))))\n # ut.colorprint(lbl, 'white')\n # logger.info(pipecfg.get_cfgstr())\n # logger.info('L___')\n # for qreq_ in testres.cfgx2_qreq_:\n # logger.info(qreq_.get_full_cfgstr())\n # cfgdict_list = [qreq_.qparams for qreq_ in testres.cfgx2_qreq_]\n experiment_helpers.print_pipe_configs(testres.cfgx2_pcfg, testres.cfgx2_qreq_)\n\n def print_acfg_info(testres, **kwargs):\n \"\"\"\n Prints verbose information about the annotations used in each test\n configuration\n\n CommandLine:\n python -m wbia --tf TestResult.print_acfg_info\n\n Kwargs:\n see ibs.get_annot_stats_dict\n hashid, per_name, per_qual, per_vp, per_name_vpedge, per_image,\n min_name_hourdist\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_MTEST',\n >>> a=['ctrl::unctrl_comp'],\n >>> t=['candk:K=[1,2]'])\n >>> ibs = None\n >>> result = testres.print_acfg_info()\n >>> print(result)\n \"\"\"\n from wbia.expt import annotation_configs\n\n ibs = testres.ibs\n # Get unique annotation configs\n cfgx2_acfg_label = 
annotation_configs.get_varied_acfg_labels(testres.cfgx2_acfg)\n flags = ut.flag_unique_items(cfgx2_acfg_label)\n qreq_list = ut.compress(testres.cfgx2_qreq_, flags)\n acfg_list = ut.compress(testres.cfgx2_acfg, flags)\n expanded_aids_list = [(qreq_.qaids, qreq_.daids) for qreq_ in qreq_list]\n annotation_configs.print_acfg_list(acfg_list, expanded_aids_list, ibs, **kwargs)\n\n def print_unique_annot_config_stats(testres, ibs=None):\n r\"\"\"\n Args:\n ibs (IBEISController): wbia controller object(default = None)\n\n CommandLine:\n python -m wbia TestResult.print_unique_annot_config_stats\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl::unctrl_comp'])\n >>> ibs = None\n >>> result = testres.print_unique_annot_config_stats(ibs)\n >>> print(result)\n \"\"\"\n if ibs is None:\n ibs = testres.ibs\n cfx2_dannot_hashid = [\n ibs.get_annot_hashid_visual_uuid(daids) for daids in testres.cfgx2_daids\n ]\n unique_daids = ut.compress(\n testres.cfgx2_daids, ut.flag_unique_items(cfx2_dannot_hashid)\n )\n with ut.Indenter('[acfgstats]'):\n logger.info('+====')\n logger.info('Printing %d unique annotconfig stats' % (len(unique_daids)))\n common_acfg = testres.common_acfg\n common_acfg['common'] = ut.dict_filter_nones(common_acfg['common'])\n logger.info('testres.common_acfg = ' + ut.repr2(common_acfg))\n logger.info(\n 'param_basis(len(daids)) = %r' % (testres.get_param_basis('len(daids)'),)\n )\n for count, daids in enumerate(unique_daids):\n logger.info('+---')\n logger.info('acfgx = %r/%r' % (count, len(unique_daids)))\n if testres.has_constant_qaids():\n ibs.print_annotconfig_stats(testres.qaids, daids)\n else:\n ibs.print_annot_stats(daids, prefix='d')\n logger.info('L___')\n\n def report(testres):\n testres.print_results()\n\n def print_results(testres, **kwargs):\n r\"\"\"\n CommandLine:\n python -m wbia --tf TestResult.print_results\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.expt import harness\n >>> ibs, testres = harness.testdata_expts('PZ_MTEST')\n >>> result = testres.print_results()\n >>> print(result)\n \"\"\"\n from wbia.expt import experiment_printres\n\n ibs = testres.ibs\n experiment_printres.print_results(ibs, testres, **kwargs)\n\n def get_common_qaids(testres):\n if not testres.has_constant_qaids():\n # Get only cases the tests share for now\n common_qaids = reduce(np.intersect1d, testres.cfgx2_qaids)\n return common_qaids\n else:\n return testres.qaids\n\n def get_all_qaids(testres):\n all_qaids = np.array(ut.unique(ut.flatten(testres.cfgx2_qaids)))\n return all_qaids\n\n def get_test_qaids(testres):\n # Transition fucntion\n return testres.get_all_qaids()\n # return testres.get_common_qaids()\n # all_qaids = ut.unique(ut.flatten(testres.cfgx2_qaids))\n # return all_qaids\n\n def get_all_tags(testres):\n r\"\"\"\n CommandLine:\n python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :\n python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h\n python -m wbia --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h,max_gt_rank=5\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])\n >>> filt_cfg = main_helpers.testdata_filtcfg()\n >>> case_pos_list = testres.case_sample2(filt_cfg)\n 
>>> all_tags = testres.get_all_tags()\n >>> selected_tags = ut.take(all_tags, case_pos_list.T[0])\n >>> flat_tags = list(map(str, ut.flatten(ut.flatten(selected_tags))))\n >>> print(ut.repr2(ut.dict_hist(flat_tags), key_order_metric='val'))\n >>> ut.quit_if_noshow()\n >>> import wbia.plottool as pt\n >>> pt.word_histogram2(flat_tags, fnum=1, pnum=(1, 2, 1))\n >>> pt.wordcloud(' '.join(flat_tags), fnum=1, pnum=(1, 2, 2))\n >>> pt.set_figtitle(ut.get_cfg_lbl(filt_cfg))\n >>> ut.show_if_requested()\n \"\"\"\n gt_tags = testres.get_gt_tags()\n gf_tags = testres.get_gf_tags()\n all_tags = [ut.list_zipflatten(*item) for item in zip(gf_tags, gt_tags)]\n return all_tags\n\n def get_gf_tags(testres):\n r\"\"\"\n Returns:\n list: case_pos_list\n\n CommandLine:\n python -m wbia --tf TestResult.get_gf_tags --db PZ_Master1 --show\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])\n >>> filt_cfg = main_helpers.testdata_filtcfg()\n >>> case_pos_list = testres.case_sample2(filt_cfg)\n >>> gf_tags = testres.get_gf_tags()\n \"\"\"\n ibs = testres.ibs\n truth2_prop, prop2_mat = testres.get_truth2_prop()\n gf_annotmatch_rowids = truth2_prop['gf']['annotmatch_rowid']\n gf_tags = ibs.unflat_map(ibs.get_annotmatch_case_tags, gf_annotmatch_rowids)\n return gf_tags\n\n def get_gt_tags(testres):\n ibs = testres.ibs\n truth2_prop, prop2_mat = testres.get_truth2_prop()\n gt_annotmatch_rowids = truth2_prop['gt']['annotmatch_rowid']\n gt_tags = ibs.unflat_map(ibs.get_annotmatch_case_tags, gt_annotmatch_rowids)\n return gt_tags\n\n def get_gt_annot_tags(testres):\n ibs = testres.ibs\n truth2_prop, prop2_mat = testres.get_truth2_prop()\n gt_annot_tags = ibs.unflat_map(ibs.get_annot_case_tags, truth2_prop['gt']['aid'])\n return gt_annot_tags\n\n def get_query_annot_tags(testres):\n # FIXME: will break with new config structure\n ibs = testres.ibs\n truth2_prop, prop2_mat = testres.get_truth2_prop()\n unflat_qids = np.tile(testres.qaids[:, None], (len(testres.cfgx2_qaids)))\n query_annot_tags = ibs.unflat_map(ibs.get_annot_case_tags, unflat_qids)\n return query_annot_tags\n\n def get_gtquery_annot_tags(testres):\n gt_annot_tags = testres.get_gt_annot_tags()\n query_annot_tags = testres.get_query_annot_tags()\n both_tags = [\n [ut.flatten(t) for t in zip(*item)]\n for item in zip(query_annot_tags, gt_annot_tags)\n ]\n return both_tags\n\n def case_sample2(testres, filt_cfg, qaids=None, return_mask=False, verbose=None):\n r\"\"\"\n Filters individual test result cases based on how they performed, what\n tags they had, and various other things.\n\n Args:\n filt_cfg (dict):\n\n Returns:\n list: case_pos_list (list of (qx, cfgx)) or isvalid mask\n\n CommandLine:\n python -m wbia TestResult.case_sample2\n python -m wbia TestResult.case_sample2:0\n python -m wbia TestResult.case_sample2:1 --db GZ_ALL --filt :min_tags=1\n python -m wbia TestResult.case_sample2:1 --db PZ_Master1 --filt :min_gf_tags=1\n\n python -m wbia TestResult.case_sample2:2 --db PZ_Master1\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> # The same results is achievable with different filter config settings\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> verbose = True\n >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])\n >>> filt_cfg1 = {'fail': True}\n >>> case_pos_list1 = testres.case_sample2(filt_cfg1)\n >>> filt_cfg2 = {'min_gtrank': 1}\n >>> 
case_pos_list2 = testres.case_sample2(filt_cfg2)\n >>> filt_cfg3 = {'min_gtrank': 0}\n >>> case_pos_list3 = testres.case_sample2(filt_cfg3)\n >>> filt_cfg4 = {}\n >>> case_pos_list4 = testres.case_sample2(filt_cfg4)\n >>> assert np.all(case_pos_list1 == case_pos_list2), 'should be equiv configs'\n >>> assert np.any(case_pos_list2 != case_pos_list3), 'should be diff configs'\n >>> assert np.all(case_pos_list3 == case_pos_list4), 'should be equiv configs'\n >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'], t=['default:sv_on=[True,False]'])\n >>> filt_cfg5 = filt_cfg1.copy()\n >>> mask5 = testres.case_sample2(filt_cfg5, return_mask=True)\n >>> case_pos_list5 = testres.case_sample2(filt_cfg5, return_mask=False)\n >>> assert len(mask5.shape) == 2\n >>> assert np.all(mask5.T[0] == mask5.T[1])\n >>> filt_cfg6 = {'fail': True, 'allcfg': True}\n >>> mask6 = testres.case_sample2(filt_cfg6, return_mask=True)\n >>> assert np.all(mask6.T[0] == mask6.T[1])\n >>> print(mask5)\n >>> print(case_pos_list5)\n >>> filt_cfg = filt_cfg7 = {'disagree': True}\n >>> case_pos_list7 = testres.case_sample2(filt_cfg7, verbose=verbose)\n >>> print(case_pos_list7)\n\n Example:\n >>> # SCRIPT\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])\n >>> filt_cfg = main_helpers.testdata_filtcfg()\n >>> case_pos_list = testres.case_sample2(filt_cfg)\n >>> result = ('case_pos_list = %s' % (str(case_pos_list),))\n >>> print(result)\n >>> # Extra stuff\n >>> all_tags = testres.get_all_tags()\n >>> selcted_tags = ut.take(all_tags, case_pos_list.T[0])\n >>> print('selcted_tags = %r' % (selcted_tags,))\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'], t=['default:K=[1,2,3]'])\n >>> ut.exec_funckw(testres.case_sample2, globals())\n >>> filt_cfg = {'fail': True, 'min_gtrank': 1, 'max_gtrank': None, 'min_gf_timedelta': '24h'}\n >>> ibs, testres = main_helpers.testdata_expts('humpbacks_fb', a=['default:has_any=hasnotch,mingt=2,qindex=0:300,dindex=0:300'], t=['default:proot=BC_DTW,decision=max,crop_dim_size=500,crop_enabled=True,manual_extract=False,use_te_scorer=True,ignore_notch=True,te_net=annot_simple', 'default:proot=vsmany'], qaid_override=[12])\n >>> filt_cfg = ':disagree=True,index=0:8,min_gtscore=.00001,require_all_cfg=True'\n >>> #filt_cfg = cfghelpers.parse_argv_cfg('--filt')[0]\n >>> case_pos_list = testres.case_sample2(filt_cfg, verbose=True)\n >>> result = ('case_pos_list = %s' % (str(case_pos_list),))\n >>> print(result)\n >>> # Extra stuff\n >>> all_tags = testres.get_all_tags()\n >>> selcted_tags = ut.take(all_tags, case_pos_list.T[0])\n >>> print('selcted_tags = %r' % (selcted_tags,))\n\n\n logger.info('qaid = %r' % (qaid,))\n logger.info('qx = %r' % (qx,))\n logger.info('cfgxs = %r' % (cfgxs,))\n # print testres info about this item\n take_cfgs = ut.partial(ut.take, index_list=cfgxs)\n take_qx = ut.partial(ut.take, index_list=qx)\n truth_cfgs = ut.hmap_vals(take_qx, truth2_prop)\n truth_item = ut.hmap_vals(take_cfgs, truth_cfgs, max_depth=1)\n prop_cfgs = ut.hmap_vals(take_qx, prop2_mat)\n prop_item = ut.hmap_vals(take_cfgs, prop_cfgs, max_depth=0)\n logger.info('truth2_prop[item] = ' + ut.repr3(truth_item, nl=2))\n logger.info('prop2_mat[item] = ' + ut.repr3(prop_item, nl=1))\n \"\"\"\n from wbia.expt import cfghelpers\n\n if verbose is 
None:\n verbose = ut.NOT_QUIET\n if verbose:\n logger.info('[testres] case_sample2')\n\n if isinstance(filt_cfg, str):\n filt_cfg = [filt_cfg]\n if isinstance(filt_cfg, list):\n _combos = cfghelpers.parse_cfgstr_list2(filt_cfg, strict=False)\n filt_cfg = ut.flatten(_combos)[0]\n if isinstance(filt_cfg, str):\n _combos = cfghelpers.parse_cfgstr_list2([filt_cfg], strict=False)\n filt_cfg = ut.flatten(_combos)[0]\n if filt_cfg is None:\n filt_cfg = {}\n\n qaids = testres.get_test_qaids() if qaids is None else qaids\n truth2_prop, prop2_mat = testres.get_truth2_prop(qaids)\n ibs = testres.ibs\n\n # Initialize isvalid flags to all true\n # np.ones(prop2_mat['is_success'].shape, dtype=np.bool)\n participates = prop2_mat['participates']\n is_valid = participates.copy()\n\n def unflat_tag_filterflags(tags_list, **kwargs):\n from wbia import tag_funcs\n\n flat_tags, cumsum = ut.invertible_flatten2(tags_list)\n flat_flags = tag_funcs.filterflags_general_tags(flat_tags, **kwargs)\n flags = np.array(ut.unflatten2(flat_flags, cumsum))\n return flags\n\n UTFF = unflat_tag_filterflags\n\n def cols_disagree(mat, val):\n \"\"\"\n is_success = prop2_mat['is_success']\n \"\"\"\n nCols = mat.shape[1]\n sums = mat.sum(axis=1)\n # Find out which rows have different values\n disagree_flags1d = np.logical_and(sums > 0, sums < nCols)\n disagree_flags2d = np.tile(disagree_flags1d[:, None], (1, nCols))\n if not val:\n # User asked for rows that agree\n flags = np.logical_not(disagree_flags2d)\n else:\n flags = disagree_flags2d\n return flags\n\n def cfg_scoresep(mat, val, op):\n \"\"\"\n Compares scores between different configs\n\n op = operator.ge\n is_success = prop2_mat['is_success']\n \"\"\"\n # import scipy.spatial.distance as spdist\n nCols = mat.shape[1]\n pdistx = vt.pdist_indicies(nCols)\n pdist_list = np.array([vt.safe_pdist(row) for row in mat])\n flags_list = op(pdist_list, val)\n colx_list = [\n np.unique(ut.flatten(ut.compress(pdistx, flags))) for flags in flags_list\n ]\n offsets = np.arange(0, nCols * len(mat), step=nCols)\n idx_list = ut.flatten(\n [colx + offset for colx, offset in zip(colx_list, offsets)]\n )\n mask = vt.index_to_boolmask(idx_list, maxval=offsets[-1] + nCols)\n flags = mask.reshape(mat.shape)\n return flags\n\n # List of rules that can filter results\n rule_list = [\n ('disagree', lambda val: cols_disagree(prop2_mat['is_failure'], val)),\n (\n 'min_gt_cfg_scoresep',\n lambda val: cfg_scoresep(truth2_prop['gt']['score'], val, operator.ge),\n ),\n ('fail', prop2_mat['is_failure']),\n ('success', prop2_mat['is_success']),\n ('min_gtrank', partial(operator.ge, truth2_prop['gt']['rank'])),\n ('max_gtrank', partial(operator.le, truth2_prop['gt']['rank'])),\n ('max_gtscore', partial(operator.le, truth2_prop['gt']['score'])),\n ('min_gtscore', partial(operator.ge, truth2_prop['gt']['score'])),\n ('min_gf_timedelta', partial(operator.ge, truth2_prop['gf']['timedelta'])),\n ('max_gf_timedelta', partial(operator.le, truth2_prop['gf']['timedelta'])),\n # Tag filtering\n # FIXME: will break with new config structure\n ('min_tags', lambda val: UTFF(testres.get_all_tags(), min_num=val)),\n ('max_tags', lambda val: UTFF(testres.get_all_tags(), max_num=val)),\n ('min_gf_tags', lambda val: UTFF(testres.get_gf_tags(), min_num=val)),\n ('max_gf_tags', lambda val: UTFF(testres.get_gf_tags(), max_num=val)),\n ('min_gt_tags', lambda val: UTFF(testres.get_gt_tags(), min_num=val)),\n ('max_gt_tags', lambda val: UTFF(testres.get_gt_tags(), max_num=val)),\n (\n 'min_query_annot_tags',\n lambda val: 
UTFF(testres.get_query_annot_tags(), min_num=val),\n ),\n (\n 'min_gt_annot_tags',\n lambda val: UTFF(testres.get_gt_annot_tags(), min_num=val),\n ),\n (\n 'min_gtq_tags',\n lambda val: UTFF(testres.get_gtquery_annot_tags(), min_num=val),\n ),\n (\n 'max_gtq_tags',\n lambda val: UTFF(testres.get_gtquery_annot_tags(), max_num=val),\n ),\n ('without_gf_tag', lambda val: UTFF(testres.get_gf_tags(), has_none=val)),\n ('without_gt_tag', lambda val: UTFF(testres.get_gt_tags(), has_none=val)),\n ('with_gf_tag', lambda val: UTFF(testres.get_gf_tags(), has_any=val)),\n ('with_gt_tag', lambda val: UTFF(testres.get_gt_tags(), has_any=val)),\n ('with_tag', lambda val: UTFF(testres.get_all_tags(), has_any=val)),\n ('without_tag', lambda val: UTFF(testres.get_all_tags(), has_none=val)),\n ]\n rule_dict = ut.odict(rule_list)\n rule_list.append(('max_gf_td', rule_dict['max_gf_timedelta']))\n rule_list.append(('min_gf_td', rule_dict['min_gf_timedelta']))\n\n filt_cfg_ = copy.deepcopy(filt_cfg)\n\n # hack to convert to seconds\n for tdkey in filt_cfg_.keys():\n # timedelta_keys = ['min_gf_timedelta', 'max_gf_timedelta']\n # for tdkey in timedelta_keys:\n if tdkey.endswith('_timedelta'):\n filt_cfg_[tdkey] = ut.ensure_timedelta(filt_cfg_[tdkey])\n\n class VerbFilterInfo(object):\n def __init__(self):\n self.prev_num_valid = None\n\n def print_pre(self, is_valid, filt_cfg_):\n num_valid = is_valid.sum()\n logger.info(\n '[testres] Sampling from is_valid.size=%r with filt=%r'\n % (is_valid.size, ut.get_cfg_lbl(filt_cfg_))\n )\n logger.info(' * is_valid.shape = %r' % (is_valid.shape,))\n logger.info(' * num_valid = %r' % (num_valid,))\n self.prev_num_valid = num_valid\n\n def print_post(self, is_valid, flags, msg):\n if flags is not None:\n num_passed = flags.sum()\n num_valid = is_valid.sum()\n num_invalidated = self.prev_num_valid - num_valid\n logger.info(msg)\n if num_invalidated == 0:\n if flags is not None:\n logger.info(' * num_passed = %r' % (num_passed,))\n logger.info(' * num_invalided = %r' % (num_invalidated,))\n else:\n logger.info(' * prev_num_valid = %r' % (self.prev_num_valid,))\n logger.info(' * num_valid = %r' % (num_valid,))\n # logger.info(' * is_valid.shape = %r' % (is_valid.shape,))\n self.prev_num_valid = num_valid\n\n verbinfo = VerbFilterInfo()\n\n if verbose:\n verbinfo.print_pre(is_valid, filt_cfg_)\n\n # Pop irrelevant info\n ut.delete_keys(filt_cfg_, ['_cfgstr', '_cfgindex', '_cfgname', '_cfgtype'])\n # Pop other non-rule config options\n valid_rules = []\n\n def poprule(rulename, default):\n # register other rule names for debuging\n valid_rules.append(rulename)\n return filt_cfg_.pop(rulename, default)\n\n allcfg = poprule('allcfg', None)\n orderby = poprule('orderby', None)\n reverse = poprule('reverse', None)\n sortasc = poprule('sortasc', None)\n sortdsc = poprule('sortdsc', poprule('sortdesc', None))\n max_pername = poprule('max_pername', None)\n require_all_cfg = poprule('require_all_cfg', None)\n index = poprule('index', None)\n # Pop all chosen rules\n rule_value_list = [poprule(key, None) for key, rule in rule_list]\n\n # Assert that only valid configurations were given\n if len(filt_cfg_) > 0:\n logger.info('ERROR')\n logger.info('filtcfg valid rules are = %s' % (ut.repr2(valid_rules, nl=1),))\n for key in filt_cfg_.keys():\n logger.info(\n 'did you mean %r instead of %r?'\n % (ut.closet_words(key, valid_rules)[0], key)\n )\n raise NotImplementedError(\n 'Unhandled filt_cfg.keys() = %r' % (filt_cfg_.keys())\n )\n\n # Remove test cases that do not satisfy chosen rules\n 
chosen_rule_idxs = ut.where([val is not None for val in rule_value_list])\n chosen_rules = ut.take(rule_list, chosen_rule_idxs)\n chosen_vals = ut.take(rule_value_list, chosen_rule_idxs)\n for (key, rule), val in zip(chosen_rules, chosen_vals):\n if isinstance(rule, np.ndarray):\n # When a rule is an ndarray it must have boolean values\n flags = rule == val\n else:\n flags = rule(val)\n # HACK: flags are forced to be false for non-participating cases\n flags = np.logical_and(flags, participates)\n # conjunctive normal form of satisfiability\n is_valid = np.logical_and(is_valid, flags)\n if verbose:\n verbinfo.print_post(is_valid, flags, 'SampleRule: %s = %r' % (key, val))\n\n # HACK:\n # If one config for a row passes the filter then all configs should pass\n if allcfg:\n is_valid = np.logical_or(np.logical_or.reduce(is_valid.T)[:, None], is_valid)\n is_valid = np.logical_and(is_valid, participates)\n\n qx_list, cfgx_list = np.nonzero(is_valid)\n\n # Determine a good ordering of the test cases\n if sortdsc is not None:\n assert orderby is None, 'use orderby or sortasc'\n assert reverse is None, 'reverse does not work with sortdsc'\n orderby = sortdsc\n reverse = True\n elif sortasc is not None:\n assert reverse is None, 'reverse does not work with sortasc'\n assert orderby is None, 'use orderby or sortasc'\n orderby = sortasc\n reverse = False\n else:\n reverse = False\n if orderby is not None:\n # if orderby == 'gtscore':\n # order_values = truth2_prop['gt']['score']\n # elif orderby == 'gfscore':\n # order_values = truth2_prop['gf']['score']\n # else:\n import re\n\n order_values = None\n for prefix_pattern in ['^gt_?', '^gf_?']:\n prefix_match = re.match(prefix_pattern, orderby)\n if prefix_match is not None:\n truth = prefix_pattern[1:3]\n propname = orderby[prefix_match.end() :]\n if verbose:\n logger.info(\n 'Ordering by truth=%s propname=%s' % (truth, propname)\n )\n order_values = truth2_prop[truth][propname]\n break\n if order_values is None:\n raise NotImplementedError('Unknown orerby=%r' % (orderby,))\n else:\n order_values = np.arange(is_valid.size).reshape(is_valid.shape)\n\n # Convert mask into indicies\n flat_order = order_values[is_valid]\n # Flat sorting indeices in a matrix\n if verbose:\n if verbose:\n logger.info('Reversing ordering (descending)')\n else:\n logger.info('Normal ordering (ascending)')\n if reverse:\n sortx = flat_order.argsort()[::-1]\n else:\n sortx = flat_order.argsort()\n qx_list = qx_list.take(sortx, axis=0)\n cfgx_list = cfgx_list.take(sortx, axis=0)\n\n # Return at most ``max_pername`` annotation examples per name\n if max_pername is not None:\n if verbose:\n logger.info('Returning at most %d cases per name ' % (max_pername,))\n # FIXME: multiple configs\n _qaid_list = np.take(qaids, qx_list)\n _qnid_list = ibs.get_annot_nids(_qaid_list)\n _valid_idxs = []\n seen_ = ut.ddict(lambda: 0)\n for idx, _qnid in enumerate(_qnid_list):\n if seen_[_qnid] < max_pername:\n seen_[_qnid] += 1\n _valid_idxs.append(idx)\n _qx_list = qx_list[_valid_idxs]\n _cfgx_list = cfgx_list[_valid_idxs]\n _valid_index = np.vstack((_qx_list, _cfgx_list)).T\n is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)\n qx_list = _qx_list\n cfgx_list = _cfgx_list\n\n if require_all_cfg:\n if verbose:\n prev_num_valid = is_valid.sum()\n logger.info('Enforcing that all configs must pass filters')\n logger.info(' * prev_num_valid = %r' % (prev_num_valid,))\n qx2_valid_cfgs = ut.group_items(cfgx_list, qx_list)\n hasall_cfg = [len(qx2_valid_cfgs[qx]) == 
testres.nConfig for qx in qx_list]\n _qx_list = qx_list.compress(hasall_cfg)\n _cfgx_list = cfgx_list.compress(hasall_cfg)\n _valid_index = np.vstack((_qx_list, _cfgx_list)).T\n is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)\n qx_list = _qx_list\n cfgx_list = _cfgx_list\n if verbose:\n verbinfo.print_post(\n is_valid, None, 'Enforcing that all configs must pass filters'\n )\n\n if index is not None:\n if isinstance(index, str):\n index = ut.smart_cast(index, slice)\n _qx_list = ut.take(qx_list, index)\n _cfgx_list = ut.take(cfgx_list, index)\n _valid_index = np.vstack((_qx_list, _cfgx_list)).T\n is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)\n qx_list = _qx_list\n cfgx_list = _cfgx_list\n if verbose:\n verbinfo.print_post(\n is_valid,\n None,\n 'Taking index=%r sample from len(qx_list) = %r'\n % (index, len(qx_list)),\n )\n\n if not return_mask:\n case_pos_list = np.vstack((qx_list, cfgx_list)).T\n case_identifier = case_pos_list\n else:\n if verbose:\n logger.info('Converting cases indicies to a 2d-mask')\n case_identifier = is_valid\n if verbose:\n logger.info('Finished case filtering')\n logger.info('Final case stats:')\n qx_hist = ut.dict_hist(qx_list)\n logger.info(\n 'config per query stats: %r' % (ut.get_stats_str(qx_hist.values()),)\n )\n logger.info(\n 'query per config stats: %r'\n % (ut.get_stats_str(ut.dict_hist(cfgx_list).values()),)\n )\n\n return case_identifier\n\n def get_truth2_prop(testres, qaids=None, join_acfg=False):\n r\"\"\"\n Returns:\n tuple: (truth2_prop, prop2_mat)\n\n CommandLine:\n python -m wbia.expt.test_result --exec-get_truth2_prop --show\n\n Example:\n >>> # xdoctest: +REQUIRES(--slow)\n >>> # ENABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_MTEST', a=['ctrl'])\n >>> (truth2_prop, prop2_mat) = testres.get_truth2_prop()\n >>> result = '(truth2_prop, prop2_mat) = %s' % str((truth2_prop, prop2_mat))\n >>> print(result)\n >>> ut.quit_if_noshow()\n >>> import wbia.plottool as pt\n >>> ut.show_if_requested()\n \"\"\"\n ibs = testres.ibs\n test_qaids = testres.get_test_qaids() if qaids is None else qaids\n\n # test_qaids = ut.random_sample(test_qaids, 20)\n truth2_prop = ut.ddict(ut.odict)\n\n # TODO: have this function take in a case_pos_list as input instead\n participates = testres.get_infoprop_mat('participant', test_qaids)\n\n truth2_prop['gt']['aid'] = testres.get_infoprop_mat('qx2_gt_aid', test_qaids)\n truth2_prop['gf']['aid'] = testres.get_infoprop_mat('qx2_gf_aid', test_qaids)\n truth2_prop['gt']['rank'] = testres.get_infoprop_mat('qx2_gt_rank', test_qaids)\n truth2_prop['gf']['rank'] = testres.get_infoprop_mat('qx2_gf_rank', test_qaids)\n\n truth2_prop['gt']['score'] = testres.get_infoprop_mat(\n 'qx2_gt_raw_score', test_qaids\n )\n truth2_prop['gf']['score'] = testres.get_infoprop_mat(\n 'qx2_gf_raw_score', test_qaids\n )\n truth2_prop['gt']['score'] = np.nan_to_num(truth2_prop['gt']['score'])\n truth2_prop['gf']['score'] = np.nan_to_num(truth2_prop['gf']['score'])\n\n # Cast nans to ints (that are participants)\n # if False:\n for truth in ['gt', 'gf']:\n rank_mat = truth2_prop[truth]['rank']\n flags = np.logical_and(np.isnan(rank_mat), participates)\n rank_mat[flags] = testres.get_worst_possible_rank()\n # truth2_prop[truth]['rank'] = rank_mat.astype(np.int)\n\n is_success = truth2_prop['gt']['rank'] == 0\n is_failure = np.logical_not(is_success)\n\n # THIS IS NOT THE CASE IF THERE ARE UNKNOWN INDIVIDUALS IN THE 
DATABASE\n assert np.all(is_success == (truth2_prop['gt']['rank'] == 0))\n\n # WEIRD THINGS HAPPEN WHEN UNKNOWNS ARE HERE\n # hardness_degree_rank[is_success]\n # These probably just completely failure spatial verification\n # is_weird = hardness_degree_rank == 0\n\n # Get timedelta and annotmatch rowid\n for truth in ['gt', 'gf']:\n aid_mat = truth2_prop[truth]['aid']\n timedelta_mat = np.vstack(\n [ibs.get_annot_pair_timedelta(test_qaids, aids) for aids in aid_mat.T]\n ).T\n annotmatch_rowid_mat = np.vstack(\n [\n ibs.get_annotmatch_rowid_from_undirected_superkey(test_qaids, aids)\n for aids in aid_mat.T\n ]\n ).T\n truth2_prop[truth]['annotmatch_rowid'] = annotmatch_rowid_mat\n truth2_prop[truth]['timedelta'] = timedelta_mat\n prop2_mat = {}\n\n prop2_mat['is_success'] = is_success\n prop2_mat['is_failure'] = is_failure\n prop2_mat['participates'] = participates\n\n groupxs = testres.get_cfgx_groupxs()\n\n def group_prop(val, grouped_flags, groupxs):\n nRows = len(val)\n # Allocate space for new val\n new_shape = (nRows, len(groupxs))\n if val.dtype == object or val.dtype.type == object:\n new_val = np.full(new_shape, None, dtype=val.dtype)\n elif ut.is_float(val):\n new_val = np.full(new_shape, np.nan, dtype=val.dtype)\n else:\n new_val = np.zeros(new_shape, dtype=val.dtype)\n # Populate new val\n grouped_vals = vt.apply_grouping(val.T, groupxs)\n _iter = enumerate(zip(grouped_flags, grouped_vals))\n for new_col, (flags, group) in _iter:\n rows, cols = np.where(flags.T)\n new_val[rows, new_col] = group.T[(rows, cols)]\n return new_val\n\n if join_acfg:\n assert ut.allsame(participates.sum(axis=1))\n grouped_flags = vt.apply_grouping(participates.T, groupxs)\n\n # new_prop2_mat = {key: group_prop(val)\n # for key, val in prop2_mat.items()}\n # new_truth2_prop = {\n # truth: {key: group_prop(val)\n # for key, val in props.items()}\n # for truth, props in truth2_prop.items()}\n\n new_prop2_mat = {}\n for key, val in prop2_mat.items():\n new_prop2_mat[key] = group_prop(val, grouped_flags, groupxs)\n\n new_truth2_prop = {}\n for truth, props in truth2_prop.items():\n new_props = {}\n for key, val in props.items():\n new_props[key] = group_prop(val, grouped_flags, groupxs)\n new_truth2_prop[truth] = new_props\n\n prop2_mat_ = new_prop2_mat\n truth2_prop_ = new_truth2_prop\n else:\n prop2_mat_ = prop2_mat\n truth2_prop_ = truth2_prop\n return truth2_prop_, prop2_mat_\n\n def interact_individual_result(testres, qaid, cfgx=0):\n ibs = testres.ibs\n cfgx_list = ut.ensure_iterable(cfgx)\n qreq_list = ut.take(testres.cfgx2_qreq_, cfgx_list)\n # Preload any requested configs\n cm_list = [qreq_.execute(qaids=[qaid]) for qreq_ in qreq_list]\n cfgx2_shortlbl = testres.get_short_cfglbls()\n show_kwargs = {\n 'N': 3,\n 'ori': True,\n 'ell_alpha': 0.9,\n }\n # SHOW ANALYSIS\n show_kwargs['show_query'] = False\n show_kwargs['viz_name_score'] = True\n show_kwargs['show_timedelta'] = True\n show_kwargs['show_gf'] = True\n show_kwargs['with_figtitle'] = False\n for cfgx, cm, qreq_ in zip(cfgx_list, cm_list, qreq_list):\n query_lbl = cfgx2_shortlbl[cfgx]\n fnum = cfgx\n cm.ishow_analysis(\n ibs,\n figtitle=query_lbl,\n fnum=fnum,\n annot_mode=1,\n qreq_=qreq_,\n **show_kwargs\n )\n\n def draw_score_diff_disti(testres):\n r\"\"\"\n\n CommandLine:\n python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db PZ_Master1\n python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db GZ_Master1\n python -m wbia --tf TestResult.draw_score_diff_disti 
--show -a varynannots_td1h -t best --db GIRM_Master1\n\n python -m wbia --tf TestResult.draw_score_diff_disti --show -a varynannots_td:qmin_pername=3,dpername=2 -t best --db PZ_Master1\n\n python -m wbia --tf get_annotcfg_list -a varynannots_td -t best --db PZ_Master1\n 13502\n python -m wbia --tf draw_match_cases --db PZ_Master1 -a varynannots_td:dsample_size=.01 -t best --show --qaid 13502\n python -m wbia --tf draw_match_cases --db PZ_Master1 -a varynannots_td -t best --show\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('PZ_Master1', a=['varynannots_td'], t=['best'])\n >>> result = testres.draw_score_diff_disti()\n >>> print(result)\n >>> ut.show_if_requested()\n \"\"\"\n import wbia.plottool as pt\n import vtool as vt\n\n # dont look at filtered cases\n ibs = testres.ibs\n qaids = testres.get_test_qaids()\n qaids = ibs.get_annot_tag_filterflags(qaids, {'has_none': 'timedeltaerror'})\n\n gt_rawscore = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=qaids)\n gf_rawscore = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=qaids)\n\n gt_valid_flags_list = np.isfinite(gt_rawscore).T\n gf_valid_flags_list = np.isfinite(gf_rawscore).T\n\n cfgx2_gt_scores = vt.zipcompress(gt_rawscore.T, gt_valid_flags_list)\n cfgx2_gf_scores = vt.zipcompress(gf_rawscore.T, gf_valid_flags_list)\n\n # partition by rank\n gt_rank = testres.get_infoprop_mat('qx2_gt_rank', qaids=qaids)\n gf_ranks = testres.get_infoprop_mat('qx2_gf_rank', qaids=qaids)\n cfgx2_gt_ranks = vt.zipcompress(gt_rank.T, gt_valid_flags_list)\n cfgx2_rank0_gt_scores = vt.zipcompress(\n cfgx2_gt_scores, [ranks == 0 for ranks in cfgx2_gt_ranks]\n )\n cfgx2_rankX_gt_scores = vt.zipcompress(\n cfgx2_gt_scores, [ranks > 0 for ranks in cfgx2_gt_ranks]\n )\n cfgx2_gf_ranks = vt.zipcompress(gf_ranks.T, gf_valid_flags_list)\n cfgx2_rank0_gf_scores = vt.zipcompress(\n cfgx2_gf_scores, [ranks == 0 for ranks in cfgx2_gf_ranks]\n )\n\n # valid_gtranks = gt_rank[isvalid]\n # valid_qaids = qaids[isvalid]\n # Hack remove timedelta error\n # valid_qaids = valid_qaids[flags]\n # valid_gt_rawscore = valid_gt_rawscore[flags]\n # valid_gtranks = valid_gtranks[flags]\n\n xdata = list(map(len, testres.cfgx2_daids))\n\n USE_MEDIAN = True # not ut.get_argflag('--use-mean')\n # USE_LOG = True\n USE_LOG = False\n if USE_MEDIAN:\n ave = np.median\n dev = vt.median_abs_dev\n else:\n ave = np.mean\n dev = np.std\n\n def make_interval_args(arr_list, ave=ave, dev=dev, **kwargs):\n # if not USE_MEDIAN:\n # # maybe approximate median by removing the most extreme values\n # arr_list = [np.array(sorted(arr))[5:-5] for arr in arr_list]\n import utool as ut\n\n if USE_LOG:\n arr_list = list(map(lambda x: np.log(x + 1), arr_list))\n sizes_ = list(map(len, arr_list))\n ydata_ = list(map(ave, arr_list))\n spread_ = list(map(dev, arr_list))\n # ut.get_stats(arr_list, axis=0)\n label = kwargs.get('label', '')\n label += ' ' + ut.get_funcname(ave)\n kwargs['label'] = label\n logger.info(\n label\n + 'score stats : '\n + ut.repr2(\n ut.get_jagged_stats(arr_list, use_median=True), nl=1, precision=1\n )\n )\n return ydata_, spread_, kwargs, sizes_\n\n args_list1 = [\n make_interval_args(cfgx2_gt_scores, label='GT', color=pt.TRUE_BLUE),\n make_interval_args(cfgx2_gf_scores, label='GF', color=pt.FALSE_RED),\n ]\n\n args_list2 = [\n make_interval_args(\n cfgx2_rank0_gt_scores, label='GT-rank = 0', color=pt.LIGHT_GREEN\n ),\n make_interval_args(\n cfgx2_rankX_gt_scores, label='GT-rank > 0', 
color=pt.YELLOW\n ),\n make_interval_args(cfgx2_rank0_gf_scores, label='GF-rank = 0', color=pt.PINK),\n # make_interval_args(cfgx2_rank2_gt_scores, label='gtrank < 2'),\n ]\n\n plotargs_list = [args_list1, args_list2]\n # plotargs_list = [args_list1]\n ymax = -np.inf\n ymin = np.inf\n for args_list in plotargs_list:\n ydata_list = np.array(ut.get_list_column(args_list, 0))\n spread = np.array(ut.get_list_column(args_list, 1))\n ymax = max(ymax, np.array(ydata_list + spread).max())\n ymin = min(ymax, np.array(ydata_list - spread).min())\n\n ylabel = 'log name score' if USE_LOG else 'name score'\n\n statickw = dict(\n # title='scores vs dbsize',\n xlabel='database size (number of annotations)',\n ylabel=ylabel,\n # xscale='log', ymin=0, ymax=10,\n linewidth=2,\n spread_alpha=0.5,\n lightbg=True,\n marker='o',\n # xmax='data',\n ymax=ymax,\n ymin=ymin,\n xmax='data',\n xmin='data',\n )\n\n fnum = pt.ensure_fnum(None)\n pnum_ = pt.make_pnum_nextgen(len(plotargs_list), 1)\n\n for args_list in plotargs_list:\n ydata_list = ut.get_list_column(args_list, 0)\n spread_list = ut.get_list_column(args_list, 1)\n kwargs_list = ut.get_list_column(args_list, 2)\n sizes_list = ut.get_list_column(args_list, 3)\n logger.info('sizes_list = %s' % (ut.repr2(sizes_list, nl=1),))\n\n # Pack kwargs list for multi_plot\n plotkw = ut.dict_stack2(kwargs_list, '_list')\n plotkw2 = ut.merge_dicts(statickw, plotkw)\n\n pt.multi_plot(\n xdata,\n ydata_list,\n spread_list=spread_list,\n fnum=fnum,\n pnum=pnum_(),\n **plotkw2\n )\n\n # pt.adjust_subplots(hspace=.3)\n figtitle = 'Score vs DBSize: %s' % (testres.get_title_aug())\n pt.set_figtitle(figtitle)\n\n def draw_rank_cmc(testres):\n \"\"\"\n Wrapper\n \"\"\"\n from wbia.expt import experiment_drawing\n\n experiment_drawing.draw_rank_cmc(testres.ibs, testres)\n\n def draw_match_cases(testres, **kwargs):\n \"\"\"\n Wrapper\n \"\"\"\n from wbia.expt import experiment_drawing\n\n experiment_drawing.draw_match_cases(testres.ibs, testres, **kwargs)\n\n def draw_failure_cases(testres, **kwargs):\n \"\"\"\n >>> from wbia.other.dbinfo import * # NOQA\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts(defaultdb='PZ_MTEST', a='timectrl:qsize=2', t='invar:AI=[False],RI=False', use_cache=False)\n \"\"\"\n from wbia.expt import experiment_drawing\n\n # kwargs = kwargs.copy()\n orig_filter = ':'\n kwargs['f'] = orig_filter + 'fail'\n case_pos_list = testres.case_sample2(':fail=True,index=0:5')\n experiment_drawing.draw_match_cases(\n testres.ibs,\n testres,\n case_pos_list=case_pos_list,\n annot_modes=[1],\n interact=True,\n )\n\n def find_score_thresh_cutoff(testres):\n \"\"\"\n FIXME\n DUPLICATE CODE\n rectify with experiment_drawing\n \"\"\"\n # import wbia.plottool as pt\n import vtool as vt\n\n if ut.VERBOSE:\n logger.info('[dev] FIX DUPLICATE CODE find_thresh_cutoff')\n # from wbia.expt import cfghelpers\n\n assert len(testres.cfgx2_qreq_) == 1, 'can only specify one config here'\n cfgx = 0\n # qreq_ = testres.cfgx2_qreq_[cfgx]\n test_qaids = testres.get_test_qaids()\n gt_rawscore = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=test_qaids).T[\n cfgx\n ]\n gf_rawscore = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=test_qaids).T[\n cfgx\n ]\n\n # FIXME: may need to specify which cfg is used in the future\n # isvalid = testres.case_sample2(filt_cfg, return_mask=True).T[cfgx]\n\n tp_nscores = gt_rawscore\n tn_nscores = gf_rawscore\n tn_qaids = tp_qaids = test_qaids\n # encoder = vt.ScoreNormalizer(target_tpr=.7)\n # logger.info(qreq_.get_cfgstr())\n part_attrs = 
{1: {'qaid': tp_qaids}, 0: {'qaid': tn_qaids}}\n\n fpr = None\n tpr = 0.85\n encoder = vt.ScoreNormalizer(adjust=8, fpr=fpr, tpr=tpr, monotonize=True)\n # tp_scores = tp_nscores\n # tn_scores = tn_nscores\n name_scores, labels, attrs = encoder._to_xy(tp_nscores, tn_nscores, part_attrs)\n encoder.fit(name_scores, labels, attrs)\n score_thresh = encoder.learn_threshold2()\n\n # Find intersection point\n # TODO: add to score normalizer.\n # Improve robustness\n # pt.figure()\n # pt.plot(xdata, curve)\n # pt.plot(x_submax, y_submax, 'o')\n return score_thresh\n\n def print_percent_identification_success(testres):\n \"\"\"\n Prints names identified (at rank 1) / names queried.\n This combines results over multiple queries of a particular name using\n max\n\n OLD, MAYBE DEPRIATE\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia.expt.test_result import * # NOQA\n \"\"\"\n ibs = testres.ibs\n qaids = testres.get_test_qaids()\n unique_nids, groupxs = ut.group_indices(ibs.get_annot_nids(qaids))\n\n qx2_gt_raw_score = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=qaids)\n qx2_gf_raw_score = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=qaids)\n\n nx2_gt_raw_score = np.array(\n [\n np.nanmax(scores, axis=0)\n for scores in vt.apply_grouping(qx2_gt_raw_score, groupxs)\n ]\n )\n\n nx2_gf_raw_score = np.array(\n [\n np.nanmax(scores, axis=0)\n for scores in vt.apply_grouping(qx2_gf_raw_score, groupxs)\n ]\n )\n\n cfgx2_success = (nx2_gt_raw_score > nx2_gf_raw_score).T\n logger.info('Identification success (names identified / names queried)')\n for cfgx, success in enumerate(cfgx2_success):\n pipelbl = testres.cfgx2_lbl[cfgx]\n percent = 100 * success.sum() / len(success)\n logger.info(\n '%2d) success = %r/%r = %.2f%% -- %s'\n % (cfgx, success.sum(), len(success), percent, pipelbl)\n )\n\n def print_config_overlap(testres, with_plot=True):\n truth2_prop, prop2_mat = testres.get_truth2_prop()\n qx2_gt_ranks = truth2_prop['gt']['rank']\n qx2_success = qx2_gt_ranks == 0\n cfgx2_num_correct = np.nansum(qx2_success, axis=0)\n best_cfgx = cfgx2_num_correct.argmax()\n\n logger.info('Config Overlap')\n\n # Matrix version\n # disjoint_mat = np.zeros((testres.nConfig, testres.nConfig), dtype=np.int32)\n # improves_mat = np.zeros((testres.nConfig, testres.nConfig), dtype=np.int32)\n isect_mat = np.zeros((testres.nConfig, testres.nConfig), dtype=np.int32)\n union_mat = np.zeros((testres.nConfig, testres.nConfig), dtype=np.int32)\n for cfgx1 in range(testres.nConfig):\n for cfgx2 in range(testres.nConfig):\n if cfgx1 == cfgx2:\n success_qx1 = np.where(qx2_success.T[cfgx1])[0]\n isect_mat[cfgx1][cfgx2] = len(success_qx1)\n union_mat[cfgx1][cfgx2] = len(success_qx1)\n continue\n success_qx1 = np.where(qx2_success.T[cfgx1])[0]\n success_qx2 = np.where(qx2_success.T[cfgx2])[0]\n union_ = np.union1d(success_qx1, success_qx2)\n isect_ = np.intersect1d(success_qx1, success_qx2)\n # disjoints = np.setdiff1d(union_, isect_)\n # disjoint_mat[cfgx1][cfgx2] = len(disjoints)\n isect_mat[cfgx1][cfgx2] = len(isect_)\n union_mat[cfgx1][cfgx2] = len(union_)\n # improves = np.setdiff1d(success_qx2, isect_)\n # improves_mat[cfgx2][cfgx1] = len(improves)\n\n n_success_list = np.array(\n [qx2_success.T[cfgx1].sum() for cfgx1 in range(testres.nConfig)]\n )\n improves_mat = n_success_list[:, None] - isect_mat\n\n disjoint_mat = union_mat - isect_mat\n logger.info('n_success_list = %r' % (n_success_list,))\n logger.info('union_mat =\\n%s' % (union_mat,))\n logger.info('isect_mat =\\n%s' % (isect_mat,))\n logger.info('cfgx1 and 
cfgx2 have <x> not in common')\n logger.info('disjoint_mat =\\n%s' % (disjoint_mat,))\n logger.info('cfgx1 helps cfgx2 by <x>')\n logger.info('improves_mat =\\n%s' % (improves_mat,))\n logger.info('improves_mat.sum(axis=1) = \\n%s' % (improves_mat.sum(axis=1),))\n bestx_by_improves = improves_mat.sum(axis=1).argmax()\n logger.info('bestx_by_improves = %r' % (bestx_by_improves,))\n\n # Numbered version\n logger.info('best_cfgx = %r' % (best_cfgx,))\n for cfgx in range(testres.nConfig):\n if cfgx == best_cfgx:\n continue\n pipelbl = testres.cfgx2_lbl[cfgx]\n qx2_anysuccess = np.logical_or(qx2_success.T[cfgx], qx2_success.T[best_cfgx])\n # Queries that other got right that best did not get right\n qx2_othersuccess = np.logical_and(\n qx2_anysuccess, np.logical_not(qx2_success.T[best_cfgx])\n )\n logger.info(\n 'cfgx %d) has %d success cases that that the best config does not have -- %s'\n % (cfgx, qx2_othersuccess.sum(), pipelbl)\n )\n\n qx2_success.T[cfgx]\n\n if with_plot:\n # y = None\n # for x in qx2_gt_ranks:\n # x = np.minimum(x, 3)\n # z = (x.T - x[:, None])\n # if np.any(z):\n # logger.info(z)\n # if y is None:\n # y = z\n # else:\n # y += z\n\n if False:\n # Chip size stats\n ave_dlen = [ # NOQA\n np.sqrt(\n np.array(\n testres.ibs.get_annot_chip_dlensqrd(\n testres.qaids, config2_=qreq_.query_config2_\n )\n )\n ).mean()\n for qreq_ in testres.cfgx2_qreq_\n ]\n\n ave_width_inimg = [ # NOQA\n np.array(\n testres.ibs.get_annot_bboxes(\n testres.qaids, config2_=qreq_.query_config2_\n )\n )[:, 2 + 0].mean()\n for qreq_ in testres.cfgx2_qreq_\n ]\n\n ave_width = [ # NOQA\n np.array(\n testres.ibs.get_annot_chip_sizes(\n testres.qaids, config2_=qreq_.query_config2_\n )\n )[:, 0].mean()\n for qreq_ in testres.cfgx2_qreq_\n ]\n\n import wbia.plottool as pt\n\n # pt.plt.imshow(-y, interpolation='none', cmap='hot')\n # pt.plt.colorbar()\n\n def label_ticks():\n import wbia.plottool as pt\n\n ax = pt.gca()\n labels = testres.get_varied_labels()\n ax.set_xticks(list(range(len(labels))))\n ax.set_xticklabels([lbl[0:100] for lbl in labels])\n [lbl.set_rotation(-25) for lbl in ax.get_xticklabels()]\n [lbl.set_horizontalalignment('left') for lbl in ax.get_xticklabels()]\n\n # xgrid, ygrid = np.meshgrid(range(len(labels)), range(len(labels)))\n # pt.plot_surface3d(xgrid, ygrid, disjoint_mat)\n ax.set_yticks(list(range(len(labels))))\n ax.set_yticklabels([lbl[0:100] for lbl in labels])\n [lbl.set_horizontalalignment('right') for lbl in ax.get_yticklabels()]\n [lbl.set_verticalalignment('center') for lbl in ax.get_yticklabels()]\n # [lbl.set_rotation(20) for lbl in ax.get_yticklabels()]\n\n pt.figure(fnum=pt.next_fnum())\n pt.plt.imshow(union_mat, interpolation='none', cmap='hot')\n pt.plt.colorbar()\n pt.set_title(\n 'union mat: cfg<x> and cfg<y> have <z> success cases in in total'\n )\n label_ticks()\n label_ticks()\n\n pt.figure(fnum=pt.next_fnum())\n pt.plt.imshow(isect_mat, interpolation='none', cmap='hot')\n pt.plt.colorbar()\n pt.set_title('isect mat: cfg<x> and cfg<y> have <z> success cases in common')\n label_ticks()\n\n pt.figure(fnum=pt.next_fnum())\n pt.plt.imshow(disjoint_mat, interpolation='none', cmap='hot')\n pt.plt.colorbar()\n pt.set_title(\n 'disjoint mat (union - isect): cfg<x> and cfg<y> have <z> success cases not in common'\n )\n\n # xgrid, ygrid = np.meshgrid(range(len(labels)), range(len(labels)))\n # pt.plot_surface3d(xgrid, ygrid, improves_mat)\n\n pt.figure(fnum=pt.next_fnum())\n pt.plt.imshow(improves_mat, interpolation='none', cmap='hot')\n pt.plt.colorbar()\n 
pt.set_title(\n 'improves mat (diag.T - isect): cfg<x> got <z> qaids that cfg <y> missed'\n )\n label_ticks()\n # pt.colorbar(np.unique(y))\n\n def map_score(testres):\n \"\"\"\n For each query compute a precision recall curve.\n Then, for each query compute the average precision.\n Then take the mean of all average precisions to obtain the mAP.\n\n Script:\n >>> #ibs = wbia.opendb('Oxford')\n >>> #ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False,True]')\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False,True],can_match_sameimg=True')\n >>> import wbia\n >>> ibs, testres = wbia.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True')\n \"\"\"\n import sklearn.metrics\n\n qaids = testres.get_test_qaids()\n ibs = testres.ibs\n\n PLOT = True\n PLOT = False\n\n cfgx2_cms = []\n for qreq_ in testres.cfgx2_qreq_:\n cm_list = qreq_.execute(qaids)\n cm_list = [cm.extend_results(qreq_) for cm in cm_list]\n for cm in cm_list:\n cm.score_annot_csum(qreq_)\n # #cm.sortself()\n cfgx2_cms.append(cm_list)\n\n map_list = []\n unique_names, groupxs = ut.group_indices(ibs.annots(qaids).names)\n\n for cm_list, qreq_ in zip(cfgx2_cms, testres.cfgx2_qreq_):\n if PLOT:\n import wbia.plottool as pt\n\n pt.qt4ensure()\n fnum = pt.ensure_fnum(None)\n pt.figure(fnum=fnum)\n avep_list = []\n # fnum = pt.ensure_fnum(None)\n # pt.figure(fnum=fnum)\n for cm in cm_list:\n # Ignore junk images\n flags = np.array(ibs.annots(cm.daid_list).quality_texts) != 'junk'\n assert np.all(flags)\n daid_list = cm.daid_list\n dnid_list = cm.dnid_list\n y_true = (cm.qnid == dnid_list).compress(flags).astype(np.int)\n y_score = cm.annot_score_list.compress(flags)\n\n y_score[~np.isfinite(y_score)] = 0\n y_score = np.nan_to_num(y_score)\n sortx = np.argsort(y_score)[::-1]\n daid_list = daid_list.take(sortx)\n dnid_list = dnid_list.take(sortx)\n y_true = y_true.take(sortx)\n y_score = y_score.take(sortx)\n # logger.info(cm.get_annot_ranks(cm.get_top_gt_aids(ibs)))\n\n precision, recall, thresholds = sklearn.metrics.precision_recall_curve(\n y_true, y_score\n )\n\n if PLOT:\n pt.plot2(\n recall,\n precision,\n marker='',\n linestyle='-',\n x_label='recall',\n y_label='precision',\n )\n\n avep = sklearn.metrics.average_precision_score(y_true, y_score)\n # avep = [\n # sklearn.metrics.average_precision_score(y_true, y_score, average=average)\n # for average in ['micro', 'macro', 'samples', 'weighted']\n # ]\n # if np.any(np.isnan(avep)):\n # break\n # if np.isnan(avep):\n # break\n avep_list.append(avep)\n # mean_ave_precision = np.mean(avep_list, axis=0)\n name_to_ave = [np.mean(a) for a in ut.apply_grouping(avep_list, groupxs)]\n name_to_ave_ = dict(zip(unique_names, name_to_ave))\n logger.info(\n 'name_to_ave_ = %s' % (ut.align(ut.repr3(name_to_ave_, precision=3), ':'))\n )\n mean_ave_precision = np.mean(name_to_ave)\n logger.info('mean_ave_precision = %r' % (mean_ave_precision,))\n map_list.append(mean_ave_precision)\n return map_list\n\n def embed_testres(testres):\n \"\"\"\n CommandLine:\n python -m wbia TestResults.embed_testres\n\n Example:\n >>> # SCRIPT\n >>> from wbia.expt.test_result import * # NOQA\n >>> from wbia.init import main_helpers\n >>> ibs, testres = main_helpers.testdata_expts(defaultdb='PZ_MTEST')\n >>> embed_testres(testres)\n \"\"\"\n ut.embed()\n\n def get_options(testres):\n func_list = [\n testres.print_results,\n testres.draw_rank_cmc,\n 
testres.draw_match_cases,\n testres.embed_testres,\n ]\n return func_list\n\n def get_actions(testres):\n actions = ut.odict(\n [\n (testres.print_results, (['print', 'p'], '')),\n (testres.draw_rank_cmc, (['cmc'], '')),\n (testres.draw_match_cases, (['case'], '')),\n (testres.embed_testres, (['embed', 'ipy'], '')),\n ]\n )\n return actions\n\n def help(testres):\n # list functions that accept the standard interface\n prefix = 'wbia'\n suffix = testres.reconstruct_test_flags()\n func_list = testres.get_options()\n funcname_list = [ut.get_funcname(func) for func in func_list]\n cmdstr_list = [' '.join([prefix, funcname, suffix]) for funcname in funcname_list]\n ut.cprint('Available Functions:', 'blue')\n logger.info(', '.join(funcname_list))\n ut.cprint('Available Commandline:', 'blue')\n logger.info('\\n'.join(cmdstr_list))\n" ]
[ [ "numpy.tile", "numpy.mean", "numpy.where", "numpy.cumsum", "numpy.histogram", "numpy.full", "numpy.nan_to_num", "numpy.log", "numpy.nonzero", "numpy.logical_and", "numpy.take", "numpy.arange", "numpy.isfinite", "numpy.in1d", "numpy.nanmax", "numpy.vstack", "numpy.logical_or", "numpy.array", "numpy.zeros", "numpy.nansum", "numpy.intersect1d", "numpy.argsort", "numpy.logical_not", "numpy.isnan", "numpy.union1d", "pandas.DataFrame.from_dict", "numpy.sum", "numpy.any", "numpy.logical_or.reduce", "numpy.all" ] ]
garthee/gnot
[ "ac698c5cce0e2ebb77dd84c5d050417b735f5fbf" ]
[ "modules/ml_kmeans.py" ]
[ "import re\nimport json\nfrom math import log, sqrt\n\nfrom jinja2 import Markup\nfrom sklearn import cluster\nfrom sklearn.decomposition import PCA\nfrom scipy import stats\nfrom sklearn import metrics\nimport numpy\n\nfrom db import export_sql\nfrom werkzeug.wrappers import Response\n\n# create higher order transformations\ndef x2fs(X, fields, type=''):\n if type == 'Interaction':\n s2 = lambda x: x + 1\n e2 = lambda x, y: y\n elif type == 'Quadratic':\n s2 = lambda x: x\n e2 = lambda x, y: y\n elif type == 'Purely Quadratic':\n s2 = lambda x: x\n e2 = lambda x, y: x + 1\n else:\n return\n\n l1 = len(X[0])\n l2 = len(X[0])\n\n for i in range(len(X)):\n r = X[i]\n for j1 in range(l1):\n for j2 in range(s2(j1), e2(j1, l2)):\n r.append(r[j1] * r[j2])\n\n for j1 in range(l1):\n for j2 in range(s2(j1), e2(j1, l2)):\n fields.append(fields[j1] + '*' + fields[j2])\n\n\n# fit_transform from sklearn doesn't return the loadings V. Here is a hacked version\ndef fit_transform(pca, X):\n U, S, V = pca._fit(X)\n\n if pca.whiten:\n # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)\n U *= sqrt(X.shape[0])\n else:\n # X_new = X * V = U * S * V^T * V = U * S\n U *= S\n # transposing component matrix such that PCA_1 is in row\n V = V.transpose()\n return (U, V)\n\n\ndef evaluate(clust_dists, clustidx, X):\n results = {}\n sum = 0\n count = 0\n clustsum = [0 for i in range(len(clust_dists[0]))]\n clustcount = [0 for i in range(len(clust_dists[0]))]\n clustmean = [0 for i in range(len(clust_dists[0]))]\n for i in range(len(clustidx)):\n sum += clust_dists[i][clustidx[i]]\n count += 1\n\n clustsum[clustidx[i]] += clust_dists[i][clustidx[i]]\n clustcount[clustidx[i]] += 1\n\n averagedist = float(sum) / count\n\n results['meandist'] = averagedist\n\n for i in range(len(clust_dists[0])):\n clustmean[i] = float(clustsum[i]) / clustcount[i]\n\n return results, clustmean\n\n\ndef render(vis, request, info):\n info[\"message\"] = []\n info[\"results\"] = []\n\n # module independent user inputs\n table = request.args.get(\"table\", '')\n where = request.args.get(\"where\", '1=1')\n limit = request.args.get(\"limit\", '1000')\n start = request.args.get(\"start\", '0') # start at 0\n reload = int(request.args.get(\"reload\", 0))\n view = request.args.get(\"view\", '')\n\n # module dependent user inputs\n field = request.args.get(\"field\", '')\n pre_process = request.args.get(\"pre_process\", '')\n pre_transform = request.args.get(\"pre_transform\", '')\n\n orderBy = request.args.get(\"orderBy\", '')\n groupBy = request.args.get(\"groupBy\", '')\n if orderBy and len(orderBy) > 0: orderBy = ' order by %s' % orderBy\n if groupBy and len(groupBy) > 0: groupBy = ' group by %s' % groupBy\n\n k = int(request.args.get(\"k\", 2))\n pfield = request.args.get(\"pfield\", [])\n\n # verify essential parameter details - smell test\n if len(table) == 0 or len(field) == 0:\n info[\"message\"].append(\"Table or field missing\")\n info[\"message_class\"] = \"failure\"\n else:\n # prepare sql query\n sql = \"select %s from %s where %s %s %s limit %s offset %s\" % (\n field, table, where, groupBy, orderBy, limit, start)\n\n (datfile, reload, result) = export_sql(sql, vis.config, reload, None, view)\n\n if len(result) > 0:\n info[\"message\"].append(result)\n info[\"message_class\"] = \"failure\"\n else:\n X = []\n with open(datfile, 'r') as f:\n for r in f:\n row = r.rstrip().split(',')\n X.append([float(r) for r in row])\n\n xfield = pfield\n # transform features\n x2fs(X, xfield, pre_transform)\n pfield = xfield\n\n X = 
numpy.array(X)\n\n if pre_process == \"Z-Score\":\n X = stats.zscore(X, axis=0)\n elif pre_process == \"PCA\":\n pca = PCA()\n (X, V) = fit_transform(pca, X)\n pfield = ['PCA_%d' % (d + 1) for d in range(len(pfield))]\n elif pre_process == \"Whitened PCA\":\n pca = PCA(whiten=True)\n (X, V) = fit_transform(pca, X)\n pfield = ['PCA_%d' % (d + 1) for d in range(len(pfield))]\n\n clust = cluster.KMeans(n_clusters=k)\n cidx = clust.fit_predict(X)\n cdists = clust.transform(X)\n\n\n # summary results\n results, clustmeans = evaluate(cdists, cidx, X)\n info[\"results\"].append('Clustering the data using K-means with k=%d' % k)\n info[\"results\"].append('Average distance to centroid: %.4f' % results['meandist'])\n\n hashquery = datfile + hex(hash(request.args.get('query', datfile)) & 0xffffffff)\n\n if pre_process == \"PCA\" or pre_process == \"Whitened PCA\":\n #write pca matrix file\n info[\"datfile_matrix\"] = hashquery + '.pca.csv'\n with open(info[\"datfile_matrix\"], 'w') as f:\n f.write(\"feature,%s\\n\" % (','.join(xfield)))\n for i in range(len(V)):\n f.write('PCA_%d,%s\\n' % (i + 1, ','.join([str(v) for v in V[i]])))\n info[\"pca_matrix_divs\"] = Markup('<h2>PCA Components</h2><div id=\"svg-pca_matrix\"></div>')\n else:\n info[\"pca_matrix_divs\"] = ''\n\n # preparing within cluster distances into a js array\n f = []\n for i in range(k):\n f.append('{cluster:\"%d\", distance:%.3f}' % (i, clustmeans[i]))\n info[\"clust_data\"] = Markup('clust_data=[' + ','.join(f) + '];')\n\n #provenance\n #0:id,1:prediction result (grouping),2:actual label(shape),3:error,4:y,or features\n info[\"datfile_provenance\"] = hashquery + '.provenance.csv'\n RES = ['Cluster %d' % (i + 1) for i in range(k)]\n with open(info[\"datfile_provenance\"], 'w') as f:\n f.write('Cluster,Error,%s\\n' % (','.join(pfield)))\n for i in range(len(cidx)):\n e = cdists[i][cidx[i]]\n f.write('%s,%.4f,%s\\n' % (RES[cidx[i]], e, ','.join([str(r) for r in X[i]])))\n\n pfield = ['cluster'] + pfield\n divs = [\n '<div class=\"chart\"><div class=\"title\">%s<a href=\"javascript:reset(%d)\" class=\"reset\" style=\"display: none;\">reset</a></div></div>' % (\n pfield[d], d + 1) for d in range(len(pfield))]\n divs = ''.join(divs)\n divs = '<div class=\"chart\"><div class=\"title\">Distance to Centroid (<span id=\"active\"></span> of <span id=\"total\"></span> items selected.)<a href=\"javascript:reset(0)\" class=\"reset\" style=\"display: none;\">reset</a></div></div>' + divs\n info['provenance_divs'] = Markup(divs)\n\n info[\"message_class\"] = \"success\"\n if reload > 0:\n info[\"message\"].append(\"Loaded fresh.\")\n else:\n info[\"message\"].append(\"Loading from cache. Use reload=1 to reload.\")\n\n info[\"datfile\"] = info[\"datfile_provenance\"]\n # prepare some messages\n info[\"title\"] = \"FIELD_X: <em>%s</em> from <br />TABLE: <em>%s</em>\" % (','.join(pfield), table)\n info[\"title\"] = Markup(info[\"title\"])\n info[\"message\"] = Markup(''.join('<p>%s</p>' % m for m in info[\"message\"] if len(m) > 0))\n info[\"results\"] = Markup('<ul>' + ''.join('<li>%s</li>' % m for m in info[\"results\"] if len(m) > 0) + '</ul>')\n\n # format the message to encode HTML characters\n info['query'] = Markup(request.args.get('query', ''))\n\n t = vis.jinja_env.get_template('explore.html')\n v1 = t.render(**info)\n t = vis.jinja_env.get_template('ml_kmeans.html')\n v2 = t.render(**info)\n v3 = v1[:-7] + v2 + v1[-7:] + '</html>'\n return Response(v3, mimetype='text/html')\n" ]
[ [ "scipy.stats.zscore", "numpy.array", "sklearn.cluster.KMeans", "sklearn.decomposition.PCA" ] ]
kovibalu/face.evoLVe.PyTorch
[ "b7483bd05efc637f58d40e3b2c5f3421d52f1aae" ]
[ "test_video_stream.py" ]
[ "import argparse\nimport glob\nimport os\nimport time\nimport vlc\n\nimport cv2\nimport numpy as np\nfrom enum import Enum\nfrom tqdm import tqdm\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom align.align_trans import get_reference_facial_points\nfrom align.detector import load_detect_faces_models, process_faces\nfrom align.visualization_utils import draw_fps, show_results\nfrom util.extract_feature_v2 import extract_feature_for_img, load_face_id_model\n\nMIN_FACE_PROB = 0.9\nSTREAM_DIR = '/home/ec2-user/projects/facelab-data/stream-data'\nRESULT_DIR = '/home/ec2-user/projects/facelab-data/results'\nID_FEATURES_DIR = '/home/ec2-user/projects/facelab-data/test_Aligned/'\nFACE_ID_MODEL_ROOT = '/home/ec2-user/projects/facelab-data/models/backbone_ir50_ms1m_epoch120.pth'\nFONT_PATH = '/usr/share/fonts/dejavu/DejaVuSans.ttf'\n\n\nclass Mode(Enum):\n DEMO = 1\n FILE = 2\n\n def __str__(self):\n return self.name\n\n @staticmethod\n def from_string(s):\n try:\n return Mode[s]\n except KeyError:\n raise ValueError()\n\n\ndef load_id_files(id_features_dir):\n id_npy = {}\n for path in glob.glob('{}/*.npy'.format(id_features_dir)):\n name = os.path.splitext(os.path.basename(path))[0]\n id_npy[name] = np.load(path)\n return id_npy\n\n\ndef check_identity(id_npy, query_features, max_min_dist=1.0):\n distances_from_id = {}\n for name, id_npy_arr in id_npy.items():\n distances_from_id[name] = []\n for id_npy_row in id_npy_arr:\n dist = np.linalg.norm(id_npy_row - query_features)\n distances_from_id[name].append(dist)\n\n min_dist = np.finfo(float).max\n name_match = ''\n for name, distances in distances_from_id.items():\n avg = np.mean(distances)\n if avg < min_dist:\n min_dist = avg\n name_match = name\n\n if min_dist > max_min_dist:\n name_match = 'Unknown'\n\n return name_match, min_dist\n\n\ndef process_and_viz_img(pil_img,\n det_models,\n face_id_model,\n reference,\n crop_size,\n id_npy,\n font):\n # Detect bboxes and landmarks for all faces in the image and warp the\n # faces.\n face_results = process_faces(\n img=pil_img,\n det_models=det_models,\n reference=reference,\n crop_size=crop_size)\n\n # Filter results by detection probability.\n filtered_face_results = []\n for face_result in face_results:\n face_prob = face_result.bounding_box[4]\n if face_prob < MIN_FACE_PROB:\n print('Skipping detection with low face probability: {:.2f}'.format(face_prob))\n continue\n\n filtered_face_results.append(face_result)\n\n face_results = filtered_face_results\n\n identity_list = []\n for face_result in face_results:\n features = extract_feature_for_img(\n img=face_result.warped_face,\n backbone=face_id_model)\n # features is tensor, so converting to numpy arr below\n identity, min_dist = check_identity(\n id_npy=id_npy,\n query_features=features.numpy())\n identity_list.append((identity, '({:.2f})'.format(min_dist)))\n\n # Visualize the results\n viz_img = show_results(\n img=pil_img,\n bounding_boxes=[\n fr.bounding_box\n for fr in face_results\n ],\n facial_landmarks=[\n fr.landmark\n for fr in face_results\n ],\n names=identity_list,\n font=font)\n\n if identity_list:\n names = list(zip(*identity_list))[0]\n else:\n names = []\n return viz_img, names\n\n\ndef play_sound_for_name(name):\n name_to_sound_file = {\n 'neelam': '/Users/bkovacs/Documents/neelam-how-is-it-going.m4a',\n 'kovi': '/Users/bkovacs/Documents/balazs-how-is-it-going.m4a',\n }\n name = name.lower()\n if name not in name_to_sound_file:\n return\n player = vlc.MediaPlayer(name_to_sound_file[name])\n player.play()\n\n\ndef 
play_sound_if_needed(names,\n name_to_last_time_seen,\n cur_time,\n min_elapsed_to_play=3):\n for name in names:\n if (name not in name_to_last_time_seen or\n name_to_last_time_seen[name] + min_elapsed_to_play < cur_time):\n play_sound_for_name(name)\n name_to_last_time_seen[name] = cur_time\n\n\ndef demo(det_models,\n face_id_model,\n reference,\n crop_size,\n id_npy,\n max_size,\n font):\n cap = cv2.VideoCapture(0)\n name_to_last_time_seen = {}\n\n try:\n while cap.isOpened():\n start_time = time.time()\n ret, image_np = cap.read()\n if ret and cap.isOpened():\n # Process frame\n # BGR -> RGB\n pil_img = Image.fromarray(image_np[..., ::-1])\n pil_img.thumbnail((max_size, max_size))\n viz_img, names = process_and_viz_img(\n pil_img=pil_img,\n det_models=det_models,\n face_id_model=face_id_model,\n reference=reference,\n crop_size=crop_size,\n id_npy=id_npy,\n font=font,\n )\n cur_time = time.time()\n play_sound_if_needed(\n names=names,\n name_to_last_time_seen=name_to_last_time_seen,\n cur_time=cur_time)\n\n fps = 1.0 / (cur_time - start_time)\n draw_fps(\n img=viz_img,\n font=font,\n fps=fps,\n )\n # Display the resulting frame\n viz_img_bgr = np.array(viz_img)[..., ::-1]\n cv2.imshow('Face Detection Demo', viz_img_bgr)\n # Quit if we press 'q'.\n if (cv2.waitKey(1) & 0xFF) == ord('q'):\n break\n finally:\n # When everything is done, release the capture.\n cap.release()\n cv2.destroyAllWindows()\n\n\ndef process_files(input_dir,\n output_dir,\n det_models,\n face_id_model,\n reference,\n crop_size,\n id_npy,\n max_size,\n font):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n image_names = list(os.listdir(input_dir))\n for img_idx in tqdm(range(len(image_names))):\n image_name = image_names[img_idx]\n pil_img = Image.open(os.path.join(input_dir, image_name))\n pil_img.thumbnail((max_size, max_size))\n viz_img, _ = process_and_viz_img(\n pil_img=pil_img,\n det_models=det_models,\n face_id_model=face_id_model,\n reference=reference,\n crop_size=crop_size,\n id_npy=id_npy,\n font=font,\n )\n viz_img.save(os.path.join(output_dir, '{}-stream.jpg'.format(img_idx)))\n\n\ndef main(mode, face_id_model_root, id_features_dir, font_path):\n print('Loading models...')\n det_models = load_detect_faces_models()\n face_id_model = load_face_id_model(model_root=face_id_model_root)\n id_npy = load_id_files(id_features_dir)\n crop_size = 112\n max_size = 1024\n reference = get_reference_facial_points(default_square=True)\n font = ImageFont.FreeTypeFont(font=font_path, size=24)\n\n print('Starting image processing...')\n if mode == Mode.DEMO:\n demo(\n det_models=det_models,\n face_id_model=face_id_model,\n reference=reference,\n crop_size=crop_size,\n id_npy=id_npy,\n max_size=max_size,\n font=font)\n elif mode == Mode.FILE:\n process_files(\n input_dir=STREAM_DIR,\n output_dir=RESULT_DIR,\n det_models=det_models,\n face_id_model=face_id_model,\n reference=reference,\n crop_size=crop_size,\n id_npy=id_npy,\n max_size=max_size,\n font=font)\n else:\n raise ValueError('Invalid mode: {}'.format(mode))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=Mode.from_string, default=Mode.DEMO, choices=list(Mode))\n parser.add_argument('--face_id_model_root',\n type=str,\n default=FACE_ID_MODEL_ROOT)\n parser.add_argument('--id_features_dir',\n type=str,\n default=ID_FEATURES_DIR)\n parser.add_argument('--font_path',\n type=str,\n default=FONT_PATH)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args.mode,\n 
args.face_id_model_root,\n args.id_features_dir,\n args.font_path)\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.load", "numpy.mean", "numpy.finfo" ] ]
skn047/DeepLearningMugenKnock
[ "73d2b903816b380d56020c8336041883bc0d131c", "73d2b903816b380d56020c8336041883bc0d131c" ]
[ "Question_semaseg/my_answers/bin_loss_pytorch.py", "Question_semaseg/answers/transposeconv_tensorflow_slim.py" ]
[ "import torch\nimport torch.nn.functional as F\nimport argparse\nimport cv2\nimport numpy as np\nfrom glob import glob\nimport matplotlib.pyplot as plt\n\nnum_classes = 2\nimg_height, img_width = 64, 64#572, 572\nout_height, out_width = 64, 64#388, 388\nGPU = False\ntorch.manual_seed(0)\n\n\nclass Mynet(torch.nn.Module):\n def __init__(self):\n super(Mynet, self).__init__() # necessarry?\n\n enc1 = []\n\n enc1.append(torch.nn.Conv2d(3, 32, kernel_size=3, padding=1, stride=1))\n enc1.append(torch.nn.BatchNorm2d(32))\n enc1.append(torch.nn.ReLU())\n\n enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))\n enc1.append(torch.nn.BatchNorm2d(32))\n enc1.append(torch.nn.ReLU())\n\n enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))\n enc1.append(torch.nn.BatchNorm2d(32))\n enc1.append(torch.nn.ReLU())\n\n enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))\n enc1.append(torch.nn.BatchNorm2d(32))\n enc1.append(torch.nn.ReLU())\n\n enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))\n enc1.append(torch.nn.BatchNorm2d(32))\n enc1.append(torch.nn.ReLU())\n\n enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))\n enc1.append(torch.nn.BatchNorm2d(32))\n enc1.append(torch.nn.ReLU())\n\n self.enc1 = torch.nn.Sequential(*enc1)\n\n self.out = torch.nn.Conv2d(32, 1, kernel_size, padding=0, stride=1)\n", "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport tensorflow as tf\nfrom tensorflow.contrib import slim\n\nimport argparse\nimport cv2\nimport numpy as np\nfrom glob import glob\nimport matplotlib.pyplot as plt\n\n\nnum_classes = 2\nimg_height, img_width = 64, 64#572, 572\nout_height, out_width = 64, 64#388, 388\n \ndef Mynet(x, keep_prob, train=False):\n # block conv1\n\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\n #activation_fn=tf.nn.relu,\n weights_initializer=tf.truncated_normal_initializer(0.0, 0.01)):\n #weights_regularizer=slim.l2_regularizer(0.0005)):\n for i in range(2):\n x = slim.conv2d(x, 32, [3,3], scope='conv1_{}'.format(i+1))\n x = tf.nn.relu(x)\n x = slim.batch_norm(x, is_training=train)\n \n x = slim.max_pool2d(x, [2,2], scope='pool1')\n\n for i in range(2):\n x = slim.conv2d(x, 32, [3,3], scope='conv2_{}'.format(i+1))\n x = tf.nn.relu(x)\n x = slim.batch_norm(x, is_training=train)\n \n #_, _h, _w, _c = x.shape\n #x = tf.image.resize_images(x, [_h*2, _w*2], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n x = slim.conv2d_transpose(x, 32, [2,2], stride=2, scope='tconv')\n x = tf.nn.relu(x)\n x = slim.batch_norm(x, is_training=train)\n\n for i in range(2):\n x = slim.conv2d(x, 32, [3,3], scope='dec1_{}'.format(i+1))\n x = tf.nn.relu(x)\n x = slim.batch_norm(x, is_training=train)\n \n x = slim.conv2d(x, num_classes+1, [1, 1], scope='out')\n\n return x\n\n \nCLS = {'background': [0,0,0],\n 'akahara': [0,0,128],\n 'madara': [0,128,0]}\n \n# get train data\ndef data_load(path, hf=False, vf=False):\n xs = []\n ts = []\n paths = []\n \n for dir_path in glob(path + '/*'):\n for path in glob(dir_path + '/*'):\n x = cv2.imread(path)\n x = cv2.resize(x, (img_width, img_height)).astype(np.float32)\n x /= 255.\n x = x[..., ::-1]\n xs.append(x)\n\n gt_path = path.replace(\"images\", \"seg_images\").replace(\".jpg\", \".png\")\n gt = cv2.imread(gt_path)\n gt = cv2.resize(gt, (out_width, out_height), interpolation=cv2.INTER_NEAREST)\n\n t = np.zeros((out_height, out_width, num_classes+1), dtype=np.float)\n\n for i , (label, vs) in enumerate(CLS.items()):\n ind = (gt[...,0] == 
vs[0]) * (gt[...,1] == vs[1]) * (gt[...,2] == vs[2])\n ind = np.where(ind == True)\n t[ind[0], ind[1], i] = 1\n\n #ind = (gt[..., 0] == 0) * (gt[..., 1] == 0) * (gt[..., 2] == 0)\n #ind = np.where(ind == True)\n #t[ind[0], ind[1], 0] = 1\n #ind = (gt[...,0] > 0) + (gt[..., 1] > 0) + (gt[...,2] > 0)\n #t[ind] = 1\n\n #print(gt_path)\n #import matplotlib.pyplot as plt\n #plt.imshow(t, cmap='gray')\n #plt.show()\n\n ts.append(t)\n \n paths.append(path)\n\n if hf:\n xs.append(x[:, ::-1])\n ts.append(t[:, ::-1])\n paths.append(path)\n\n if vf:\n xs.append(x[::-1])\n ts.append(t[::-1])\n paths.append(path)\n\n if hf and vf:\n xs.append(x[::-1, ::-1])\n ts.append(t[::-1, ::-1])\n paths.append(path)\n\n xs = np.array(xs)\n ts = np.array(ts)\n\n return xs, ts, paths\n\n\n# train\ndef train():\n tf.reset_default_graph()\n\n # place holder\n X = tf.placeholder(tf.float32, [None, img_height, img_width, 3])\n Y = tf.placeholder(tf.float32, [None, num_classes+1])\n keep_prob = tf.placeholder(tf.float32)\n \n logits = Mynet(X, keep_prob, train=True)\n logits = tf.reshape(logits, [-1, num_classes+1])\n \n preds = tf.nn.softmax(logits)\n loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=Y))\n #loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))\n optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)\n train = optimizer.minimize(loss)\n\n correct_pred = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n \n\n xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)\n\n # training\n mb = 4\n mbi = 0\n train_ind = np.arange(len(xs))\n np.random.seed(0)\n np.random.shuffle(train_ind)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.visible_device_list=\"0\"\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n \n for i in range(500):\n if mbi + mb > len(xs):\n mb_ind = train_ind[mbi:]\n np.random.shuffle(train_ind)\n mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))\n mbi = mb - (len(xs) - mbi)\n else:\n mb_ind = train_ind[mbi: mbi+mb]\n mbi += mb\n\n x = xs[mb_ind]\n t = ts[mb_ind]\n\n t = np.reshape(t, [-1, num_classes+1])\n\n _, acc, los = sess.run([train, accuracy, loss], feed_dict={X: x, Y: t, keep_prob: 0.5})\n print(\"iter >>\", i+1, ',loss >>', los / mb, ',accuracy >>', acc)\n\n saver = tf.train.Saver()\n saver.save(sess, './cnn.ckpt')\n\n# test\ndef test():\n tf.reset_default_graph()\n\n X = tf.placeholder(tf.float32, [None, img_height, img_width, 3])\n Y = tf.placeholder(tf.float32, [None, num_classes])\n keep_prob = tf.placeholder(tf.float32)\n\n logits = Mynet(X, keep_prob, train=True)\n logits = tf.reshape(logits, [-1, num_classes+1])\n logits = tf.nn.softmax(logits)\n\n xs, ts, paths = data_load(\"../Dataset/test/images/\")\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.visible_device_list=\"0\"\n with tf.Session(config=config) as sess:\n saver = tf.train.Saver()\n #saver = tf.train.import_meta_graph(\"./cnn.ckpt.meta\")\n saver.restore(sess, \"./cnn.ckpt\")\n\n for i in range(len(paths)):\n x = xs[i]\n t = ts[i]\n path = paths[i]\n \n x = np.expand_dims(x, axis=0)\n\n pred = sess.run([logits], feed_dict={X: x, keep_prob:1.0})[0]\n pred = np.reshape(pred, [out_height, out_width, num_classes+1])\n pred = np.argmax(pred, axis=-1)\n\n # visualize\n out = np.zeros((out_height, out_width, 3), 
dtype=np.uint8)\n for i, (_, vs) in enumerate(CLS.items()):\n out[pred == i] = vs\n \n print(\"in {}\".format(path))\n \n plt.subplot(1,2,1)\n plt.imshow(x[0])\n plt.title(\"input\")\n plt.subplot(1,2,2)\n plt.imshow(out[..., ::-1])\n plt.title(\"predicted\")\n plt.show()\n\n \n\ndef arg_parse():\n parser = argparse.ArgumentParser(description='CNN implemented with Keras')\n parser.add_argument('--train', dest='train', action='store_true')\n parser.add_argument('--test', dest='test', action='store_true')\n args = parser.parse_args()\n return args\n\n# main\nif __name__ == '__main__':\n args = arg_parse()\n\n if args.train:\n train()\n if args.test:\n test()\n\n if not (args.train or args.test):\n print(\"please select train or test flag\")\n print(\"train: python main.py --train\")\n print(\"test: python main.py --test\")\n print(\"both: python main.py --train --test\")\n" ]
[ [ "torch.nn.Sequential", "torch.nn.BatchNorm2d", "torch.manual_seed", "torch.nn.ReLU", "torch.nn.Conv2d" ], [ "tensorflow.contrib.slim.max_pool2d", "tensorflow.reshape", "numpy.where", "tensorflow.nn.softmax", "tensorflow.global_variables_initializer", "tensorflow.cast", "tensorflow.argmax", "tensorflow.train.Saver", "tensorflow.ConfigProto", "tensorflow.contrib.slim.conv2d_transpose", "numpy.argmax", "numpy.expand_dims", "matplotlib.pyplot.subplot", "numpy.array", "tensorflow.nn.relu", "numpy.zeros", "numpy.reshape", "tensorflow.Session", "matplotlib.pyplot.title", "tensorflow.losses.softmax_cross_entropy", "numpy.random.shuffle", "tensorflow.placeholder", "tensorflow.contrib.slim.conv2d", "matplotlib.pyplot.show", "tensorflow.contrib.slim.batch_norm", "numpy.random.seed", "tensorflow.train.MomentumOptimizer", "tensorflow.reset_default_graph", "tensorflow.truncated_normal_initializer", "matplotlib.pyplot.imshow" ] ]
ancago/search-download-favicons-from-web
[ "c41c29940f94a52fc22248b62dade7e34b742ef6" ]
[ "A01_WEB_BROWSER_get_Official_WWWs_create_COM_domain.py" ]
[ "import pandas as pd\r\nimport time\r\nfrom google import google\r\nimport sys\r\nfrom A00_File_name import file_name\r\n\r\n\r\nfile_df = pd.read_csv(file_name, sep=';', encoding='latin-1')\r\nprint(file_df.head())\r\n\r\nbrand_names_list = file_df['Official Chain Name'].tolist()\r\n\r\n\r\n'''\r\ncreate a column with Official Brand WWWs\r\n'''\r\n# https://github.com/abenassi/Google-Search-API\r\n\r\nWWW = []\r\nfor index in range(len(brand_names_list)):\r\n search_results = google.search(str(brand_names_list[index]) +\r\n ' ' + str(file_df.iloc[index]['Category']) + \" official website\")\r\n time.sleep(3)\r\n result_nb = 0\r\n try:\r\n for i in range(len(search_results)):\r\n\r\n if \"wiki\" in str(search_results[i].link) or 'facebook' in str(search_results[i].link).lower() \\\r\n or'stackoverflow' in str(search_results[i].link).lower():\r\n print(str(index), 'wiki or facebook or stackoverflow')\r\n pass\r\n else:\r\n print(search_results[i].link)\r\n WWW.append(\"/\".join(search_results[i].link.split(\"/\", 3)[:3]))\r\n print(index, i)\r\n result_nb += 1\r\n break\r\n if result_nb == 0:\r\n WWW.append('[]')\r\n\r\n except OSError:\r\n WWW.append('Permission denial ' + str(sys.exc_info()[0]))\r\n except:\r\n WWW.append(sys.exc_info()[0])\r\n\r\nprint(len(brand_names_list))\r\nprint(len(WWW))\r\n\r\n\r\n'''\r\ncreate a column with .com domain\r\n'''\r\n\r\n\r\ndef create_www_brand_COM(brand_name):\r\n newstr = brand_name.replace(\"'\", \"\")\r\n newstr = newstr.replace(\" \", \"\")\r\n newstr = newstr.replace(\".\", \"\")\r\n newstr = newstr.replace(\"&\", \"\")\r\n newstr = newstr.replace(\"-\", \"\")\r\n\r\n newstr = newstr + '.com'\r\n newstr = newstr.lower()\r\n print(newstr)\r\n return newstr\r\n\r\n\r\nbrands_wwws = []\r\nfor name in file_df['Official Chain Name']:\r\n brands_wwws.append(create_www_brand_COM(name))\r\n print(brands_wwws)\r\n\r\n\r\nfile_df['Official Web Page'] = WWW\r\nfile_df['.com Web Page'] = brands_wwws\r\nprint(file_df.head())\r\n\r\nfile_df.to_csv(file_name[:-4] + '_URLs_from_WB.csv', sep=';')\r\n" ]
[ [ "pandas.read_csv" ] ]
kmaasrud/vmc
[ "1d29f18a4cb08ed65ab531a174f1869748f5ac2a" ]
[ "vmc/result_analysis/E_vs_MCs.py" ]
[ "import os\nimport matplotlib.pyplot as plt\nplt.style.use(\"seaborn\")\nimport numpy as np\nfrom lib.utils import read_csv, find_cargo_root\nfrom lib.blocking import block\n\ndata_folder = os.path.join(find_cargo_root(), \"data\")\nsave_folder = os.path.join(os.path.dirname(find_cargo_root()), \"report\", \"assets\")\nif not os.path.isdir(save_folder):\n os.mkdir(save_folder)\n\nN = 10\ntrue_val = 15\n\nbruteforce = read_csv(os.path.join(data_folder, \"E_vs_MCs_BruteForceMetropolis.csv\"))\nimportance = read_csv(os.path.join(data_folder, \"E_vs_MCs_ImportanceMetropolis.csv\"))\nx = [100, 1000, 3000, 5000, 7000, 10000]\n#bruteforce_std = [np.sqrt(block(np.array(vals))[1]) for vals in [bruteforce[\"energy[au]\"][1:up_to] for up_to in x]]\n#importance_std = [np.sqrt(block(np.array(vals))[1]) for vals in [importance[\"energy[au]\"][1:up_to] for up_to in x]]\n\n#plt.plot(x, bruteforce_std, \"-o\", label=\"Brute-force\")\n#plt.plot(x, importance_std, \"-o\", label=\"Importance\")\nplt.plot(range(len(bruteforce[\"energy[au]\"][1:])), bruteforce[\"energy[au]\"][1:], \"-o\", label=\"Brute-force\")\nplt.plot(range(len(importance[\"energy[au]\"][1:])), importance[\"energy[au]\"][1:], \"-o\", label=\"Importance\")\nplt.xlabel(\"Monte Carlo cycles\")\nplt.ylabel(r\"Energy\")\nplt.legend()\nplt.savefig(os.path.join(save_folder, \"E_vs_MCs_all.png\"))\nplt.show()" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show" ] ]
coodest/GAug
[ "ef6ab307e3dfd3e9e0a653d385dc1f41963f9ba8" ]
[ "vgae/utils.py" ]
[ "import time\nimport copy\nimport pickle\nimport warnings\nimport numpy as np\nimport scipy.sparse as sp\nimport torch\nimport torch.nn.functional as F\nfrom sklearn.metrics import roc_auc_score, average_precision_score, precision_recall_curve, auc\n\ndef sparse_to_tuple(sparse_mx):\n if not sp.isspmatrix_coo(sparse_mx):\n sparse_mx = sparse_mx.tocoo()\n coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()\n values = sparse_mx.data\n shape = sparse_mx.shape\n return coords, values, shape\n\ndef get_scores(edges_pos, edges_neg, A_pred, adj_label):\n # get logists and labels\n preds = A_pred[edges_pos.T]\n preds_neg = A_pred[edges_neg.T]\n logists = np.hstack([preds, preds_neg])\n labels = np.hstack([np.ones(preds.size(0)), np.zeros(preds_neg.size(0))])\n # logists = A_pred.view(-1)\n # labels = adj_label.to_dense().view(-1)\n # calc scores\n roc_auc = roc_auc_score(labels, logists)\n ap_score = average_precision_score(labels, logists)\n precisions, recalls, thresholds = precision_recall_curve(labels, logists)\n pr_auc = auc(recalls, precisions)\n warnings.simplefilter('ignore', RuntimeWarning)\n f1s = np.nan_to_num(2*precisions*recalls/(precisions+recalls))\n best_comb = np.argmax(f1s)\n f1 = f1s[best_comb]\n pre = precisions[best_comb]\n rec = recalls[best_comb]\n thresh = thresholds[best_comb]\n # calc reconstracted adj_mat and accuracy with the threshold for best f1\n adj_rec = copy.deepcopy(A_pred)\n adj_rec[adj_rec < thresh] = 0\n adj_rec[adj_rec >= thresh] = 1\n labels_all = adj_label.to_dense().view(-1).long()\n preds_all = adj_rec.view(-1).long()\n recon_acc = (preds_all == labels_all).sum().float() / labels_all.size(0)\n results = {'roc': roc_auc,\n 'pr': pr_auc,\n 'ap': ap_score,\n 'pre': pre,\n 'rec': rec,\n 'f1': f1,\n 'acc': recon_acc,\n 'adj_recon': adj_rec}\n return results\n\ndef train_model(args, dl, vgae):\n optimizer = torch.optim.Adam(vgae.parameters(), lr=args.lr)\n # weights for log_lik loss\n adj_t = dl.adj_train\n norm_w = adj_t.shape[0]**2 / float((adj_t.shape[0]**2 - adj_t.sum()) * 2)\n pos_weight = torch.FloatTensor([float(adj_t.shape[0]**2 - adj_t.sum()) / adj_t.sum()]).to(args.device)\n # move input data and label to gpu if needed\n features = dl.features.to(args.device)\n adj_label = dl.adj_label.to_dense().to(args.device)\n\n best_vali_criterion = 0.0\n best_state_dict = None\n vgae.train()\n for epoch in range(args.epochs):\n t = time.time()\n A_pred = vgae(features)\n optimizer.zero_grad()\n loss = log_lik = norm_w*F.binary_cross_entropy_with_logits(A_pred, adj_label, pos_weight=pos_weight)\n if not args.gae:\n kl_divergence = 0.5/A_pred.size(0) * (1 + 2*vgae.logstd - vgae.mean**2 - torch.exp(2*vgae.logstd)).sum(1).mean()\n loss -= kl_divergence\n\n A_pred = torch.sigmoid(A_pred).detach().cpu()\n r = get_scores(dl.val_edges, dl.val_edges_false, A_pred, dl.adj_label)\n print('Epoch{:3}: train_loss: {:.4f} recon_acc: {:.4f} val_roc: {:.4f} val_ap: {:.4f} f1: {:.4f} time: {:.4f}'.format(\n epoch+1, loss.item(), r['acc'], r['roc'], r['ap'], r['f1'], time.time()-t))\n if r[args.criterion] > best_vali_criterion:\n best_vali_criterion = r[args.criterion]\n best_state_dict = copy.deepcopy(vgae.state_dict())\n # r_test = get_scores(dl.test_edges, dl.test_edges_false, A_pred, dl.adj_label)\n r_test = r\n print(\" test_roc: {:.4f} test_ap: {:.4f} test_f1: {:.4f} test_recon_acc: {:.4f}\".format(\n r_test['roc'], r_test['ap'], r_test['f1'], r_test['acc']))\n loss.backward()\n optimizer.step()\n\n print(\"Done! 
final results: test_roc: {:.4f} test_ap: {:.4f} test_f1: {:.4f} test_recon_acc: {:.4f}\".format(\n r_test['roc'], r_test['ap'], r_test['f1'], r_test['acc']))\n\n vgae.load_state_dict(best_state_dict)\n return vgae\n\ndef gen_graphs(args, dl, vgae):\n adj_orig = dl.adj_orig\n assert adj_orig.diagonal().sum() == 0\n # sp.csr_matrix\n if args.gae:\n pickle.dump(adj_orig, open(f'graphs/{args.dataset}_graph_0_gae.pkl', 'wb'))\n else:\n pickle.dump(adj_orig, open(f'graphs/{args.dataset}_graph_0.pkl', 'wb'))\n # sp.lil_matrix\n pickle.dump(dl.features_orig, open(f'graphs/{args.dataset}_features.pkl', 'wb'))\n features = dl.features.to(args.device)\n for i in range(args.gen_graphs):\n with torch.no_grad():\n A_pred = vgae(features)\n A_pred = torch.sigmoid(A_pred).detach().cpu()\n r = get_scores(dl.val_edges, dl.val_edges_false, A_pred, dl.adj_label)\n adj_recon = A_pred.numpy()\n np.fill_diagonal(adj_recon, 0)\n # np.ndarray\n if args.gae:\n filename = f'graphs/{args.dataset}_graph_{i+1}_logits_gae.pkl'\n else:\n filename = f'graphs/{args.dataset}_graph_{i+1}_logits.pkl'\n pickle.dump(adj_recon, open(filename, 'wb'))\n" ]
[ [ "torch.nn.functional.binary_cross_entropy_with_logits", "torch.sigmoid", "numpy.nan_to_num", "numpy.fill_diagonal", "sklearn.metrics.precision_recall_curve", "torch.no_grad", "sklearn.metrics.average_precision_score", "sklearn.metrics.auc", "numpy.argmax", "numpy.hstack", "scipy.sparse.isspmatrix_coo", "torch.exp", "sklearn.metrics.roc_auc_score", "numpy.vstack" ] ]
Darkar25/HyperGAN
[ "3153daee838dbb8e8d8926b1e81419682a24f2fe", "3153daee838dbb8e8d8926b1e81419682a24f2fe", "3153daee838dbb8e8d8926b1e81419682a24f2fe" ]
[ "hypergan/trainers/experimental/evolution_trainer.py", "hypergan/gans/distribution_filtering_gan.py", "hypergan/multi_component.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport hyperchamber as hc\nimport inspect\n\nfrom hypergan.trainers.base_trainer import BaseTrainer\n\nTINY = 1e-12\n\nclass EvolutionTrainer(BaseTrainer):\n def _create(self):\n gan = self.gan\n generator = self.gan.generator\n config = self.config\n\n d_vars = self.d_vars or gan.discriminator.variables()\n\n loss = self.loss or gan.loss\n d_loss, g_loss = loss.sample\n\n self.d_log = -tf.log(tf.abs(d_loss+TINY))\n\n\n d_optimizer = self.build_optimizer(config, 'd_', config.d_trainer, self.d_lr, d_vars, d_loss)\n #TODO more than one g_loss\n g_optimizer = [self.build_optimizer(config, 'g_', config.g_trainer, self.g_lr, g.variables(), g_loss) for g, l in zip(generator.children, loss.children_losses)]\n\n assign_children = []\n for p, o in generator.parent_child_tuples:\n for ov, pv in zip(o.variables(), p.variables()):\n op=tf.assign(ov, pv)\n if config.mutation_percent:\n op += tf.random_normal(self.gan.ops.shape(pv), mean=0, stddev=0.01) * tf.cast(tf.greater(config.mutation_percent, tf.random_uniform(shape=self.gan.ops.shape(pv), minval=0, maxval=1)), tf.float32)\n assign_children.append(op)\n self.clone_parent = tf.group(*assign_children)\n\n\n update_parent=[]\n for p, o in generator.parent_child_tuples:\n c_to_p = []\n for ov, pv in zip(o.variables(), p.variables()):\n op=tf.assign(pv, ov)\n c_to_p.append(op)\n update_parent.append(tf.group(*c_to_p))\n self.update_parent = update_parent\n f_lambda = config.f_lambda or 1\n\n def _squash(grads):\n return tf.add_n([tf.reshape(gan.ops.squash(g), [1]) for g in grads])\n children_grads = [_squash(tf.gradients(l, d_vars)) for l in loss.children_losses]\n if config.fitness == \"g\":\n self.measure_g = [-l for l in loss.children_losses]\n else:\n self.measure_g = [-l+f_lambda*(-tf.log(TINY+grad_d - tf.log(TINY+tf.nn.sigmoid(loss.d_loss)) - tf.log(TINY+1-tf.nn.sigmoid(l)))) for l, grad_d in zip(loss.children_losses, children_grads)]\n loss.metrics['measure_g'] = tf.reduce_mean(self.measure_g)\n loss.metrics['g_loss'] = loss.g_loss\n loss.metrics['d_loss'] = loss.d_loss\n\n self.g_loss = g_loss\n self.d_loss = d_loss\n self.d_optimizer = d_optimizer\n self.g_optimizer = g_optimizer\n self.hist = [0 for i in range(len(self.gan.generator.children))]\n\n return g_optimizer, d_optimizer\n\n def _step(self, feed_dict):\n gan = self.gan\n sess = gan.session\n config = self.config\n loss = self.loss or gan.loss\n metrics = loss.metrics\n generator = gan.generator\n\n d_loss, g_loss = loss.sample\n\n #winner = np.random.choice(range(len(gan.generator.children)))\n winners = []\n \n for i in range(len(generator.parents)):\n child_count = generator.config.child_count\n choices = self.measure_g[i*child_count:(i+1)*child_count]\n choice = np.argmax(sess.run(choices))\n winner = i*child_count + choice\n self.hist[winner]+=1\n winners.append(winner)\n sess.run([self.update_parent[winner] for winner in winners])\n for i in range(config.d_update_steps or 1):\n sess.run(self.d_optimizer)\n\n sess.run(self.clone_parent)\n for i in range(config.g_update_steps or 1):\n sess.run(self.g_optimizer)\n measure_g = sess.run(self.measure_g)\n\n if self.current_step % 100 == 0:\n hist_output = \" \" + \"\".join([\"G\"+str(i)+\":\"+str(v)+\" \"for i, v in enumerate(self.hist)])\n metric_values = sess.run(self.output_variables(metrics), feed_dict)\n print(str(self.output_string(metrics) % tuple([self.current_step] + metric_values)+hist_output))\n self.hist = [0 for i in range(len(self.gan.generator.children))]\n\n", "import 
importlib\nimport json\nimport numpy as np\nimport os\nimport sys\nimport time\nimport uuid\nimport copy\n\nfrom hypergan.discriminators import *\nfrom hypergan.distributions import *\nfrom hypergan.generators import *\nfrom hypergan.inputs import *\nfrom hypergan.samplers import *\nfrom hypergan.trainers import *\n\nimport hyperchamber as hc\nfrom hyperchamber import Config\nfrom hypergan.ops import TensorflowOps\nimport tensorflow as tf\nimport hypergan as hg\n\nfrom hypergan.gan_component import ValidationException, GANComponent\nfrom .standard_gan import StandardGAN\n\nclass DistributionFilteringGAN(StandardGAN):\n \"\"\"\n On Stabilizing Generative Adversarial Training with Noise\n https://arxiv.org/pdf/1906.04612v1.pdf\n \"\"\"\n def create(self):\n config = self.config\n\n with tf.device(self.device):\n self.session = self.ops.new_session(self.ops_config)\n self.latent = self.create_component(config.z_distribution or config.latent)\n self.uniform_distribution = self.latent\n\n z_shape = self.ops.shape(self.latent.sample)\n self.android_input = tf.reshape(self.latent.sample, [-1])\n\n direction, slider = self.create_controls(self.ops.shape(self.android_input))\n self.slider = slider\n self.direction = direction\n z = self.android_input + slider * direction\n z = tf.maximum(-1., z)\n z = tf.minimum(1., z)\n z = tf.reshape(z, z_shape)\n self.control_z = z\n\n self.generator = self.create_component(config.generator, name=\"generator\", input=z)\n self.noise_generator = self.create_component((config.noise_generator or config.generator), name=\"noise_generator\", input=z)\n\n #x, g = tf.concat([self.inputs.x, self.inputs.x + self.noise_generator.sample], axis=3), tf.concat([self.generator.sample, self.generator.sample + self.noise_generator.sample], axis=3)\n\n x1, g1 = self.inputs.x, self.generator.sample\n self.discriminator = self.create_component(config.discriminator, name=\"discriminator\", input=tf.concat([x1,g1],axis=0))\n x2, g2 = self.inputs.x+self.noise_generator.sample, self.generator.sample+self.noise_generator.sample\n self.loss = self.create_component(config.loss, discriminator=self.discriminator)\n self.noise_discriminator = self.create_component(config.discriminator, name=\"discriminator\", input=tf.concat([x2,g2],axis=0), reuse=True)\n noise_loss = self.create_component(config.loss, discriminator=self.noise_discriminator)\n self.loss.sample[0] += noise_loss.sample[0]\n self.loss.sample[1] += noise_loss.sample[1]\n self.trainer = self.create_component(config.trainer)\n\n self.android_output = tf.reshape(self.generator.sample, [-1])\n\n self.session.run(tf.global_variables_initializer())\n\n def g_vars(self):\n return self.latent.variables() + self.generator.variables() + self.noise_generator.variables()\n def d_vars(self):\n return self.discriminator.variables()\n", "from hypergan.gan_component import GANComponent\n\nimport tensorflow as tf\n\nclass MultiComponent():\n \"\"\"\n Used to combine multiple components into one. 
For example, `gan.loss = MultiComponent([loss1, loss2])`\n \"\"\"\n def __init__(self, components=[], combine='concat'):\n self.components = components\n self.gan = components[0].gan\n self._combine = combine\n self._cache = {}\n\n def __getattr__(self, name):\n if len(self.components) == 0:\n return None\n\n attributes = self.lookup(name)\n self._cache[name] = self.combine(name, attributes)\n return self._cache[name]\n\n def lookup(self, name):\n lookups = []\n for component in self.components:\n if hasattr(component, name):\n lookups.append(getattr(component,name))\n else:\n print(\"Warning:Skipping lookup of \"+name+\" because None was returned\")\n\n return lookups\n\n def combine(self, name, data):\n if data == None or data == []:\n return data\n\n if isinstance(data[0], type({})):\n full_dict = {}\n for d in data:\n full_dict.update(d)\n return full_dict\n # loss functions return [d_loss, g_loss]. We combine columnwise.\n if isinstance(data, list) and isinstance(data[0], list) and isinstance(data[0][0], tf.Tensor):\n if(name in self._cache):\n return self._cache[name]\n result = []\n for j,_ in enumerate(data[0]):\n column = []\n for i,_ in enumerate(data):\n column.append(data[i][j])\n reduction = self.reduce(column)\n result.append(reduction)\n\n return result\n\n if type(data[0]) == tf.Tensor:\n if(name in self._cache):\n return self._cache[name]\n return self.reduce(data)\n if callable(data[0]):\n return self.call_each(data)\n return data\n\n def reduce(self, data):\n data = [d for d in data if d is not None]\n ops = self.gan.ops\n if self._combine == 'concat':\n return self.gan.ops.concat(values=data, axis=len(self.gan.ops.shape(data[0]))-1)\n elif self._combine == 'add':\n data = [ops.reshape(d,ops.shape(data[0])) for d in data]\n return self.gan.ops.add_n(data)\n elif self._combine == 'mask':\n def _mask(_net):\n m=tf.slice(_net,[0,0,0,0], [-1,-1,-1,1])\n d=tf.slice(_net,[0,0,0,1], [-1,-1,-1,ops.shape(_net)[-1]-1])\n return tf.nn.sigmoid(m)*d\n data = [_mask(d) for d in data]\n return self.gan.ops.add_n(data)\n\n raise \"Unknown combine\" + self._combine\n\n def call_each(self, methods):\n def do_call(*args, **kwargs):\n results = []\n for method in methods:\n results.append(method(*args, **kwargs))\n return self.combine(str(method), results)\n return do_call\n" ]
[ [ "tensorflow.abs", "tensorflow.assign", "tensorflow.group", "tensorflow.gradients", "tensorflow.nn.sigmoid", "tensorflow.reduce_mean" ], [ "tensorflow.minimum", "tensorflow.concat", "tensorflow.reshape", "tensorflow.device", "tensorflow.maximum", "tensorflow.global_variables_initializer" ], [ "tensorflow.slice", "tensorflow.nn.sigmoid" ] ]
qwerlarlgus/YOLO_Project1
[ "27e7b325439e59c8cf0ee9d6cdfd802a4de6c7d4" ]
[ "contours.py" ]
[ "import sys\nimport random\nimport numpy as np\nimport cv2\n\nsrc = cv2.imread('vlcsnap-2021-02-04-10h00m02s260.png')\n#src = cv2.imread('2_11_11.png')\n\nif src is None:\n print('Image load failed!')\n sys.exit()\n\nsrc = cv2.resize(src, (0, 0), fx=0.5, fy=0.5)\nsrc_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n\nh, w = src.shape[:2]\ndst1 = np.zeros((h, w, 3), np.uint8)\ndst2 = np.zeros((h, w, 3), np.uint8)\n\n# 이진화\n_, src_bin = cv2.threshold(src_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n\n# 외곽선 검출\ncontours, _ = cv2.findContours(src_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\nfor i in range(len(contours)):\n pts = contours[i]\n\n c = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n cv2.drawContours(dst1, contours, i, c, 1)\n\n # 너무 작은 객체는 제외\n if (cv2.contourArea(pts) < 1000):\n continue\n\n # 외곽선 근사화\n approx = cv2.approxPolyDP(pts, cv2.arcLength(pts, True)*0.02, True)\n\n # 컨벡스가 아니면 제외\n if not cv2.isContourConvex(approx):\n continue\n\n if len(approx) == 4:\n cv2.drawContours(dst2, contours, i, c, 2)\n\nprint(contours)\n \ncv2.imshow('src', src)\ncv2.imshow('src_bin', src_bin)\ncv2.imshow('dst1', dst1)\ncv2.imshow('dst2', dst2)\ncv2.waitKey()\n\ncv2.destroyAllWindows()\n\n" ]
[ [ "numpy.zeros" ] ]
MarcWong/PointMVSNet
[ "b48f20f3695eb4418f522daedb60e7329eebf05f" ]
[ "pointmvsnet/utils/preprocess.py" ]
[ "import numpy as np\nimport cv2\nimport math\n\n\ndef norm_image(img):\n \"\"\" normalize image input \"\"\"\n img = img.astype(np.float32)\n var = np.var(img, axis=(0, 1), keepdims=True)\n mean = np.mean(img, axis=(0, 1), keepdims=True)\n return (img - mean) / (np.sqrt(var) + 1e-7)\n\n\ndef mask_depth_image(depth_image, min_depth, max_depth):\n \"\"\" mask out-of-range pixel to zero \"\"\"\n # print ('mask min max', min_depth, max_depth)\n ret, depth_image = cv2.threshold(depth_image, min_depth, 100000, cv2.THRESH_TOZERO)\n ret, depth_image = cv2.threshold(depth_image, max_depth, 100000, cv2.THRESH_TOZERO_INV)\n depth_image = np.expand_dims(depth_image, 2)\n return depth_image\n\n\ndef scale_camera(cam, scale=1):\n \"\"\" resize input in order to produce sampled depth map \"\"\"\n new_cam = np.copy(cam)\n # focal:\n new_cam[1][0][0] = cam[1][0][0] * scale\n new_cam[1][1][1] = cam[1][1][1] * scale\n # principle point:\n new_cam[1][0][2] = cam[1][0][2] * scale\n new_cam[1][1][2] = cam[1][1][2] * scale\n return new_cam\n\n\ndef scale_image(image, scale=1, interpolation='linear'):\n \"\"\" resize image using cv2 \"\"\"\n if interpolation == 'linear':\n return cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)\n if interpolation == 'nearest':\n return cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)\n\n\ndef scale_dtu_input(images, cams, depth_image=None, scale=1):\n \"\"\" resize input to fit into the memory \"\"\"\n for view in range(len(images)):\n images[view] = scale_image(images[view], scale=scale)\n cams[view] = scale_camera(cams[view], scale=scale)\n\n if depth_image is None:\n return images, cams\n else:\n depth_image = scale_image(depth_image, scale=scale, interpolation='nearest')\n return images, cams, depth_image\n\n\ndef crop_dtu_input(images, cams, height, width, base_image_size, depth_image=None):\n \"\"\" resize images and cameras to fit the network (can be divided by base image size) \"\"\"\n\n # crop images and cameras\n for view in range(len(images)):\n h, w = images[view].shape[0:2]\n new_h = h\n new_w = w\n if new_h > height:\n new_h = height\n else:\n new_h = int(math.floor(h / base_image_size) * base_image_size)\n if new_w > width:\n new_w = width\n else:\n new_w = int(math.floor(w / base_image_size) * base_image_size)\n start_h = int(math.floor((h - new_h) / 2))\n start_w = int(math.floor((w - new_w) / 2))\n finish_h = start_h + new_h\n finish_w = start_w + new_w\n images[view] = images[view][start_h:finish_h, start_w:finish_w]\n cams[view][1][0][2] = cams[view][1][0][2] - start_w\n cams[view][1][1][2] = cams[view][1][1][2] - start_h\n\n # crop depth image\n if not depth_image is None:\n depth_image = depth_image[start_h:finish_h, start_w:finish_w]\n return images, cams, depth_image\n else:\n return images, cams\n" ]
[ [ "numpy.copy", "numpy.mean", "numpy.sqrt", "numpy.var", "numpy.expand_dims" ] ]
shimakaze-git/football-data-analysis
[ "12c45e9888436dba71418867512fecddd4683f31" ]
[ "playstyle_similar/playstyle_similar2.py" ]
[ "import pandas as pd\nimport numpy as np\n\nfrom scipy import stats\n\n\ndef columns_views(player_1_df, player_2_df):\n\n columns = list(player_1_df.columns)\n if list(player_1_df.columns) == list(player_2_df.columns):\n columns = list(player_1_df.columns)\n\n player_1 = list(player_1_df.values[0])\n player_2 = list(player_2_df.values[0])\n\n views = []\n for column, player1, player2 in zip(columns, player_1, player_2):\n print('column : {} _ player1-{} , player2-{} < diff : {} >'.format(\n column, player1, player2, abs(player1 - player2)\n ))\n views.append(abs(player1 - player2))\n\n print(views)\n\n\ndef convert_preferred_foot(df):\n\n df['preferred_foot'] = df['preferred_foot'].replace('Right', 1)\n df['preferred_foot'] = df['preferred_foot'].replace('Left', 2)\n\n return df\n\n\ndef convert_work_rate(df):\n\n convert = {\n 'High': 3,\n 'Medium': 2,\n 'Low': 1\n }\n\n work_rate = df['work_rate'].values[0].split('/')\n\n attack = work_rate[0]\n defense = work_rate[1]\n\n df['attack'] = convert[attack]\n df['defense'] = convert[defense]\n\n # work_rateの削除処理\n df = df.drop(columns='work_rate')\n\n return df\n\n\ndef euclidean_distance(v1, v2):\n # ユーグリッド距離を算出\n # https://qiita.com/shim0mura/items/64918dad83d162ef2ac2#ユークリッド距離\n\n # どちらも同じ値を返す\n # distance = np.linalg.norm(v1 - v2)\n distance = np.sqrt(np.power(v1 - v2, 2).sum())\n\n # 0から1までの値で似ていれば似ているほど1に近くなる、みたいな類似度として分かりやすい値が欲しい。\n # 0での除算エラーを防ぐためにこのdに1を足して逆数をとるとそのような値を取ることが出来る。\n # 1/(1+d)\n\n # print('distance', distance)\n\n return 1 / (1 + distance)\n\n\ndef cos_similarity(v1, v2):\n # Scipyを使ってコサイン類似度を求める方法\n # import scipy.spatial.distance as dis\n\n # print(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))\n # print(dis.cosine(v1, v2))\n\n # return dis.cosine(v1, v2)\n\n # cos類似度を算出\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n\n\n# ピアソンの積率相関係数\ndef pearson_product_moment_correlation_coefficien(v1, v2):\n # corr = np.corrcoef(v1, v2)[0, 1]\n corr = stats.pearsonr(v1, v2)\n\n return corr\n\n\n# スピアマンの順位相関係数\ndef spearman_rank_correlation_coefficient(v1, v2):\n corr = stats.spearmanr(v1, v2)\n\n return corr\n\n\n# ケンドールの順位相関係数\ndef kendalltau_rank_correlation_coefficient(v1, v2):\n corr = stats.kendalltau(v1, v2)\n\n return corr\n\n\ndef similarity(v1_df, v2_df):\n v1_value = v1_df.values[0]\n v2_value = v2_df.values[0]\n\n print('v1_value', v1_value)\n print('v2_value', v2_value)\n\n # リストをps.Seriesに変換\n s1 = pd.Series(list(v1_value))\n s2 = pd.Series(list(v2_value))\n\n # 相関係数を計算\n res = s1.corr(s2)\n print(res)\n\n corr = pearson_product_moment_correlation_coefficien(\n v1_value, v2_value\n )\n print('pearson_product_moment_correlation_coefficien', corr)\n\n corr = spearman_rank_correlation_coefficient(\n v1_value, v2_value\n )\n print('spearman_rank_correlation_coefficient', corr)\n\n corr = kendalltau_rank_correlation_coefficient(\n v1_value, v2_value\n )\n print('kendalltau_rank_correlation_coefficient', corr)\n\n e_distance = euclidean_distance(v1_value, v2_value)\n print('e_distance', e_distance)\n\n # return euclidean_distance(v1_value, v2_value)\n # return res\n return cos_similarity(v1_value, v2_value)\n\n\n# 数値型の整形\ndef shaping_num(value):\n\n if '+' in value:\n value = str(value).split('+')\n value = int(value[0]) + int(value[1])\n return value\n\n if '-' in value:\n value = str(value).split('-')\n value = int(value[0]) - int(value[1])\n return value\n\n return value\n\n\ndef need_columns(df):\n columns = [\n 'height_cm',\n 'weight_kg',\n 'preferred_foot',\n 'weak_foot',\n 
'skill_moves',\n 'work_rate',\n 'player_tags',\n\n 'pace',\n 'shooting',\n 'passing',\n 'dribbling',\n 'defending',\n 'physic',\n 'player_traits',\n\n 'attacking_crossing',\n 'attacking_finishing',\n 'attacking_heading_accuracy',\n 'attacking_short_passing',\n 'attacking_volleys',\n\n 'skill_dribbling',\n 'skill_curve',\n 'skill_fk_accuracy',\n 'skill_long_passing',\n 'skill_ball_control',\n\n 'movement_acceleration',\n 'movement_sprint_speed',\n 'movement_agility',\n 'movement_reactions',\n 'movement_balance',\n\n 'power_shot_power',\n 'power_jumping',\n 'power_stamina',\n 'power_strength',\n 'power_long_shots',\n\n 'mentality_aggression',\n 'mentality_interceptions',\n 'mentality_positioning',\n 'mentality_vision',\n 'mentality_penalties',\n 'mentality_composure',\n\n 'defending_marking',\n 'defending_standing_tackle',\n 'defending_sliding_tackle'\n ]\n\n columns += [\n 'ls', 'st', 'rs',\n 'lw', 'lf', 'cf', 'rf', 'rw',\n 'lam', 'cam', 'ram',\n 'lm', 'lcm', 'cm', 'rcm', 'rm',\n 'lwb', 'ldm', 'cdm', 'rdm', 'rwb',\n 'lb', 'lcb', 'cb', 'rcb', 'rb'\n ]\n # ls,st,rs,lw,lf,cf,rf,rw,\n # lam,cam,ram,lm,lcm,cm,rcm,rm,\n # lwb,ldm,cdm,rdm,rwb,lb,lcb,cb,rcb,rb\n\n return df[columns]\n\n\ndef convert_num_values(player_1_df, player_2_df):\n num_values = [\n 'pace',\n 'shooting',\n 'passing',\n 'dribbling',\n 'defending',\n 'physic',\n 'attacking_crossing',\n 'attacking_finishing',\n 'attacking_heading_accuracy',\n 'attacking_short_passing',\n 'attacking_volleys',\n 'skill_dribbling',\n 'skill_curve',\n 'skill_fk_accuracy',\n 'skill_long_passing',\n 'skill_ball_control',\n 'movement_acceleration',\n 'movement_sprint_speed',\n 'movement_agility',\n 'movement_reactions',\n 'movement_balance',\n 'power_shot_power',\n 'power_jumping',\n 'power_stamina',\n 'power_strength',\n 'power_long_shots',\n 'mentality_aggression',\n 'mentality_interceptions',\n 'mentality_positioning',\n 'mentality_vision',\n 'mentality_penalties',\n 'mentality_composure',\n 'defending_marking',\n 'defending_standing_tackle',\n 'defending_sliding_tackle'\n ]\n\n num_values += [\n 'ls', 'st', 'rs',\n 'lw', 'lf', 'cf', 'rf', 'rw',\n 'lam', 'cam', 'ram',\n 'lm', 'lcm', 'cm', 'rcm', 'rm',\n 'lwb', 'ldm', 'cdm', 'rdm', 'rwb',\n 'lb', 'lcb', 'cb', 'rcb', 'rb'\n ]\n\n for v in num_values:\n # player1のデータの数値の整形\n value = player_1_df[v].values.astype(str)[0]\n value = shaping_num(str(value))\n\n # player_1_df[v] = float(value) * 0.01\n player_1_df[v] = float(value)\n\n # player2のデータの数値の整形\n value = player_2_df[v].values.astype(str)[0]\n value = shaping_num(str(value))\n\n # player_2_df[v] = float(value) * 0.01\n player_2_df[v] = float(value)\n\n return player_1_df, player_2_df\n\n\ndef convert_traits(player_1_df, player_2_df):\n\n # 選手特性関連の処理\n traits_list = [\n 'Backs Into Player', # FIFA 18だけの項目\n 'Bicycle Kicks',\n 'Chip Shot',\n 'Dives Into Tackles',\n 'Early Crosser',\n 'Fancy Passes',\n 'Finesse Shot',\n 'Flair',\n 'Giant Throw-In',\n 'GK Cautious With Crosses',\n 'GK Comes For Crosses',\n 'GK Flat Kick',\n 'GK Long Thrower',\n 'GK Save With Foot',\n 'Injury Prone',\n 'Leadership',\n 'Long Passer',\n 'Long Shot Taker',\n 'Long Throw-In',\n 'One Club Player',\n 'Outside Foot Shot',\n 'Play Maker',\n 'Power Header',\n 'Rushes Out Of Goal',\n 'Second Wind',\n 'Set Play Specialist',\n 'Solid Player',\n 'Speed Dribbler',\n 'Swerve',\n 'Takes Powerful Driven Free Kicks',\n 'Team Player',\n 'Technical Dribbler'\n ]\n\n player_1_df_player_traits = player_1_df['player_traits']\n player_2_df_player_traits = player_2_df['player_traits']\n\n 
player_1_df = player_1_df.drop(columns='player_traits')\n player_2_df = player_2_df.drop(columns='player_traits')\n\n for trait in traits_list:\n trait_value = 0\n for p_trait in player_1_df_player_traits.values[0].split(','):\n if trait in p_trait:\n trait_value = 1\n break\n player_1_df[trait] = trait_value\n\n trait_value = 0\n for p_trait in player_2_df_player_traits.values[0].split(','):\n if trait in p_trait:\n trait_value = 1\n break\n player_2_df[trait] = trait_value\n\n return player_1_df, player_2_df\n\n\ndef players_comparison(player_1, player_2):\n df = pd.read_csv('data/players_18.csv')\n\n player_1_df = df.query('sofifa_id == {}'.format(player_1))\n player_2_df = df.query('sofifa_id == {}'.format(player_2))\n # david_silva = df.query('sofifa_id == 189881')\n\n player_1_df = need_columns(player_1_df)\n player_2_df = need_columns(player_2_df)\n\n # num_valuesの変換処理\n player_1_df, player_2_df = convert_num_values(player_1_df, player_2_df)\n\n # 選手特性関連の処理\n player_1_df, player_2_df = convert_traits(player_1_df, player_2_df)\n\n # 選手タグ関連の処理\n player_1_df = player_1_df.drop(columns='player_tags')\n player_2_df = player_2_df.drop(columns='player_tags')\n\n # 利き足の変換\n player_1_df = convert_preferred_foot(player_1_df)\n player_2_df = convert_preferred_foot(player_2_df)\n\n # 攻撃/守備の優先度の変換\n player_1_df = convert_work_rate(player_1_df)\n player_2_df = convert_work_rate(player_2_df)\n\n # print(player_1_df.values[0])\n # print(player_2_df.values)\n cos = similarity(player_1_df, player_2_df)\n print('cos', cos)\n\n # カラムの表示\n # columns_views(player_1_df, player_2_df)\n\n\nshinji_kagawa = 189358\ndavid_silva = 178088\n# david_silva = 41\n# 香川真司 : 189358\n# 本田圭佑 : 186581\n# 清武弘嗣 : 210126\n# イニエスタ: 41\n# スモーリング : 189881\n# セルヒオ・ラモス : 155862\n# マリオ・ゲッツェ : 192318\n# ユリアン・ヴァイグル : 222028\n# ファン・マタ : 178088\n# イスコ : 197781\n# ダビド・シルバ : 168542\n# マルク・バルトラ : 198141\n# ロメル・ルカク : 192505\n# デブルイネ : 192985\n# モドリッチ : 177003\n# クロース : 182521\n# ラキティッチ : 168651\n# ウサマ・デンベレ : 231443\n# リオネル・メッシ : 158023\n# フンメルス : 178603\n# ピケ: 152729\n# ボアテング : 183907\n# メスト・エジル : 176635\n# マルコ・ロイス : 188350\n# イヴァン・ペリシッチ : 181458\n# トーマス・ミュラー : 189596\n# オスカル : 188152\n# ヤルモレンコ : 194794\n# エデン・アザール : 183277\n# ネイマール : 190871\n# ロッベン : 9014\n# サラー : 209331\n# ハリー・ケイン : 202126\n# ムバッペ : 231747\n# グリーズマン : 194765\n# ジェラール・ピケ : 152729\n\n\nplayers_comparison(shinji_kagawa, david_silva)\n\n# columns_views(shinji_kagawa, david_silva)\n\n# Weak Foot(逆足)\n# https://www.fifplay.com/encyclopedia/weak-foot/\n\n# Work Rate(作業率)\n# https://www.fifplay.com/encyclopedia/work-rate/\n\n# ユークリッド距離 vs コサイン類似度\n# https://enjoyworks.jp/tech-blog/2242\n" ]
[ [ "scipy.stats.kendalltau", "numpy.dot", "numpy.linalg.norm", "scipy.stats.spearmanr", "scipy.stats.pearsonr", "numpy.power", "pandas.read_csv" ] ]
reubenjohn/sandblox
[ "0b7917eb866ddbc4749a098884046d4ebb441985" ]
[ "sandblox/util/scope.py" ]
[ "from typing import Any\n\nimport tensorflow as tf\n\nfrom .tf_util import scope_name as get_scope_name\n\n\ndef absolute_scope_name(relative_scope_name):\n\t\"\"\"Appends parent scope name to `relative_scope_name`\"\"\"\n\tbase = get_scope_name()\n\tif len(base) > 0:\n\t\tbase += '/'\n\treturn base + relative_scope_name\n\n\ndef _infer_scope_name(self, scope_name):\n\treturn scope_name if scope_name is not None else type(self).__name__\n\n\ninfer_rel_scope_name = _infer_scope_name\n\n\ndef infer_abs_scope_name(self, scope_name: str = None):\n\tscope_name = infer_rel_scope_name(self, scope_name)\n\treturn absolute_scope_name(scope_name)\n\n\nclass Scope(object):\n\tdef __init__(self, scope_name: str, obj: Any = None):\n\t\tself.rel = self.abs = None\n\t\tself.setup(scope_name, obj)\n\n\tdef setup(self, scope_name: str, obj: Any = None):\n\t\tif scope_name is None:\n\t\t\tassert obj is not None, 'Must provide either scope_name or a reference object to infer scope_name'\n\t\t\tscope_name = type(obj).__name__\n\t\tself.rel = scope_name\n\t\tself.abs = absolute_scope_name(self.rel)\n\n\tdef make_unique(self, graph=None):\n\t\tif graph is None:\n\t\t\tgraph = tf.get_default_graph()\n\t\tself.rel = graph.unique_name(self.rel)\n\t\tself.setup(self.rel)\n\n\t@property\n\tdef exact_rel_pattern(self) -> str:\n\t\treturn self.abs + '/'\n\n\t@property\n\tdef exact_abs_pattern(self) -> str:\n\t\treturn '^' + self.abs + '/'\n\n\nclass UninitializedScope(Scope):\n\t# noinspection PyMissingConstructor\n\tdef __init__(self):\n\t\tpass\n\n\tdef __getattribute__(self, item):\n\t\traise AttributeError('The scope is only available after you call super constructor __init__.\\n'\n\t\t\t\t\t\t\t 'Alternatively, manually setup the scope with self.setup_scope(scope_name)')\n" ]
[ [ "tensorflow.get_default_graph" ] ]
yingchaolu/PROBLEM
[ "aedf8d50276be3074f3035f7ee28c8d679ebb416" ]
[ "problem/deflect.py" ]
[ "\"\"\"\nTakes the gradients of the solution to the screen mapping potential problem and\nreconstructs the perpendicular deflection field.\n\"\"\"\n\nimport numpy as np\nimport scipy as sp\nimport scipy.interpolate\nimport scipy.misc\nimport scipy.ndimage\n\nfrom .constants import M_PROTON_G, ESU, C_CMS\n\ndef reconstruct(ri, li, rs, v, x, y, phix, phiy):\n \"\"\"\n Takes x, y gradients to the solution to screen mapping potential problem and\n reconstructs the perpendicular deflection fields wBx and wBy.\n\n Args:\n ri (float): Distance from source to plasma (cm).\n li (float): Distance across plasma (cm).\n rs (float): Distance from plasma to screen (cm).\n v (float): Velocity of protons (cm/s).\n x (array): Plasma x-coordinates (cm). \n y (array): Plasma x-coordinates (cm).\n phix (array): Gradient of screen mapping potential in x-direction.\n phiy (array): Gradient of screen mapping potential in y-direction.\n\n Returns:\n wBx (array)\n \n \"\"\"\n # TODO Add in option for masking the path-int B field.\n \n # Input variables.\n magnify = (rs + ri + .5*li)/(ri+.5*li)\n map_pot_x = np.copy(phix)\n map_pot_y = np.copy(phiy)\n plasma_x = np.copy(x)\n plasma_y = np.copy(y)\n \n # We multiply the whole expression by magnify to put the perp-deflection\n # fields into screen coordinates.\n wBx = magnify*(v/rs)*(map_pot_x - plasma_x)\n wBy = magnify*(v/rs)*(map_pot_y - plasma_y)\n \n return(wBx, wBy)\n\ndef magpath(wBx, wBy):\n \"\"\"\n Takes the perpendicular deflection field and reconstructs the path\n integrated magnetic field.\n\n Args:\n wBx (array): x-component perpendicular deflection field.\n wBy (array): y-component perpendicular deflection field.\n\n Returns:\n Bxpath (array): Path integrated magnetic field x-component. \n Bypath (array): Path integrated magnetic field y-component.\n \"\"\"\n \n Bxpath = -(M_PROTON_G*C_CMS/ESU)*wBy\n Bypath = (M_PROTON_G*C_CMS/ESU)*wBx\n\n\n return(Bxpath, Bypath)\n\ndef fluximage(ri, li, rs, v, x, y, N, wBx, wBy):\n \"\"\"\n Creates a flux image out of a perpendicular deflection field. 
\n\n Args:\n ri:\n li:\n rs:\n v:\n x (array): Perpendicular deflection field x-coordinates.\n y (array): Perpendicular deflection field y-coordinates.\n wBx (array): Perpendicular deflection field x-component.\n wBy (array): Perpendicular deflection field y-component.\n\n Returns:\n flux_image (array): Generated flux image.\n \"\"\"\n # TODO Maybe change this to act on the reference flux.\n magnify = (rs+ri+.5*li)/(ri+.5*li)\n \n print('Creating interpolator functions...')\n \n #fx = sp.interpolate.RegularGridInterpolator((x[:,0],y[0,:]),x,\n # bounds_error=False)\n #fy = sp.interpolate.RegularGridInterpolator((x[:,0],y[0,:]),y,\n # bounds_error=False)\n fwBx = sp.interpolate.RegularGridInterpolator((x[:,0],y[0,:]),wBx,\n bounds_error=False)\n fwBy = sp.interpolate.RegularGridInterpolator((x[:,0],y[0,:]),wBy,\n bounds_error=False)\n \n print('DONE')\n\n prot_num = int(np.sqrt(N))\n dx = x[1,0] - x[0,0]\n dy = y[0,1] - y[0,0]\n # Need to fix this-- cuts off some of the protons when moving to the centers\n # of the bins.\n samp_x = np.linspace(x[0,0]+.5*dx, x[-1,0]-.5*dx, num=prot_num)\n samp_y = np.linspace(y[0,0]+.5*dy, y[0,-1]-.5*dy, num=prot_num)\n samp_x, samp_y = np.meshgrid(samp_x, samp_y, indexing='ij')\n \n print('Interpolating proton deflections...')\n \n # The sampling of the coordinates is useless.\n #samp_x = fx((samp_x, samp_y))\n #samp_y = fy((samp_x, samp_y))\n samp_wBx = fwBx((samp_x, samp_y))\n samp_wBy = fwBy((samp_x, samp_y))\n \n print('DONE')\n\n screen_x = magnify*samp_x + (rs/v)*samp_wBx\n screen_y = magnify*samp_y + (rs/v)*samp_wBy\n\n print('Histogramming protons...')\n\n flux_image = np.histogram2d(screen_x.ravel(), screen_y.ravel(),bins=x.shape)\n \n print('DONE')\n \n return(flux_image[0])\n\n\ndef fluximage2(x, y, phix, phiy, flux0, scale_fact=1, scale_order=3):\n \"\"\"\n An alternative approach to creating a flux image out of a perpendicular deflection field. \n \n Args:\n x (array): Plasma x-coordinates (cm). \n y (array): Plasma x-coordinates (cm).\n phix (array): Gradient of screen mapping potential in x-direction.\n phiy (array): Gradient of screen mapping potential in y-direction.\n scale_fact: Integer factor by which to upscale arrays before analysis; a larger number slows the algorithm but fills out low-flux regions better\n scale_order: Order of the spline interpolation for scipy.ndimage.zoom\n Returns:\n flux_image (array): Generated flux image.\n \"\"\" \n \n xgv = x[:,0].flatten()\n ygv = y[0,:].flatten()\n \n if scale_fact != 1:\n print(\"Rescaling...\")\n xgv = scipy.ndimage.zoom(xgv, scale_fact, order=scale_order)\n ygv = scipy.ndimage.zoom(ygv, scale_fact, order=scale_order)\n phix = scipy.ndimage.zoom(phix, scale_fact, order=scale_order)\n phiy = scipy.ndimage.zoom(phiy, scale_fact, order=scale_order)\n flux0 = scipy.ndimage.zoom(flux0, scale_fact, order=scale_order)\n \n dx = np.mean(np.diff(xgv))\n dy = np.mean(np.diff(ygv))\n x_edges = np.append(xgv - dx/2.0, xgv[-1] + dx/2.0)\n y_edges = np.append(ygv - dy/2.0, ygv[-1] + dy/2.0)\n \n print('Performing histogram...')\n\n flux_image, _, _ = np.histogram2d(phix.flatten(), phiy.flatten(), bins=[x_edges, y_edges], weights=flux0.flatten())\n \n if scale_fact != 1:\n print(\"Descaling...\")\n flux_image = scipy.misc.imresize(flux_image, 1./scale_fact, mode='F')\n\n print('DONE')\n \n return(flux_image)\n\ndef fluximage3(ri, li, rs, v, x, y, N, wBx, wBy, Ntest):\n \"\"\"\n A Monte Carlo approach to creating a flux image out of a perpendicular deflection field. 
\n \n Args:\n ri:\n li:\n rs:\n v:\n N: Number of protons in reality\n x (array): Perpendicular deflection field x-coordinates.\n y (array): Perpendicular deflection field y-coordinates.\n wBx (array): Perpendicular deflection field x-component.\n wBy (array): Perpendicular deflection field y-component.\n Ntest: Number of test protons (Monte Carlo)\n\n Returns:\n flux_image (array): Generated flux image.\n \"\"\" \n \n # magnify = (rs + ri + li)/(ri)\n magnify = (rs+li+ri)/(ri+.5*li)\n\n xgv = x[:,0].flatten()\n ygv = y[0,:].flatten()\n xmin = np.min(xgv)\n xmax = np.max(xgv)\n ymin = np.min(ygv)\n ymax = np.max(ygv)\n\n dx = np.mean(np.diff(xgv))\n dy = np.mean(np.diff(ygv))\n x_edges = np.append(xgv - dx/2.0, xgv[-1] + dx/2.0)\n y_edges = np.append(ygv - dy/2.0, ygv[-1] + dy/2.0)\n \n # xd: N-element 1d Numpy Array, x positions of particles at deflection plane, in SI units\n # yd: N-element 1d Numpy Array, y positions of particles at deflection plane, in SI units\n xd = np.random.uniform(xmin, xmax, size=(Ntest,))\n yd = np.random.uniform(ymin, ymax, size=(Ntest,))\n \n xyd = np.stack((xd, yd), axis=1)\n #del xd, yd\n \n #wBx_rbv = sp.interpolate.RectBivariateSpline(xgv, ygv, wBx)\n #wBy_rbv = sp.interpolate.RectBivariateSpline(xgv, ygv, wBy)\n #wBxd = wBx_rbv.ev(xd, yd)\n #wByd = wBy_rbv.ev(xd, yd)\n \n wBxd = sp.interpolate.interpn((xgv, ygv), wBx, xyd, method='linear')\n wByd = sp.interpolate.interpn((xgv, ygv), wBy, xyd, method='linear')\n\n xfd = xd + rs/(magnify*v) * wBxd\n yfd = yd + rs/(magnify*v) * wByd\n \n print(\"Histogramming reference...\")\n flux_ref, _, _ = np.histogram2d(xd, yd, bins=[x_edges, y_edges])\n flux_ref = flux_ref * N/Ntest\n \n print(\"Histogramming signal...\")\n flux_image, _, _ = np.histogram2d(xfd, yfd, bins=[x_edges, y_edges])\n flux_image = flux_image * N/Ntest\n\n print('DONE')\n \n return(flux_image, flux_ref)\n\n\n" ]
[ [ "numpy.max", "numpy.histogram2d", "numpy.copy", "numpy.min", "scipy.interpolate.interpn", "numpy.diff", "numpy.random.uniform", "numpy.stack", "numpy.sqrt", "numpy.append", "numpy.linspace", "scipy.interpolate.RegularGridInterpolator", "numpy.meshgrid" ] ]
abhineet123/animal_detection_
[ "be0dd60d2b56b267f329b7be71d7f037499f98bc", "be0dd60d2b56b267f329b7be71d7f037499f98bc", "be0dd60d2b56b267f329b7be71d7f037499f98bc", "be0dd60d2b56b267f329b7be71d7f037499f98bc" ]
[ "tf_api/core/box_predictor_test.py", "tf_api/core/keypoint_ops_test.py", "tf_api/_train.py", "tf_api/core/losses_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.core.box_predictor.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom tf_api.builders import hyperparams_builder\nfrom tf_api.core import box_predictor\nfrom tf_api.protos import hyperparams_pb2\n\n\nclass MaskRCNNBoxPredictorTest(tf.test.TestCase):\n\n def _build_arg_scope_with_hyperparams(self,\n op_type=hyperparams_pb2.Hyperparams.FC):\n hyperparams = hyperparams_pb2.Hyperparams()\n hyperparams_text_proto = \"\"\"\n activation: NONE\n regularizer {\n l2_regularizer {\n }\n }\n initializer {\n truncated_normal_initializer {\n }\n }\n \"\"\"\n text_format.Merge(hyperparams_text_proto, hyperparams)\n hyperparams.op = op_type\n return hyperparams_builder.build(hyperparams, is_training=True)\n\n def test_get_boxes_with_five_classes(self):\n image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)\n mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(\n is_training=False,\n num_classes=5,\n fc_hyperparams=self._build_arg_scope_with_hyperparams(),\n use_dropout=False,\n dropout_keep_prob=0.5,\n box_code_size=4,\n )\n box_predictions = mask_box_predictor.predict(\n image_features, num_predictions_per_location=1, scope='BoxPredictor')\n box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]\n class_predictions_with_background = box_predictions[\n box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n (box_encodings_shape,\n class_predictions_with_background_shape) = sess.run(\n [tf.shape(box_encodings),\n tf.shape(class_predictions_with_background)])\n self.assertAllEqual(box_encodings_shape, [2, 1, 5, 4])\n self.assertAllEqual(class_predictions_with_background_shape, [2, 1, 6])\n\n def test_value_error_on_predict_instance_masks_with_no_conv_hyperparms(self):\n with self.assertRaises(ValueError):\n box_predictor.MaskRCNNBoxPredictor(\n is_training=False,\n num_classes=5,\n fc_hyperparams=self._build_arg_scope_with_hyperparams(),\n use_dropout=False,\n dropout_keep_prob=0.5,\n box_code_size=4,\n predict_instance_masks=True)\n\n def test_get_instance_masks(self):\n image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)\n mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(\n is_training=False,\n num_classes=5,\n fc_hyperparams=self._build_arg_scope_with_hyperparams(),\n use_dropout=False,\n dropout_keep_prob=0.5,\n box_code_size=4,\n conv_hyperparams=self._build_arg_scope_with_hyperparams(\n op_type=hyperparams_pb2.Hyperparams.CONV),\n predict_instance_masks=True)\n box_predictions = mask_box_predictor.predict(\n image_features, num_predictions_per_location=1, scope='BoxPredictor')\n mask_predictions = box_predictions[box_predictor.MASK_PREDICTIONS]\n self.assertListEqual([2, 1, 5, 14, 
14],\n mask_predictions.get_shape().as_list())\n\n def test_do_not_return_instance_masks_and_keypoints_without_request(self):\n image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)\n mask_box_predictor = box_predictor.MaskRCNNBoxPredictor(\n is_training=False,\n num_classes=5,\n fc_hyperparams=self._build_arg_scope_with_hyperparams(),\n use_dropout=False,\n dropout_keep_prob=0.5,\n box_code_size=4)\n box_predictions = mask_box_predictor.predict(\n image_features, num_predictions_per_location=1, scope='BoxPredictor')\n self.assertEqual(len(box_predictions), 2)\n self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions)\n self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND\n in box_predictions)\n\n def test_value_error_on_predict_keypoints(self):\n with self.assertRaises(ValueError):\n box_predictor.MaskRCNNBoxPredictor(\n is_training=False,\n num_classes=5,\n fc_hyperparams=self._build_arg_scope_with_hyperparams(),\n use_dropout=False,\n dropout_keep_prob=0.5,\n box_code_size=4,\n predict_keypoints=True)\n\n\nclass RfcnBoxPredictorTest(tf.test.TestCase):\n\n def _build_arg_scope_with_conv_hyperparams(self):\n conv_hyperparams = hyperparams_pb2.Hyperparams()\n conv_hyperparams_text_proto = \"\"\"\n regularizer {\n l2_regularizer {\n }\n }\n initializer {\n truncated_normal_initializer {\n }\n }\n \"\"\"\n text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)\n return hyperparams_builder.build(conv_hyperparams, is_training=True)\n\n def test_get_correct_box_encoding_and_class_prediction_shapes(self):\n image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)\n proposal_boxes = tf.random_normal([4, 2, 4], dtype=tf.float32)\n rfcn_box_predictor = box_predictor.RfcnBoxPredictor(\n is_training=False,\n num_classes=2,\n conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),\n num_spatial_bins=[3, 3],\n depth=4,\n crop_size=[12, 12],\n box_code_size=4\n )\n box_predictions = rfcn_box_predictor.predict(\n image_features, num_predictions_per_location=1, scope='BoxPredictor',\n proposal_boxes=proposal_boxes)\n box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]\n class_predictions_with_background = box_predictions[\n box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n (box_encodings_shape,\n class_predictions_shape) = sess.run(\n [tf.shape(box_encodings),\n tf.shape(class_predictions_with_background)])\n self.assertAllEqual(box_encodings_shape, [8, 1, 2, 4])\n self.assertAllEqual(class_predictions_shape, [8, 1, 3])\n\n\nclass ConvolutionalBoxPredictorTest(tf.test.TestCase):\n\n def _build_arg_scope_with_conv_hyperparams(self):\n conv_hyperparams = hyperparams_pb2.Hyperparams()\n conv_hyperparams_text_proto = \"\"\"\n activation: RELU_6\n regularizer {\n l2_regularizer {\n }\n }\n initializer {\n truncated_normal_initializer {\n }\n }\n \"\"\"\n text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)\n return hyperparams_builder.build(conv_hyperparams, is_training=True)\n\n def test_get_boxes_for_five_aspect_ratios_per_location(self):\n image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)\n conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(\n is_training=False,\n num_classes=0,\n conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),\n min_depth=0,\n max_depth=32,\n num_layers_before_predictor=1,\n use_dropout=True,\n dropout_keep_prob=0.8,\n kernel_size=1,\n box_code_size=4\n )\n 
box_predictions = conv_box_predictor.predict(\n image_features, num_predictions_per_location=5, scope='BoxPredictor')\n box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]\n objectness_predictions = box_predictions[\n box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n (box_encodings_shape,\n objectness_predictions_shape) = sess.run(\n [tf.shape(box_encodings), tf.shape(objectness_predictions)])\n self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])\n self.assertAllEqual(objectness_predictions_shape, [4, 320, 1])\n\n def test_get_boxes_for_one_aspect_ratio_per_location(self):\n image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)\n conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(\n is_training=False,\n num_classes=0,\n conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),\n min_depth=0,\n max_depth=32,\n num_layers_before_predictor=1,\n use_dropout=True,\n dropout_keep_prob=0.8,\n kernel_size=1,\n box_code_size=4\n )\n box_predictions = conv_box_predictor.predict(\n image_features, num_predictions_per_location=1, scope='BoxPredictor')\n box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]\n objectness_predictions = box_predictions[\n box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n (box_encodings_shape,\n objectness_predictions_shape) = sess.run(\n [tf.shape(box_encodings), tf.shape(objectness_predictions)])\n self.assertAllEqual(box_encodings_shape, [4, 64, 1, 4])\n self.assertAllEqual(objectness_predictions_shape, [4, 64, 1])\n\n def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(\n self):\n num_classes_without_background = 6\n image_features = tf.random_uniform([4, 8, 8, 64], dtype=tf.float32)\n conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(\n is_training=False,\n num_classes=num_classes_without_background,\n conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),\n min_depth=0,\n max_depth=32,\n num_layers_before_predictor=1,\n use_dropout=True,\n dropout_keep_prob=0.8,\n kernel_size=1,\n box_code_size=4\n )\n box_predictions = conv_box_predictor.predict(\n image_features,\n num_predictions_per_location=5,\n scope='BoxPredictor')\n box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]\n class_predictions_with_background = box_predictions[\n box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]\n\n init_op = tf.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n (box_encodings_shape, class_predictions_with_background_shape\n ) = sess.run([\n tf.shape(box_encodings), tf.shape(class_predictions_with_background)])\n self.assertAllEqual(box_encodings_shape, [4, 320, 1, 4])\n self.assertAllEqual(class_predictions_with_background_shape,\n [4, 320, num_classes_without_background+1])\n\n def test_get_boxes_for_five_aspect_ratios_per_location_fully_convolutional(\n self):\n image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])\n conv_box_predictor = box_predictor.ConvolutionalBoxPredictor(\n is_training=False,\n num_classes=0,\n conv_hyperparams=self._build_arg_scope_with_conv_hyperparams(),\n min_depth=0,\n max_depth=32,\n num_layers_before_predictor=1,\n use_dropout=True,\n dropout_keep_prob=0.8,\n kernel_size=1,\n box_code_size=4\n )\n box_predictions = conv_box_predictor.predict(\n image_features, 
num_predictions_per_location=5, scope='BoxPredictor')\n box_encodings = box_predictions[box_predictor.BOX_ENCODINGS]\n objectness_predictions = box_predictions[\n box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]\n init_op = tf.global_variables_initializer()\n\n resolution = 32\n expected_num_anchors = resolution*resolution*5\n with self.test_session() as sess:\n sess.run(init_op)\n (box_encodings_shape,\n objectness_predictions_shape) = sess.run(\n [tf.shape(box_encodings), tf.shape(objectness_predictions)],\n feed_dict={image_features:\n np.random.rand(4, resolution, resolution, 64)})\n self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])\n self.assertAllEqual(objectness_predictions_shape,\n [4, expected_num_anchors, 1])\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.core.keypoint_ops.\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nfrom tf_api.core import keypoint_ops\n\n\nclass KeypointOpsTest(tf.test.TestCase):\n \"\"\"Tests for common keypoint operations.\"\"\"\n\n def test_scale(self):\n keypoints = tf.constant([\n [[0.0, 0.0], [100.0, 200.0]],\n [[50.0, 120.0], [100.0, 140.0]]\n ])\n y_scale = tf.constant(1.0 / 100)\n x_scale = tf.constant(1.0 / 200)\n\n expected_keypoints = tf.constant([\n [[0., 0.], [1.0, 1.0]],\n [[0.5, 0.6], [1.0, 0.7]]\n ])\n output = keypoint_ops.scale(keypoints, y_scale, x_scale)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_clip_to_window(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n window = tf.constant([0.25, 0.25, 0.75, 0.75])\n\n expected_keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.25], [0.75, 0.75]]\n ])\n output = keypoint_ops.clip_to_window(keypoints, window)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_prune_outside_window(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n window = tf.constant([0.25, 0.25, 0.75, 0.75])\n\n expected_keypoints = tf.constant([[[0.25, 0.5], [0.75, 0.75]],\n [[np.nan, np.nan], [np.nan, np.nan]]])\n output = keypoint_ops.prune_outside_window(keypoints, window)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_change_coordinate_frame(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n window = tf.constant([0.25, 0.25, 0.75, 0.75])\n\n expected_keypoints = tf.constant([\n [[0, 0.5], [1.0, 1.0]],\n [[0.5, -0.5], [1.5, 
1.5]]\n ])\n output = keypoint_ops.change_coordinate_frame(keypoints, window)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_to_normalized_coordinates(self):\n keypoints = tf.constant([\n [[10., 30.], [30., 45.]],\n [[20., 0.], [40., 60.]]\n ])\n output = keypoint_ops.to_normalized_coordinates(\n keypoints, 40, 60)\n expected_keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_to_normalized_coordinates_already_normalized(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n output = keypoint_ops.to_normalized_coordinates(\n keypoints, 40, 60)\n\n with self.test_session() as sess:\n with self.assertRaisesOpError('assertion failed'):\n sess.run(output)\n\n def test_to_absolute_coordinates(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n output = keypoint_ops.to_absolute_coordinates(\n keypoints, 40, 60)\n expected_keypoints = tf.constant([\n [[10., 30.], [30., 45.]],\n [[20., 0.], [40., 60.]]\n ])\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_to_absolute_coordinates_already_absolute(self):\n keypoints = tf.constant([\n [[10., 30.], [30., 45.]],\n [[20., 0.], [40., 60.]]\n ])\n output = keypoint_ops.to_absolute_coordinates(\n keypoints, 40, 60)\n\n with self.test_session() as sess:\n with self.assertRaisesOpError('assertion failed'):\n sess.run(output)\n\n def test_flip_horizontal(self):\n keypoints = tf.constant([\n [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],\n [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]\n ])\n flip_permutation = [0, 2, 1]\n\n expected_keypoints = tf.constant([\n [[0.1, 0.9], [0.3, 0.7], [0.2, 0.8]],\n [[0.4, 0.6], [0.6, 0.4], [0.5, 0.5]],\n ])\n output = keypoint_ops.flip_horizontal(keypoints, 0.5, flip_permutation)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_flip_vertical(self):\n keypoints = tf.constant([\n [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],\n [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]\n ])\n flip_permutation = [0, 2, 1]\n\n expected_keypoints = tf.constant([\n [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]],\n [[0.6, 0.4], [0.4, 0.6], [0.5, 0.5]],\n ])\n output = keypoint_ops.flip_vertical(keypoints, 0.5, flip_permutation)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_rot90(self):\n keypoints = tf.constant([\n [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],\n [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]]\n ])\n expected_keypoints = tf.constant([\n [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]],\n [[0.4, 0.4], [0.4, 0.5], [0.3, 0.6]],\n ])\n output = keypoint_ops.rot90(keypoints)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"Training executable for detection models.\n\nThis executable is used to train DetectionModels. There are two ways of\nconfiguring the training job:\n\n1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file\ncan be specified by --pipeline_config_path.\n\nExample usage:\n ./train \\\n --logtostderr \\\n --train_dir=path/to/train_dir \\\n --pipeline_config_path=pipeline_config.pbtxt\n\n2) Three configuration files can be provided: a model_pb2.DetectionModel\nconfiguration file to define what type of DetectionModel is being trained, an\ninput_reader_pb2.InputReader file to specify what training data will be used and\na train_pb2.TrainConfig file to configure training parameters.\n\nExample usage:\n ./train \\\n --logtostderr \\\n --train_dir=path/to/train_dir \\\n --model_config_path=model_config.pbtxt \\\n --train_config_path=train_config.pbtxt \\\n --input_config_path=train_input_config.pbtxt\n\"\"\"\n\nimport sys\ntry:\n sys.path.remove('/home/abhineet/labelling_tool/object_detection_module')\n sys.path.remove('/home/abhineet/labelling_tool/object_detection_module/object_detection')\nexcept:\n pass\n\ntry:\n sys.path.remove('/home/abhineet/617_w18/Assignment2/models/research/object_detection')\nexcept:\n pass\n # print('could not remove /home/abhineet/617_w18/Assignment2/models/research/object_detection')\n\ntry:\n sys.path.remove('/home/abhineet/617_w18/Assignment2/models/research')\nexcept:\n pass\n # print('could not remove /home/abhineet/617_w18/Assignment2/models/research')\nsys.path.append(\"..\")\n\nimport functools\nimport json\nimport os\nimport tensorflow as tf\n\nfrom tf_api import trainer\nfrom tf_api.builders import input_reader_builder\nfrom tf_api.builders import model_builder\nfrom tf_api.utils import config_util\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\nflags = tf.app.flags\nflags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')\nflags.DEFINE_integer('task', 0, 'task id')\nflags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.')\nflags.DEFINE_boolean('clone_on_cpu', False,\n 'Force clones to be deployed on CPU. Note that even if '\n 'set to False (allowing ops to run on gpu), some ops may '\n 'still be run on the CPU if they have no GPU kernel.')\nflags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer '\n 'replicas.')\nflags.DEFINE_integer('ps_tasks', 0,\n 'Number of parameter server tasks. If None, does not use '\n 'a parameter server.')\nflags.DEFINE_string('train_dir', '',\n 'Directory to save the checkpoints and training summaries.')\n\nflags.DEFINE_string('pipeline_config_path', '',\n 'Path to a pipeline_pb2.TrainEvalPipelineConfig config '\n 'file. 
If provided, other configs are ignored')\n\nflags.DEFINE_string('train_config_path', '',\n 'Path to a train_pb2.TrainConfig config file.')\nflags.DEFINE_string('input_config_path', '',\n 'Path to an input_reader_pb2.InputReader config file.')\nflags.DEFINE_string('model_config_path', '',\n 'Path to a model_pb2.DetectionModel config file.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)\n #sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n assert FLAGS.train_dir, '`train_dir` is missing.'\n if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir)\n if FLAGS.pipeline_config_path:\n configs = config_util.get_configs_from_pipeline_file(\n FLAGS.pipeline_config_path)\n if FLAGS.task == 0:\n tf.gfile.Copy(FLAGS.pipeline_config_path,\n os.path.join(FLAGS.train_dir, 'pipeline.config'),\n overwrite=True)\n else:\n configs = config_util.get_configs_from_multiple_files(\n model_config_path=FLAGS.model_config_path,\n train_config_path=FLAGS.train_config_path,\n train_input_config_path=FLAGS.input_config_path)\n if FLAGS.task == 0:\n for name, config in [('model.config', FLAGS.model_config_path),\n ('train.config', FLAGS.train_config_path),\n ('input.config', FLAGS.input_config_path)]:\n tf.gfile.Copy(config, os.path.join(FLAGS.train_dir, name),\n overwrite=True)\n\n model_config = configs['model']\n train_config = configs['train_config']\n input_config = configs['train_input_config']\n\n model_fn = functools.partial(\n model_builder.build,\n model_config=model_config,\n is_training=True)\n\n create_input_dict_fn = functools.partial(\n input_reader_builder.build, input_config)\n\n env = json.loads(os.environ.get('TF_CONFIG', '{}'))\n cluster_data = env.get('cluster', None)\n cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None\n task_data = env.get('task', None) or {'type': 'master', 'index': 0}\n task_info = type('TaskSpec', (object,), task_data)\n\n # Parameters for a single worker.\n ps_tasks = 0\n worker_replicas = 1\n worker_job_name = 'lonely_worker'\n task = 0\n is_chief = True\n master = ''\n\n if cluster_data and 'worker' in cluster_data:\n # Number of total worker replicas include \"worker\"s and the \"master\".\n worker_replicas = len(cluster_data['worker']) + 1\n if cluster_data and 'ps' in cluster_data:\n ps_tasks = len(cluster_data['ps'])\n\n if worker_replicas > 1 and ps_tasks < 1:\n raise ValueError('At least 1 ps task is needed for distributed training.')\n\n if worker_replicas >= 1 and ps_tasks > 0:\n # Set up distributed training.\n server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc',\n job_name=task_info.type,\n task_index=task_info.index)\n if task_info.type == 'ps':\n server.join()\n return\n\n worker_job_name = '%s/task:%d' % (task_info.type, task_info.index)\n task = task_info.index\n is_chief = (task_info.type == 'master')\n master = server.target\n\n trainer.train(create_input_dict_fn, model_fn, train_config, master, task,\n FLAGS.num_clones, worker_replicas, FLAGS.clone_on_cpu, ps_tasks,\n worker_job_name, is_chief, FLAGS.train_dir)\n\n\nif __name__ == '__main__':\n tf.app.run()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for google3.research.vale.object_detection.losses.\"\"\"\nimport math\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tf_api.core import box_list\nfrom tf_api.core import losses\nfrom tf_api.core import matcher\n\n\nclass WeightedL2LocalizationLossTest(tf.test.TestCase):\n\n def testReturnsCorrectLoss(self):\n batch_size = 3\n num_anchors = 10\n code_size = 4\n prediction_tensor = tf.ones([batch_size, num_anchors, code_size])\n target_tensor = tf.zeros([batch_size, num_anchors, code_size])\n weights = tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], tf.float32)\n loss_op = losses.WeightedL2LocalizationLoss()\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n\n expected_loss = (3 * 5 * 4) / 2.0\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, expected_loss)\n\n def testReturnsCorrectAnchorwiseLoss(self):\n batch_size = 3\n num_anchors = 16\n code_size = 4\n prediction_tensor = tf.ones([batch_size, num_anchors, code_size])\n target_tensor = tf.zeros([batch_size, num_anchors, code_size])\n weights = tf.ones([batch_size, num_anchors])\n loss_op = losses.WeightedL2LocalizationLoss(anchorwise_output=True)\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n\n expected_loss = np.ones((batch_size, num_anchors)) * 2\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, expected_loss)\n\n def testReturnsCorrectLossSum(self):\n batch_size = 3\n num_anchors = 16\n code_size = 4\n prediction_tensor = tf.ones([batch_size, num_anchors, code_size])\n target_tensor = tf.zeros([batch_size, num_anchors, code_size])\n weights = tf.ones([batch_size, num_anchors])\n loss_op = losses.WeightedL2LocalizationLoss(anchorwise_output=False)\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n\n expected_loss = tf.nn.l2_loss(prediction_tensor - target_tensor)\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n expected_loss_output = sess.run(expected_loss)\n self.assertAllClose(loss_output, expected_loss_output)\n\n def testReturnsCorrectNanLoss(self):\n batch_size = 3\n num_anchors = 10\n code_size = 4\n prediction_tensor = tf.ones([batch_size, num_anchors, code_size])\n target_tensor = tf.concat([\n tf.zeros([batch_size, num_anchors, code_size / 2]),\n tf.ones([batch_size, num_anchors, code_size / 2]) * np.nan\n ], axis=2)\n weights = tf.ones([batch_size, num_anchors])\n loss_op = losses.WeightedL2LocalizationLoss()\n loss = loss_op(prediction_tensor, target_tensor, weights=weights,\n ignore_nan_targets=True)\n\n expected_loss = (3 * 5 * 4) / 2.0\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, expected_loss)\n\n\nclass 
WeightedSmoothL1LocalizationLossTest(tf.test.TestCase):\n\n def testReturnsCorrectLoss(self):\n batch_size = 2\n num_anchors = 3\n code_size = 4\n prediction_tensor = tf.constant([[[2.5, 0, .4, 0],\n [0, 0, 0, 0],\n [0, 2.5, 0, .4]],\n [[3.5, 0, 0, 0],\n [0, .4, 0, .9],\n [0, 0, 1.5, 0]]], tf.float32)\n target_tensor = tf.zeros([batch_size, num_anchors, code_size])\n weights = tf.constant([[2, 1, 1],\n [0, 3, 0]], tf.float32)\n loss_op = losses.WeightedSmoothL1LocalizationLoss()\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n\n exp_loss = 7.695\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, exp_loss)\n\n\nclass WeightedIOULocalizationLossTest(tf.test.TestCase):\n\n def testReturnsCorrectLoss(self):\n prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1],\n [0, 0, 1, 1],\n [0, 0, .5, .25]]])\n target_tensor = tf.constant([[[1.5, 0, 2.4, 1],\n [0, 0, 1, 1],\n [50, 50, 500.5, 100.25]]])\n weights = [[1.0, .5, 2.0]]\n loss_op = losses.WeightedIOULocalizationLoss()\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n exp_loss = 2.0\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, exp_loss)\n\n\nclass WeightedSigmoidClassificationLossTest(tf.test.TestCase):\n\n def testReturnsCorrectLoss(self):\n prediction_tensor = tf.constant([[[-100, 100, -100],\n [100, -100, -100],\n [100, 0, -100],\n [-100, -100, 100]],\n [[-100, 0, 100],\n [-100, 100, -100],\n [100, 100, 100],\n [0, 0, -1]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [1, 1, 1],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1],\n [1, 1, 1, 0]], tf.float32)\n loss_op = losses.WeightedSigmoidClassificationLoss()\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n\n exp_loss = -2 * math.log(.5)\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, exp_loss)\n\n def testReturnsCorrectAnchorWiseLoss(self):\n prediction_tensor = tf.constant([[[-100, 100, -100],\n [100, -100, -100],\n [100, 0, -100],\n [-100, -100, 100]],\n [[-100, 0, 100],\n [-100, 100, -100],\n [100, 100, 100],\n [0, 0, -1]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [1, 1, 1],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1],\n [1, 1, 1, 0]], tf.float32)\n loss_op = losses.WeightedSigmoidClassificationLoss(True)\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n\n exp_loss = np.matrix([[0, 0, -math.log(.5), 0],\n [-math.log(.5), 0, 0, 0]])\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, exp_loss)\n\n def testReturnsCorrectLossWithClassIndices(self):\n prediction_tensor = tf.constant([[[-100, 100, -100, 100],\n [100, -100, -100, -100],\n [100, 0, -100, 100],\n [-100, -100, 100, -100]],\n [[-100, 0, 100, 100],\n [-100, 100, -100, 100],\n [100, 100, 100, 100],\n [0, 0, -1, 100]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0, 0],\n [1, 0, 0, 1],\n [1, 0, 0, 0],\n [0, 0, 1, 1]],\n [[0, 0, 1, 0],\n [0, 1, 0, 0],\n [1, 1, 1, 0],\n [1, 0, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1],\n [1, 1, 1, 0]], tf.float32)\n # Ignores the last class.\n class_indices = tf.constant([0, 1, 2], tf.int32)\n loss_op = losses.WeightedSigmoidClassificationLoss(True)\n loss = loss_op(prediction_tensor, 
target_tensor, weights=weights,\n class_indices=class_indices)\n\n exp_loss = np.matrix([[0, 0, -math.log(.5), 0],\n [-math.log(.5), 0, 0, 0]])\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, exp_loss)\n\n\ndef _logit(probability):\n return math.log(probability / (1. - probability))\n\n\nclass SigmoidFocalClassificationLossTest(tf.test.TestCase):\n\n def testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy(self):\n prediction_tensor = tf.constant([[[_logit(0.97)],\n [_logit(0.90)],\n [_logit(0.73)],\n [_logit(0.27)],\n [_logit(0.09)],\n [_logit(0.03)]]], tf.float32)\n target_tensor = tf.constant([[[1],\n [1],\n [1],\n [0],\n [0],\n [0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1, 1, 1]], tf.float32)\n focal_loss_op = losses.SigmoidFocalClassificationLoss(\n anchorwise_output=True, gamma=2.0, alpha=None)\n sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(\n anchorwise_output=True)\n focal_loss = focal_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n\n with self.test_session() as sess:\n sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])\n order_of_ratio = np.power(10,\n np.floor(np.log10(sigmoid_loss / focal_loss)))\n self.assertAllClose(order_of_ratio, [[1000, 100, 10, 10, 100, 1000]])\n\n def testHardExamplesProduceLossComparableToSigmoidXEntropy(self):\n prediction_tensor = tf.constant([[[_logit(0.55)],\n [_logit(0.52)],\n [_logit(0.50)],\n [_logit(0.48)],\n [_logit(0.45)]]], tf.float32)\n target_tensor = tf.constant([[[1],\n [1],\n [1],\n [0],\n [0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)\n focal_loss_op = losses.SigmoidFocalClassificationLoss(\n anchorwise_output=True, gamma=2.0, alpha=None)\n sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(\n anchorwise_output=True)\n focal_loss = focal_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n\n with self.test_session() as sess:\n sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])\n order_of_ratio = np.power(10,\n np.floor(np.log10(sigmoid_loss / focal_loss)))\n self.assertAllClose(order_of_ratio, [[1., 1., 1., 1., 1.]])\n\n def testNonAnchorWiseOutputComparableToSigmoidXEntropy(self):\n prediction_tensor = tf.constant([[[_logit(0.55)],\n [_logit(0.52)],\n [_logit(0.50)],\n [_logit(0.48)],\n [_logit(0.45)]]], tf.float32)\n target_tensor = tf.constant([[[1],\n [1],\n [1],\n [0],\n [0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)\n focal_loss_op = losses.SigmoidFocalClassificationLoss(\n anchorwise_output=False, gamma=2.0, alpha=None)\n sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(\n anchorwise_output=False)\n focal_loss = focal_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n\n with self.test_session() as sess:\n sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])\n order_of_ratio = np.power(10,\n np.floor(np.log10(sigmoid_loss / focal_loss)))\n self.assertAlmostEqual(order_of_ratio, 1.)\n\n def testIgnoreNegativeExampleLossViaAlphaMultiplier(self):\n prediction_tensor = tf.constant([[[_logit(0.55)],\n [_logit(0.52)],\n [_logit(0.50)],\n [_logit(0.48)],\n [_logit(0.45)]]], tf.float32)\n target_tensor = tf.constant([[[1],\n [1],\n [1],\n [0],\n [0]]], 
tf.float32)\n weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)\n focal_loss_op = losses.SigmoidFocalClassificationLoss(\n anchorwise_output=True, gamma=2.0, alpha=1.0)\n sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(\n anchorwise_output=True)\n focal_loss = focal_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n\n with self.test_session() as sess:\n sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])\n self.assertAllClose(focal_loss[0][3:], [0., 0.])\n order_of_ratio = np.power(10,\n np.floor(np.log10(sigmoid_loss[0][:3] /\n focal_loss[0][:3])))\n self.assertAllClose(order_of_ratio, [1., 1., 1.])\n\n def testIgnorePositiveExampleLossViaAlphaMultiplier(self):\n prediction_tensor = tf.constant([[[_logit(0.55)],\n [_logit(0.52)],\n [_logit(0.50)],\n [_logit(0.48)],\n [_logit(0.45)]]], tf.float32)\n target_tensor = tf.constant([[[1],\n [1],\n [1],\n [0],\n [0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)\n focal_loss_op = losses.SigmoidFocalClassificationLoss(\n anchorwise_output=True, gamma=2.0, alpha=0.0)\n sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(\n anchorwise_output=True)\n focal_loss = focal_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n\n with self.test_session() as sess:\n sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])\n self.assertAllClose(focal_loss[0][:3], [0., 0., 0.])\n order_of_ratio = np.power(10,\n np.floor(np.log10(sigmoid_loss[0][3:] /\n focal_loss[0][3:])))\n self.assertAllClose(order_of_ratio, [1., 1.])\n\n def testSimilarToSigmoidXEntropyWithHalfAlphaAndZeroGammaUpToAScale(self):\n prediction_tensor = tf.constant([[[-100, 100, -100],\n [100, -100, -100],\n [100, 0, -100],\n [-100, -100, 100]],\n [[-100, 0, 100],\n [-100, 100, -100],\n [100, 100, 100],\n [0, 0, -1]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [1, 1, 1],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1],\n [1, 1, 1, 0]], tf.float32)\n focal_loss_op = losses.SigmoidFocalClassificationLoss(\n anchorwise_output=True, alpha=0.5, gamma=0.0)\n sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(\n anchorwise_output=True)\n focal_loss = focal_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n\n with self.test_session() as sess:\n sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])\n self.assertAllClose(sigmoid_loss, focal_loss * 2)\n\n def testSameAsSigmoidXEntropyWithNoAlphaAndZeroGamma(self):\n prediction_tensor = tf.constant([[[-100, 100, -100],\n [100, -100, -100],\n [100, 0, -100],\n [-100, -100, 100]],\n [[-100, 0, 100],\n [-100, 100, -100],\n [100, 100, 100],\n [0, 0, -1]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [1, 1, 1],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1],\n [1, 1, 1, 0]], tf.float32)\n focal_loss_op = losses.SigmoidFocalClassificationLoss(\n anchorwise_output=True, alpha=None, gamma=0.0)\n sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(\n anchorwise_output=True)\n focal_loss = focal_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n sigmoid_loss = 
sigmoid_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n\n with self.test_session() as sess:\n sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])\n self.assertAllClose(sigmoid_loss, focal_loss)\n\n def testExpectedLossWithAlphaOneAndZeroGamma(self):\n # All zeros correspond to 0.5 probability.\n prediction_tensor = tf.constant([[[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]],\n [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [1, 0, 0],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1],\n [1, 1, 1, 1]], tf.float32)\n focal_loss_op = losses.SigmoidFocalClassificationLoss(\n anchorwise_output=False, alpha=1.0, gamma=0.0)\n\n focal_loss = focal_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n with self.test_session() as sess:\n focal_loss = sess.run(focal_loss)\n self.assertAllClose(\n (-math.log(.5) * # x-entropy per class per anchor\n 1.0 * # alpha\n 8), # positives from 8 anchors\n focal_loss)\n\n def testExpectedLossWithAlpha75AndZeroGamma(self):\n # All zeros correspond to 0.5 probability.\n prediction_tensor = tf.constant([[[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]],\n [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [1, 0, 0],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1],\n [1, 1, 1, 1]], tf.float32)\n focal_loss_op = losses.SigmoidFocalClassificationLoss(\n anchorwise_output=False, alpha=0.75, gamma=0.0)\n\n focal_loss = focal_loss_op(prediction_tensor, target_tensor,\n weights=weights)\n with self.test_session() as sess:\n focal_loss = sess.run(focal_loss)\n self.assertAllClose(\n (-math.log(.5) * # x-entropy per class per anchor.\n ((0.75 * # alpha for positives.\n 8) + # positives from 8 anchors.\n (0.25 * # alpha for negatives.\n 8 * 2))), # negatives from 8 anchors for two classes.\n focal_loss)\n\n\nclass WeightedSoftmaxClassificationLossTest(tf.test.TestCase):\n\n def testReturnsCorrectLoss(self):\n prediction_tensor = tf.constant([[[-100, 100, -100],\n [100, -100, -100],\n [0, 0, -100],\n [-100, -100, 100]],\n [[-100, 0, 0],\n [-100, 100, -100],\n [-100, 100, -100],\n [100, -100, -100]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [0, 1, 0],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, .5, 1],\n [1, 1, 1, 0]], tf.float32)\n loss_op = losses.WeightedSoftmaxClassificationLoss()\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n\n exp_loss = - 1.5 * math.log(.5)\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, exp_loss)\n\n def testReturnsCorrectAnchorWiseLoss(self):\n prediction_tensor = tf.constant([[[-100, 100, -100],\n [100, -100, -100],\n [0, 0, -100],\n [-100, -100, 100]],\n [[-100, 0, 0],\n [-100, 100, -100],\n [-100, 100, -100],\n [100, -100, -100]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [0, 1, 0],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, .5, 1],\n [1, 1, 1, 0]], tf.float32)\n loss_op = losses.WeightedSoftmaxClassificationLoss(True)\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n\n exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0],\n 
[-math.log(.5), 0, 0, 0]])\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, exp_loss)\n\n def testReturnsCorrectAnchorWiseLossWithHighLogitScaleSetting(self):\n \"\"\"At very high logit_scale, all predictions will be ~0.33.\"\"\"\n # TODO(yonib): Also test logit_scale with anchorwise=False.\n logit_scale = 10e16\n prediction_tensor = tf.constant([[[-100, 100, -100],\n [100, -100, -100],\n [0, 0, -100],\n [-100, -100, 100]],\n [[-100, 0, 0],\n [-100, 100, -100],\n [-100, 100, -100],\n [100, -100, -100]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [0, 1, 0],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1],\n [1, 1, 1, 1]], tf.float32)\n loss_op = losses.WeightedSoftmaxClassificationLoss(\n anchorwise_output=True, logit_scale=logit_scale)\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n\n uniform_distribution_loss = - math.log(.33333333333)\n exp_loss = np.matrix([[uniform_distribution_loss] * 4,\n [uniform_distribution_loss] * 4])\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, exp_loss)\n\n\nclass BootstrappedSigmoidClassificationLossTest(tf.test.TestCase):\n\n def testReturnsCorrectLossSoftBootstrapping(self):\n prediction_tensor = tf.constant([[[-100, 100, 0],\n [100, -100, -100],\n [100, -100, -100],\n [-100, -100, 100]],\n [[-100, -100, 100],\n [-100, 100, -100],\n [100, 100, 100],\n [0, 0, -1]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [1, 1, 1],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1],\n [1, 1, 1, 0]], tf.float32)\n alpha = tf.constant(.5, tf.float32)\n loss_op = losses.BootstrappedSigmoidClassificationLoss(\n alpha, bootstrap_type='soft')\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n exp_loss = -math.log(.5)\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, exp_loss)\n\n def testReturnsCorrectLossHardBootstrapping(self):\n prediction_tensor = tf.constant([[[-100, 100, 0],\n [100, -100, -100],\n [100, -100, -100],\n [-100, -100, 100]],\n [[-100, -100, 100],\n [-100, 100, -100],\n [100, 100, 100],\n [0, 0, -1]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [1, 1, 1],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1],\n [1, 1, 1, 0]], tf.float32)\n alpha = tf.constant(.5, tf.float32)\n loss_op = losses.BootstrappedSigmoidClassificationLoss(\n alpha, bootstrap_type='hard')\n loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n exp_loss = -math.log(.5)\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, exp_loss)\n\n def testReturnsCorrectAnchorWiseLoss(self):\n prediction_tensor = tf.constant([[[-100, 100, -100],\n [100, -100, -100],\n [100, 0, -100],\n [-100, -100, 100]],\n [[-100, 0, 100],\n [-100, 100, -100],\n [100, 100, 100],\n [0, 0, -1]]], tf.float32)\n target_tensor = tf.constant([[[0, 1, 0],\n [1, 0, 0],\n [1, 0, 0],\n [0, 0, 1]],\n [[0, 0, 1],\n [0, 1, 0],\n [1, 1, 1],\n [1, 0, 0]]], tf.float32)\n weights = tf.constant([[1, 1, 1, 1],\n [1, 1, 1, 0]], tf.float32)\n alpha = tf.constant(.5, tf.float32)\n loss_op = losses.BootstrappedSigmoidClassificationLoss(\n alpha, bootstrap_type='hard', anchorwise_output=True)\n 
loss = loss_op(prediction_tensor, target_tensor, weights=weights)\n\n exp_loss = np.matrix([[0, 0, -math.log(.5), 0],\n [-math.log(.5), 0, 0, 0]])\n with self.test_session() as sess:\n loss_output = sess.run(loss)\n self.assertAllClose(loss_output, exp_loss)\n\n\nclass HardExampleMinerTest(tf.test.TestCase):\n\n def testHardMiningWithSingleLossType(self):\n location_losses = tf.constant([[100, 90, 80, 0],\n [0, 1, 2, 3]], tf.float32)\n cls_losses = tf.constant([[0, 10, 50, 110],\n [9, 6, 3, 0]], tf.float32)\n box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],\n [0.1, 0.1, 0.9, 0.9],\n [0.1, 0.1, 0.9, 0.9],\n [0.1, 0.1, 0.9, 0.9]], tf.float32)\n decoded_boxlist_list = []\n decoded_boxlist_list.append(box_list.BoxList(box_corners))\n decoded_boxlist_list.append(box_list.BoxList(box_corners))\n # Uses only location loss to select hard examples\n loss_op = losses.HardExampleMiner(num_hard_examples=1,\n iou_threshold=0.0,\n loss_type='loc',\n cls_loss_weight=1,\n loc_loss_weight=1)\n (loc_loss, cls_loss) = loss_op(location_losses, cls_losses,\n decoded_boxlist_list)\n exp_loc_loss = 100 + 3\n exp_cls_loss = 0 + 0\n with self.test_session() as sess:\n loc_loss_output = sess.run(loc_loss)\n self.assertAllClose(loc_loss_output, exp_loc_loss)\n cls_loss_output = sess.run(cls_loss)\n self.assertAllClose(cls_loss_output, exp_cls_loss)\n\n def testHardMiningWithBothLossType(self):\n location_losses = tf.constant([[100, 90, 80, 0],\n [0, 1, 2, 3]], tf.float32)\n cls_losses = tf.constant([[0, 10, 50, 110],\n [9, 6, 3, 0]], tf.float32)\n box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],\n [0.1, 0.1, 0.9, 0.9],\n [0.1, 0.1, 0.9, 0.9],\n [0.1, 0.1, 0.9, 0.9]], tf.float32)\n decoded_boxlist_list = []\n decoded_boxlist_list.append(box_list.BoxList(box_corners))\n decoded_boxlist_list.append(box_list.BoxList(box_corners))\n loss_op = losses.HardExampleMiner(num_hard_examples=1,\n iou_threshold=0.0,\n loss_type='both',\n cls_loss_weight=1,\n loc_loss_weight=1)\n (loc_loss, cls_loss) = loss_op(location_losses, cls_losses,\n decoded_boxlist_list)\n exp_loc_loss = 80 + 0\n exp_cls_loss = 50 + 9\n with self.test_session() as sess:\n loc_loss_output = sess.run(loc_loss)\n self.assertAllClose(loc_loss_output, exp_loc_loss)\n cls_loss_output = sess.run(cls_loss)\n self.assertAllClose(cls_loss_output, exp_cls_loss)\n\n def testHardMiningNMS(self):\n location_losses = tf.constant([[100, 90, 80, 0],\n [0, 1, 2, 3]], tf.float32)\n cls_losses = tf.constant([[0, 10, 50, 110],\n [9, 6, 3, 0]], tf.float32)\n box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],\n [0.9, 0.9, 0.99, 0.99],\n [0.1, 0.1, 0.9, 0.9],\n [0.1, 0.1, 0.9, 0.9]], tf.float32)\n decoded_boxlist_list = []\n decoded_boxlist_list.append(box_list.BoxList(box_corners))\n decoded_boxlist_list.append(box_list.BoxList(box_corners))\n loss_op = losses.HardExampleMiner(num_hard_examples=2,\n iou_threshold=0.5,\n loss_type='cls',\n cls_loss_weight=1,\n loc_loss_weight=1)\n (loc_loss, cls_loss) = loss_op(location_losses, cls_losses,\n decoded_boxlist_list)\n exp_loc_loss = 0 + 90 + 0 + 1\n exp_cls_loss = 110 + 10 + 9 + 6\n with self.test_session() as sess:\n loc_loss_output = sess.run(loc_loss)\n self.assertAllClose(loc_loss_output, exp_loc_loss)\n cls_loss_output = sess.run(cls_loss)\n self.assertAllClose(cls_loss_output, exp_cls_loss)\n\n def testEnforceNegativesPerPositiveRatio(self):\n location_losses = tf.constant([[100, 90, 80, 0, 1, 2,\n 3, 10, 20, 100, 20, 3]], tf.float32)\n cls_losses = tf.constant([[0, 0, 100, 0, 90, 70,\n 0, 60, 0, 17, 13, 0]], tf.float32)\n 
box_corners = tf.constant([[0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 0.5, 0.1],\n [0.0, 0.0, 0.6, 0.1],\n [0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 0.8, 0.1],\n [0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 1.0, 0.1],\n [0.0, 0.0, 1.1, 0.1],\n [0.0, 0.0, 0.2, 0.1]], tf.float32)\n match_results = tf.constant([2, -1, 0, -1, -1, 1, -1, -1, -1, -1, -1, 3])\n match_list = [matcher.Match(match_results)]\n decoded_boxlist_list = []\n decoded_boxlist_list.append(box_list.BoxList(box_corners))\n\n max_negatives_per_positive_list = [0.0, 0.5, 1.0, 1.5, 10]\n exp_loc_loss_list = [80 + 2,\n 80 + 1 + 2,\n 80 + 1 + 2 + 10,\n 80 + 1 + 2 + 10 + 100,\n 80 + 1 + 2 + 10 + 100 + 20]\n exp_cls_loss_list = [100 + 70,\n 100 + 90 + 70,\n 100 + 90 + 70 + 60,\n 100 + 90 + 70 + 60 + 17,\n 100 + 90 + 70 + 60 + 17 + 13]\n\n for max_negatives_per_positive, exp_loc_loss, exp_cls_loss in zip(\n max_negatives_per_positive_list, exp_loc_loss_list, exp_cls_loss_list):\n loss_op = losses.HardExampleMiner(\n num_hard_examples=None, iou_threshold=0.9999, loss_type='cls',\n cls_loss_weight=1, loc_loss_weight=1,\n max_negatives_per_positive=max_negatives_per_positive)\n (loc_loss, cls_loss) = loss_op(location_losses, cls_losses,\n decoded_boxlist_list, match_list)\n loss_op.summarize()\n\n with self.test_session() as sess:\n loc_loss_output = sess.run(loc_loss)\n self.assertAllClose(loc_loss_output, exp_loc_loss)\n cls_loss_output = sess.run(cls_loss)\n self.assertAllClose(cls_loss_output, exp_cls_loss)\n\n def testEnforceNegativesPerPositiveRatioWithMinNegativesPerImage(self):\n location_losses = tf.constant([[100, 90, 80, 0, 1, 2,\n 3, 10, 20, 100, 20, 3]], tf.float32)\n cls_losses = tf.constant([[0, 0, 100, 0, 90, 70,\n 0, 60, 0, 17, 13, 0]], tf.float32)\n box_corners = tf.constant([[0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 0.5, 0.1],\n [0.0, 0.0, 0.6, 0.1],\n [0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 0.8, 0.1],\n [0.0, 0.0, 0.2, 0.1],\n [0.0, 0.0, 1.0, 0.1],\n [0.0, 0.0, 1.1, 0.1],\n [0.0, 0.0, 0.2, 0.1]], tf.float32)\n match_results = tf.constant([-1] * 12)\n match_list = [matcher.Match(match_results)]\n decoded_boxlist_list = []\n decoded_boxlist_list.append(box_list.BoxList(box_corners))\n\n min_negatives_per_image_list = [0, 1, 2, 4, 5, 6]\n exp_loc_loss_list = [0,\n 80,\n 80 + 1,\n 80 + 1 + 2 + 10,\n 80 + 1 + 2 + 10 + 100,\n 80 + 1 + 2 + 10 + 100 + 20]\n exp_cls_loss_list = [0,\n 100,\n 100 + 90,\n 100 + 90 + 70 + 60,\n 100 + 90 + 70 + 60 + 17,\n 100 + 90 + 70 + 60 + 17 + 13]\n\n for min_negatives_per_image, exp_loc_loss, exp_cls_loss in zip(\n min_negatives_per_image_list, exp_loc_loss_list, exp_cls_loss_list):\n loss_op = losses.HardExampleMiner(\n num_hard_examples=None, iou_threshold=0.9999, loss_type='cls',\n cls_loss_weight=1, loc_loss_weight=1,\n max_negatives_per_positive=3,\n min_negatives_per_image=min_negatives_per_image)\n (loc_loss, cls_loss) = loss_op(location_losses, cls_losses,\n decoded_boxlist_list, match_list)\n with self.test_session() as sess:\n loc_loss_output = sess.run(loc_loss)\n self.assertAllClose(loc_loss_output, exp_loc_loss)\n cls_loss_output = sess.run(cls_loss)\n self.assertAllClose(cls_loss_output, exp_cls_loss)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.shape", "numpy.random.rand", "tensorflow.random_uniform", "tensorflow.placeholder", "tensorflow.test.main", "tensorflow.global_variables_initializer", "tensorflow.random_normal" ], [ "tensorflow.constant", "tensorflow.test.main" ], [ "tensorflow.app.run", "tensorflow.logging.set_verbosity", "tensorflow.gfile.MakeDirs", "tensorflow.train.ClusterSpec" ], [ "numpy.matrix", "tensorflow.zeros", "tensorflow.ones", "numpy.ones", "tensorflow.nn.l2_loss", "tensorflow.constant", "tensorflow.test.main", "numpy.log10" ] ]
TangleSpace/pytorch-widedeep
[ "ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff", "ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff", "ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff", "ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff", "ccc55a15c1b3205ffc8c054abc5cd25cba9ccdff" ]
[ "pytorch_widedeep/models/tabular/transformers/saint.py", "pytorch_widedeep/models/text/stacked_attentive_rnn.py", "pytorch_widedeep/models/tabular/transformers/_encoders.py", "tests/test_model_components/test_mc_tab_mlp.py", "pytorch_widedeep/models/tabular/tabnet/tab_net.py" ]
[ "from torch import nn\n\nfrom pytorch_widedeep.wdtypes import * # noqa: F403\nfrom pytorch_widedeep.models.tabular.mlp._layers import MLP\nfrom pytorch_widedeep.models.tabular._base_tabular_model import (\n BaseTabularModelWithAttention,\n)\nfrom pytorch_widedeep.models.tabular.transformers._encoders import SaintEncoder\n\n\nclass SAINT(BaseTabularModelWithAttention):\n r\"\"\"Defines a `SAINT model <https://arxiv.org/abs/2106.01342>`_ that\n can be used as the ``deeptabular`` component of a Wide & Deep model or\n independently by itself.\n\n\n Parameters\n ----------\n column_idx: Dict\n Dict containing the index of the columns that will be passed through\n the model. Required to slice the tensors. e.g.\n {'education': 0, 'relationship': 1, 'workclass': 2, ...}\n cat_embed_input: List, Optional, default = None\n List of Tuples with the column name and number of unique values and\n embedding dimension. e.g. [(education, 11), ...]\n cat_embed_dropout: float, default = 0.1\n Categorical embeddings dropout\n use_cat_bias: bool, default = False,\n Boolean indicating if bias will be used for the categorical embeddings\n cat_embed_activation: Optional, str, default = None,\n Activation function for the categorical embeddings, if any. `'tanh'`,\n `'relu'`, `'leaky_relu'` and `'gelu'` are supported.\n full_embed_dropout: bool, default = False\n Boolean indicating if an entire embedding (i.e. the representation of\n one column) will be dropped in the batch. See:\n :obj:`pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.\n If ``full_embed_dropout = True``, ``cat_embed_dropout`` is ignored.\n shared_embed: bool, default = False\n The idea behind ``shared_embed`` is described in the Appendix A in the\n `TabTransformer paper <https://arxiv.org/abs/2012.06678>`_: `'The\n goal of having column embedding is to enable the model to distinguish\n the classes in one column from those in the other columns'`. In other\n words, the idea is to let the model learn which column is embedded\n at the time.\n add_shared_embed: bool, default = False\n The two embedding sharing strategies are: 1) add the shared embeddings\n to the column embeddings or 2) to replace the first\n ``frac_shared_embed`` with the shared embeddings.\n See :obj:`pytorch_widedeep.models.transformers._layers.SharedEmbeddings`\n frac_shared_embed: float, default = 0.25\n The fraction of embeddings that will be shared (if ``add_shared_embed\n = False``) by all the different categories for one particular\n column.\n continuous_cols: List, Optional, default = None\n List with the name of the numeric (aka continuous) columns\n cont_norm_layer: str, default = \"batchnorm\"\n Type of normalization layer applied to the continuous features. Options\n are: 'layernorm', 'batchnorm' or None.\n cont_embed_dropout: float, default = 0.1,\n Continuous embeddings dropout\n use_cont_bias: bool, default = True,\n Boolean indicating if bias will be used for the continuous embeddings\n cont_embed_activation: str, default = None\n Activation function to be applied to the continuous embeddings, if\n any. `'tanh'`, `'relu'`, `'leaky_relu'` and `'gelu'` are supported.\n input_dim: int, default = 32\n The so-called *dimension of the model*. 
Is the number of\n embeddings used to encode the categorical and/or continuous columns\n n_heads: int, default = 8\n Number of attention heads per Transformer block\n use_qkv_bias: bool, default = False\n Boolean indicating whether or not to use bias in the Q, K, and V\n projection layers\n n_blocks: int, default = 2\n Number of SAINT-Transformer blocks. 1 in the paper.\n attn_dropout: float, default = 0.2\n Dropout that will be applied to the Multi-Head Attention column and\n row layers\n ff_dropout: float, default = 0.1\n Dropout that will be applied to the FeedForward network\n transformer_activation: str, default = \"gelu\"\n Transformer Encoder activation function. `'tanh'`, `'relu'`,\n `'leaky_relu'`, `'gelu'`, `'geglu'` and `'reglu'` are supported\n mlp_hidden_dims: List, Optional, default = None\n MLP hidden dimensions. If not provided it will default to ``[l, 4*l,\n 2*l]`` where ``l`` is the MLP's input dimension\n mlp_activation: str, default = \"relu\"\n MLP activation function. `'tanh'`, `'relu'`, `'leaky_relu'` and\n `'gelu'` are supported\n mlp_dropout: float, default = 0.1\n Dropout that will be applied to the final MLP\n mlp_batchnorm: bool, default = False\n Boolean indicating whether or not to apply batch normalization to the\n dense layers\n mlp_batchnorm_last: bool, default = False\n Boolean indicating whether or not to apply batch normalization to the\n last of the dense layers\n mlp_linear_first: bool, default = False\n Boolean indicating whether the order of the operations in the dense\n layer. If ``True: [LIN -> ACT -> BN -> DP]``. If ``False: [BN -> DP ->\n LIN -> ACT]``\n\n Attributes\n ----------\n cat_and_cont_embed: ``nn.Module``\n This is the module that processes the categorical and continuous columns\n saint_blks: ``nn.Sequential``\n Sequence of SAINT-Transformer blocks\n saint_mlp: ``nn.Module``\n MLP component in the model\n output_dim: int\n The output dimension of the model. 
This is a required attribute\n neccesary to build the ``WideDeep`` class\n\n Example\n --------\n >>> import torch\n >>> from pytorch_widedeep.models import SAINT\n >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)\n >>> colnames = ['a', 'b', 'c', 'd', 'e']\n >>> cat_embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]\n >>> continuous_cols = ['e']\n >>> column_idx = {k:v for v,k in enumerate(colnames)}\n >>> model = SAINT(column_idx=column_idx, cat_embed_input=cat_embed_input, continuous_cols=continuous_cols)\n >>> out = model(X_tab)\n \"\"\"\n\n def __init__(\n self,\n column_idx: Dict[str, int],\n cat_embed_input: Optional[List[Tuple[str, int]]] = None,\n cat_embed_dropout: float = 0.1,\n use_cat_bias: bool = False,\n cat_embed_activation: Optional[str] = None,\n full_embed_dropout: bool = False,\n shared_embed: bool = False,\n add_shared_embed: bool = False,\n frac_shared_embed: float = 0.25,\n continuous_cols: Optional[List[str]] = None,\n cont_norm_layer: str = None,\n cont_embed_dropout: float = 0.1,\n use_cont_bias: bool = True,\n cont_embed_activation: Optional[str] = None,\n input_dim: int = 32,\n use_qkv_bias: bool = False,\n n_heads: int = 8,\n n_blocks: int = 2,\n attn_dropout: float = 0.1,\n ff_dropout: float = 0.2,\n transformer_activation: str = \"gelu\",\n mlp_hidden_dims: Optional[List[int]] = None,\n mlp_activation: str = \"relu\",\n mlp_dropout: float = 0.1,\n mlp_batchnorm: bool = False,\n mlp_batchnorm_last: bool = False,\n mlp_linear_first: bool = True,\n ):\n super(SAINT, self).__init__(\n column_idx=column_idx,\n cat_embed_input=cat_embed_input,\n cat_embed_dropout=cat_embed_dropout,\n use_cat_bias=use_cat_bias,\n cat_embed_activation=cat_embed_activation,\n full_embed_dropout=full_embed_dropout,\n shared_embed=shared_embed,\n add_shared_embed=add_shared_embed,\n frac_shared_embed=frac_shared_embed,\n continuous_cols=continuous_cols,\n cont_norm_layer=cont_norm_layer,\n embed_continuous=True,\n cont_embed_dropout=cont_embed_dropout,\n use_cont_bias=use_cont_bias,\n cont_embed_activation=cont_embed_activation,\n input_dim=input_dim,\n )\n\n self.column_idx = column_idx\n self.cat_embed_input = cat_embed_input\n self.cat_embed_dropout = cat_embed_dropout\n self.full_embed_dropout = full_embed_dropout\n self.shared_embed = shared_embed\n self.add_shared_embed = add_shared_embed\n self.frac_shared_embed = frac_shared_embed\n\n self.continuous_cols = continuous_cols\n self.cont_embed_activation = cont_embed_activation\n self.cont_embed_dropout = cont_embed_dropout\n self.cont_norm_layer = cont_norm_layer\n\n self.input_dim = input_dim\n self.use_qkv_bias = use_qkv_bias\n self.n_heads = n_heads\n self.n_blocks = n_blocks\n self.attn_dropout = attn_dropout\n self.ff_dropout = ff_dropout\n self.transformer_activation = transformer_activation\n\n self.mlp_hidden_dims = mlp_hidden_dims\n self.mlp_activation = mlp_activation\n self.mlp_dropout = mlp_dropout\n self.mlp_batchnorm = mlp_batchnorm\n self.mlp_batchnorm_last = mlp_batchnorm_last\n self.mlp_linear_first = mlp_linear_first\n\n self.with_cls_token = \"cls_token\" in column_idx\n self.n_cat = len(cat_embed_input) if cat_embed_input is not None else 0\n self.n_cont = len(continuous_cols) if continuous_cols is not None else 0\n self.n_feats = self.n_cat + self.n_cont\n\n # Embeddings are instantiated at the base model\n # Transformer blocks\n self.saint_blks = nn.Sequential()\n for i in range(n_blocks):\n self.saint_blks.add_module(\n \"saint_block\" + str(i),\n SaintEncoder(\n 
input_dim,\n n_heads,\n use_qkv_bias,\n attn_dropout,\n ff_dropout,\n transformer_activation,\n self.n_feats,\n ),\n )\n\n attn_output_dim = (\n self.input_dim if self.with_cls_token else self.n_feats * self.input_dim\n )\n\n # Mlp\n if not mlp_hidden_dims:\n mlp_hidden_dims = [\n attn_output_dim,\n attn_output_dim * 4,\n attn_output_dim * 2,\n ]\n else:\n mlp_hidden_dims = [attn_output_dim] + mlp_hidden_dims\n\n self.saint_mlp = MLP(\n mlp_hidden_dims,\n mlp_activation,\n mlp_dropout,\n mlp_batchnorm,\n mlp_batchnorm_last,\n mlp_linear_first,\n )\n\n # the output_dim attribute will be used as input_dim when \"merging\" the models\n self.output_dim: int = mlp_hidden_dims[-1]\n\n def forward(self, X: Tensor) -> Tensor:\n x = self._get_embeddings(X)\n x = self.saint_blks(x)\n if self.with_cls_token:\n x = x[:, 0, :]\n else:\n x = x.flatten(1)\n return self.saint_mlp(x)\n\n @property\n def attention_weights(self) -> List:\n r\"\"\"List with the attention weights. Each element of the list is a tuple\n where the first and the second elements are the column and row\n attention weights respectively\n\n The shape of the attention weights is:\n\n - column attention: :math:`(N, H, F, F)`\n\n - row attention: :math:`(1, H, N, N)`\n\n where *N* is the batch size, *H* is the number of heads and *F* is the\n number of features/columns in the dataset\n \"\"\"\n attention_weights = []\n for blk in self.saint_blks:\n attention_weights.append(\n (blk.col_attn.attn_weights, blk.row_attn.attn_weights)\n )\n return attention_weights\n", "import warnings\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom pytorch_widedeep.wdtypes import * # noqa: F403\nfrom pytorch_widedeep.models.text._encoders import ContextAttentionEncoder\nfrom pytorch_widedeep.models.tabular.mlp._layers import MLP\n\n\nclass StackedAttentiveRNN(nn.Module):\n r\"\"\"Text classifier/regressor comprised by a stack of blocks:\n ``[RNN + Attention]``. This can be used as the ``deeptext`` component of a\n Wide & Deep model or independently by itself.\n\n In addition, there is the option to add a Fully Connected (FC) set of\n dense layers on top of the attentiob blocks\n\n Parameters\n ----------\n vocab_size: int\n Number of words in the vocabulary\n embed_dim: int, Optional, default = None\n Dimension of the word embeddings if non-pretained word vectors are\n used\n embed_matrix: np.ndarray, Optional, default = None\n Pretrained word embeddings\n embed_trainable: bool, default = True\n Boolean indicating if the pretrained embeddings are trainable\n rnn_type: str, default = 'lstm'\n String indicating the type of RNN to use. One of 'lstm' or 'gru'\n hidden_dim: int, default = 64\n Hidden dim of the RNN\n bidirectional: bool, default = True\n Boolean indicating whether the staked RNNs are bidirectional\n padding_idx: int, default = 1\n index of the padding token in the padded-tokenised sequences. The\n ``TextPreprocessor`` class within this library uses ``fastai``'s\n tokenizer where the token index 0 is reserved for the `'unknown'`\n word token. Therefore, the default value is set to 1.\n n_blocks: int, default = 3\n Number of attention blocks. 
Each block is comprised by an RNN and a\n Context Attention Encoder\n attn_concatenate: bool, default = True\n Boolean indicating if the input to the attention mechanism will be the\n output of the RNN or the output of the RNN concatenated with the last\n hidden state or simply\n attn_dropout: float, default = 0.1\n Internal dropout for the attention mechanism\n with_addnorm: bool, default = False\n Boolean indicating if the output of each block will be added to the\n input and normalised\n head_hidden_dims: List, Optional, default = None\n List with the sizes of the dense layers in the head e.g: [128, 64]\n head_activation: str, default = \"relu\"\n Activation function for the dense layers in the head. Currently\n `'tanh'`, `'relu'`, `'leaky_relu'` and `'gelu'` are supported\n head_dropout: float, Optional, default = None\n Dropout of the dense layers in the head\n head_batchnorm: bool, default = False\n Boolean indicating whether or not to include batch normalization in\n the dense layers that form the `'rnn_mlp'`\n head_batchnorm_last: bool, default = False\n Boolean indicating whether or not to apply batch normalization to the\n last of the dense layers in the head\n head_linear_first: bool, default = False\n Boolean indicating whether the order of the operations in the dense\n layer. If ``True: [LIN -> ACT -> BN -> DP]``. If ``False: [BN -> DP ->\n LIN -> ACT]``\n\n Attributes\n ----------\n word_embed: ``nn.Module``\n word embedding matrix\n rnn: ``nn.Module``\n Stack of RNNs\n rnn_mlp: ``nn.Sequential``\n Stack of dense layers on top of the RNN. This will only exists if\n ``head_layers_dim`` is not ``None``\n output_dim: int\n The output dimension of the model. This is a required attribute\n neccesary to build the ``WideDeep`` class\n\n Example\n --------\n >>> import torch\n >>> from pytorch_widedeep.models import StackedAttentiveRNN\n >>> X_text = torch.cat((torch.zeros([5,1]), torch.empty(5, 4).random_(1,4)), axis=1)\n >>> model = StackedAttentiveRNN(vocab_size=4, hidden_dim=4, padding_idx=0, embed_dim=4)\n >>> out = model(X_text)\n \"\"\"\n\n def __init__(\n self,\n vocab_size: int,\n embed_dim: Optional[int] = None,\n embed_matrix: Optional[np.ndarray] = None,\n embed_trainable: bool = True,\n rnn_type: str = \"lstm\",\n hidden_dim: int = 64,\n bidirectional: bool = False,\n padding_idx: int = 1,\n n_blocks: int = 3,\n attn_concatenate: bool = False,\n attn_dropout: float = 0.1,\n with_addnorm: bool = False,\n head_hidden_dims: Optional[List[int]] = None,\n head_activation: str = \"relu\",\n head_dropout: Optional[float] = None,\n head_batchnorm: bool = False,\n head_batchnorm_last: bool = False,\n head_linear_first: bool = False,\n ):\n super(StackedAttentiveRNN, self).__init__()\n\n if (\n embed_dim is not None\n and embed_matrix is not None\n and not embed_dim == embed_matrix.shape[1]\n ):\n warnings.warn(\n \"the input embedding dimension {} and the dimension of the \"\n \"pretrained embeddings {} do not match. 
The pretrained embeddings \"\n \"dimension ({}) will be used\".format(\n embed_dim, embed_matrix.shape[1], embed_matrix.shape[1]\n ),\n UserWarning,\n )\n\n if rnn_type.lower() not in [\"lstm\", \"gru\"]:\n raise ValueError(\n f\"'rnn_type' must be 'lstm' or 'gru', got {rnn_type} instead\"\n )\n\n self.vocab_size = vocab_size\n self.embed_trainable = embed_trainable\n self.embed_dim = embed_dim\n\n self.rnn_type = rnn_type\n self.hidden_dim = hidden_dim\n self.bidirectional = bidirectional\n self.padding_idx = padding_idx\n\n self.n_blocks = n_blocks\n self.attn_concatenate = attn_concatenate\n self.attn_dropout = attn_dropout\n self.with_addnorm = with_addnorm\n\n self.head_hidden_dims = head_hidden_dims\n self.head_activation = head_activation\n self.head_dropout = head_dropout\n self.head_batchnorm = head_batchnorm\n self.head_batchnorm_last = head_batchnorm_last\n self.head_linear_first = head_linear_first\n\n # Embeddings\n self.word_embed, self.embed_dim = self._set_embeddings(embed_matrix)\n\n # Linear Projection: if embed_dim is different that the input of the\n # attention blocks we add a linear projection\n if bidirectional and attn_concatenate:\n attn_input_dim = hidden_dim * 4\n elif bidirectional or attn_concatenate:\n attn_input_dim = hidden_dim * 2\n else:\n attn_input_dim = hidden_dim\n self.output_dim = attn_input_dim\n\n if attn_input_dim != self.embed_dim:\n self.embed_proj: Union[nn.Linear, nn.Identity] = nn.Linear(\n self.embed_dim, attn_input_dim\n )\n else:\n self.embed_proj = nn.Identity()\n\n # RNN\n rnn_params = {\n \"input_size\": attn_input_dim,\n \"hidden_size\": hidden_dim,\n \"bidirectional\": bidirectional,\n \"batch_first\": True,\n }\n if self.rnn_type.lower() == \"lstm\":\n self.rnn: Union[nn.LSTM, nn.GRU] = nn.LSTM(**rnn_params)\n elif self.rnn_type.lower() == \"gru\":\n self.rnn = nn.GRU(**rnn_params)\n\n # FC-Head (Mlp)\n self.attention_blks = nn.ModuleList()\n for i in range(n_blocks):\n self.attention_blks.append(\n ContextAttentionEncoder(\n self.rnn,\n attn_input_dim,\n attn_dropout,\n attn_concatenate,\n with_addnorm=with_addnorm if i != n_blocks - 1 else False,\n sum_along_seq=i == n_blocks - 1,\n )\n )\n\n # Mlp\n if self.head_hidden_dims is not None:\n head_hidden_dims = [self.output_dim] + head_hidden_dims\n self.rnn_mlp: Union[MLP, nn.Identity] = MLP(\n head_hidden_dims,\n head_activation,\n head_dropout,\n head_batchnorm,\n head_batchnorm_last,\n head_linear_first,\n )\n self.output_dim = head_hidden_dims[-1]\n else:\n # simple hack to add readability in the forward pass\n self.rnn_mlp = nn.Identity()\n\n def forward(self, X: Tensor) -> Tensor: # type: ignore\n x = self.embed_proj(self.word_embed(X.long()))\n\n h = nn.init.zeros_(\n torch.Tensor(2 if self.bidirectional else 1, X.shape[0], self.hidden_dim)\n ).to(x.device)\n if self.rnn_type == \"lstm\":\n c = nn.init.zeros_(\n torch.Tensor(\n 2 if self.bidirectional else 1, X.shape[0], self.hidden_dim\n )\n ).to(x.device)\n else:\n c = None\n\n for blk in self.attention_blks:\n x, h, c = blk(x, h, c)\n\n return self.rnn_mlp(x)\n\n @property\n def attention_weights(self) -> List:\n r\"\"\"List with the attention weights\n\n The shape of the attention weights is:\n\n :math:`(N, S)`\n\n Where *N* is the batch size and *S* is the length of the sequence\n \"\"\"\n return [blk.attn.attn_weights for blk in self.attention_blks]\n\n def _set_embeddings(\n self, embed_matrix: Union[Any, np.ndarray]\n ) -> Tuple[nn.Module, int]:\n if isinstance(embed_matrix, np.ndarray):\n assert (\n embed_matrix.dtype 
== \"float32\"\n ), \"'embed_matrix' must be of dtype 'float32', got dtype '{}'\".format(\n str(embed_matrix.dtype)\n )\n word_embed = nn.Embedding(\n self.vocab_size, embed_matrix.shape[1], padding_idx=self.padding_idx\n )\n if self.embed_trainable:\n word_embed.weight = nn.Parameter(\n torch.tensor(embed_matrix), requires_grad=True\n )\n else:\n word_embed.weight = nn.Parameter(\n torch.tensor(embed_matrix), requires_grad=False\n )\n embed_dim = embed_matrix.shape[1]\n else:\n word_embed = nn.Embedding(\n self.vocab_size, self.embed_dim, padding_idx=self.padding_idx\n )\n embed_dim = self.embed_dim\n\n return word_embed, embed_dim\n", "import einops\nfrom torch import nn\n\nfrom pytorch_widedeep.wdtypes import * # noqa: F403\nfrom pytorch_widedeep.models.tabular.transformers._attention_layers import (\n AddNorm,\n NormAdd,\n FeedForward,\n LinearAttention,\n AdditiveAttention,\n MultiHeadedAttention,\n)\n\n\nclass TransformerEncoder(nn.Module):\n def __init__(\n self,\n input_dim: int,\n n_heads: int,\n use_bias: bool,\n attn_dropout: float,\n ff_dropout: float,\n activation: str,\n ):\n super(TransformerEncoder, self).__init__()\n\n self.attn = MultiHeadedAttention(\n input_dim,\n n_heads,\n use_bias,\n attn_dropout,\n )\n self.ff = FeedForward(input_dim, ff_dropout, activation)\n\n self.attn_addnorm = AddNorm(input_dim, attn_dropout)\n self.ff_addnorm = AddNorm(input_dim, ff_dropout)\n\n def forward(self, X: Tensor) -> Tensor:\n x = self.attn_addnorm(X, self.attn)\n return self.ff_addnorm(x, self.ff)\n\n\nclass SaintEncoder(nn.Module):\n def __init__(\n self,\n input_dim: int,\n n_heads: int,\n use_bias: bool,\n attn_dropout: float,\n ff_dropout: float,\n activation: str,\n n_feat: int,\n ):\n super(SaintEncoder, self).__init__()\n\n self.n_feat = n_feat\n\n self.col_attn = MultiHeadedAttention(\n input_dim,\n n_heads,\n use_bias,\n attn_dropout,\n )\n self.col_attn_ff = FeedForward(input_dim, ff_dropout, activation)\n self.col_attn_addnorm = AddNorm(input_dim, attn_dropout)\n self.col_attn_ff_addnorm = AddNorm(input_dim, ff_dropout)\n\n self.row_attn = MultiHeadedAttention(\n n_feat * input_dim,\n n_heads,\n use_bias,\n attn_dropout,\n )\n self.row_attn_ff = FeedForward(n_feat * input_dim, ff_dropout, activation)\n self.row_attn_addnorm = AddNorm(n_feat * input_dim, attn_dropout)\n self.row_attn_ff_addnorm = AddNorm(n_feat * input_dim, ff_dropout)\n\n def forward(self, X: Tensor) -> Tensor:\n x = self.col_attn_addnorm(X, self.col_attn)\n x = self.col_attn_ff_addnorm(x, self.col_attn_ff)\n x = einops.rearrange(x, \"b n d -> 1 b (n d)\")\n x = self.row_attn_addnorm(x, self.row_attn)\n x = self.row_attn_ff_addnorm(x, self.row_attn_ff)\n x = einops.rearrange(x, \"1 b (n d) -> b n d\", n=self.n_feat)\n return x\n\n\nclass FTTransformerEncoder(nn.Module):\n def __init__(\n self,\n input_dim: int,\n n_feats: int,\n n_heads: int,\n use_bias: bool,\n attn_dropout: float,\n ff_dropout: float,\n kv_compression_factor: float,\n kv_sharing: bool,\n activation: str,\n ff_factor: float,\n first_block: bool,\n ):\n super(FTTransformerEncoder, self).__init__()\n\n self.first_block = first_block\n\n self.attn = LinearAttention(\n input_dim,\n n_feats,\n n_heads,\n use_bias,\n attn_dropout,\n kv_compression_factor,\n kv_sharing,\n )\n self.ff = FeedForward(input_dim, ff_dropout, activation, ff_factor)\n\n self.attn_normadd = NormAdd(input_dim, attn_dropout)\n self.ff_normadd = NormAdd(input_dim, ff_dropout)\n\n def forward(self, X: Tensor) -> Tensor:\n if self.first_block:\n x = X + self.attn(X)\n 
else:\n x = self.attn_normadd(X, self.attn)\n return self.ff_normadd(x, self.ff)\n\n\nclass PerceiverEncoder(nn.Module):\n def __init__(\n self,\n input_dim: int,\n n_heads: int,\n use_bias: bool,\n attn_dropout: float,\n ff_dropout: float,\n activation: str,\n query_dim: Optional[int] = None,\n ):\n super(PerceiverEncoder, self).__init__()\n\n self.attn = MultiHeadedAttention(\n input_dim,\n n_heads,\n use_bias,\n attn_dropout,\n query_dim,\n )\n attn_dim_out = query_dim if query_dim is not None else input_dim\n self.ff = FeedForward(attn_dim_out, ff_dropout, activation)\n\n self.ln_q = nn.LayerNorm(attn_dim_out)\n self.ln_kv = nn.LayerNorm(input_dim)\n self.norm_attn_dropout = nn.Dropout(attn_dropout)\n\n self.ff_norm = nn.LayerNorm(attn_dim_out)\n self.norm_ff_dropout = nn.Dropout(ff_dropout)\n\n def forward(self, X_Q: Tensor, X_KV: Optional[Tensor] = None) -> Tensor:\n x = self.ln_q(X_Q)\n y = None if X_KV is None else self.ln_kv(X_KV)\n x = x + self.norm_attn_dropout(self.attn(x, y))\n return x + self.norm_ff_dropout(self.ff(self.ff_norm(x)))\n\n\nclass FastFormerEncoder(nn.Module):\n def __init__(\n self,\n input_dim: int,\n n_heads: int,\n use_bias: bool,\n attn_dropout: float,\n ff_dropout: float,\n share_qv_weights: bool,\n activation: str,\n ):\n super(FastFormerEncoder, self).__init__()\n\n self.attn = AdditiveAttention(\n input_dim,\n n_heads,\n use_bias,\n attn_dropout,\n share_qv_weights,\n )\n\n self.ff = FeedForward(input_dim, ff_dropout, activation)\n self.attn_addnorm = AddNorm(input_dim, attn_dropout)\n self.ff_addnorm = AddNorm(input_dim, ff_dropout)\n\n def forward(self, X: Tensor) -> Tensor:\n x = self.attn_addnorm(X, self.attn)\n return self.ff_addnorm(x, self.ff)\n", "import string\n\nimport numpy as np\nimport torch\nimport pytest\n\nfrom pytorch_widedeep.models import TabMlp, WideDeep\nfrom pytorch_widedeep.training import Trainer\nfrom pytorch_widedeep.models.tabular.embeddings_layers import (\n DiffSizeCatAndContEmbeddings,\n)\n\ncolnames = list(string.ascii_lowercase)[:10]\nembed_cols = [np.random.choice(np.arange(5), 10) for _ in range(5)]\nembed_input = [(u, i, j) for u, i, j in zip(colnames[:5], [5] * 5, [16] * 5)]\ncont_cols = [np.random.rand(10) for _ in range(5)]\ncontinuous_cols = colnames[-5:]\n\nX_deep = torch.from_numpy(np.vstack(embed_cols + cont_cols).transpose())\nX_deep_emb = X_deep[:, :5]\nX_deep_cont = X_deep[:, 5:]\ntarget = np.random.choice(2, 32)\n\ntabmlp = TabMlp(\n column_idx={k: v for v, k in enumerate(colnames)},\n cat_embed_input=embed_input,\n continuous_cols=colnames[-5:],\n mlp_hidden_dims=[32, 16],\n mlp_dropout=[0.5, 0.5],\n)\n###############################################################################\n# Embeddings and NO continuous_cols\n###############################################################################\n\n\ndef test_tab_mlp_only_cat_embed():\n model = TabMlp(\n column_idx={k: v for v, k in enumerate(colnames[:5])},\n cat_embed_input=embed_input,\n mlp_hidden_dims=[32, 16],\n mlp_dropout=[0.5, 0.2],\n )\n out = model(X_deep_emb)\n assert out.size(0) == 10 and out.size(1) == 16\n\n\n###############################################################################\n# Continous cols but NO embeddings\n###############################################################################\n\n\n@pytest.mark.parametrize(\n \"embed_continuous\",\n [True, False],\n)\ndef test_tab_mlp_only_cont(embed_continuous):\n model = TabMlp(\n mlp_hidden_dims=[32, 16],\n mlp_dropout=[0.5, 0.2],\n column_idx={k: v for v, k in 
enumerate(colnames[5:])},\n continuous_cols=continuous_cols,\n embed_continuous=embed_continuous,\n cont_embed_dim=6,\n )\n out = model(X_deep_cont)\n assert out.size(0) == 10 and out.size(1) == 16\n\n\n###############################################################################\n# All parameters and cont norm\n###############################################################################\n\n\n@pytest.mark.parametrize(\n \"cont_norm_layer\",\n [None, \"batchnorm\", \"layernorm\"],\n)\ndef test_cont_norm_layer(cont_norm_layer):\n model = TabMlp(\n column_idx={k: v for v, k in enumerate(colnames)},\n cat_embed_input=embed_input,\n cat_embed_dropout=0.1,\n cat_embed_activation=\"relu\",\n continuous_cols=continuous_cols,\n cont_norm_layer=cont_norm_layer,\n cont_embed_activation=\"relu\",\n mlp_hidden_dims=[32, 16, 8],\n mlp_dropout=0.1,\n mlp_batchnorm=True,\n mlp_batchnorm_last=False,\n mlp_linear_first=False,\n )\n out = model(X_deep)\n assert out.size(0) == 10 and out.size(1) == 8\n\n\n###############################################################################\n# Test raise ValueError\n###############################################################################\n\n\ndef test_act_fn_ValueError():\n with pytest.raises(ValueError):\n model = TabMlp( # noqa: F841\n column_idx={k: v for v, k in enumerate(colnames)},\n cat_embed_input=embed_input,\n continuous_cols=continuous_cols,\n mlp_hidden_dims=[32, 16],\n mlp_dropout=[0.5, 0.2],\n mlp_activation=\"javier\",\n )\n\n\n###############################################################################\n# Test DiffSizeCatAndContEmbeddings\n###############################################################################\n\n\n@pytest.mark.parametrize(\n \"setup, column_idx, cat_embed_input, continuous_cols, embed_continuous\",\n [\n (\"w_cat\", {k: v for v, k in enumerate(colnames[:5])}, embed_input, None, False),\n (\n \"w_cont\",\n {k: v for v, k in enumerate(colnames[5:])},\n None,\n continuous_cols,\n False,\n ),\n (\n \"w_both\",\n {k: v for v, k in enumerate(colnames)},\n embed_input,\n continuous_cols,\n False,\n ),\n (\n \"w_both_and_embed_cont\",\n {k: v for v, k in enumerate(colnames)},\n embed_input,\n continuous_cols,\n True,\n ),\n ],\n)\ndef test_embedddings_class(\n setup, column_idx, cat_embed_input, continuous_cols, embed_continuous\n):\n\n if setup == \"w_cat\":\n X = X_deep_emb\n elif setup == \"w_cont\":\n X = X_deep_cont\n else:\n X = X_deep\n\n cat_and_cont_embed = DiffSizeCatAndContEmbeddings(\n column_idx=column_idx,\n cat_embed_input=cat_embed_input,\n cat_embed_dropout=0.1,\n use_cat_bias=setup == \"w_both_and_embed_cont\",\n continuous_cols=continuous_cols,\n embed_continuous=embed_continuous,\n cont_embed_dim=16,\n cont_embed_dropout=0.1,\n use_cont_bias=setup == \"w_both_and_embed_cont\",\n cont_norm_layer=None,\n )\n x_cat, x_cont = cat_and_cont_embed(X)\n\n if setup == \"w_cat\":\n s1 = X.shape[0]\n s2 = sum([el[2] for el in cat_and_cont_embed.cat_embed_input])\n assert x_cat.size() == torch.Size((s1, s2)) and x_cont is None\n if setup == \"w_cont\":\n s1 = X.shape[0]\n s2 = len(continuous_cols)\n assert x_cont.size() == torch.Size((s1, s2)) and x_cat is None\n if setup == \"w_both\":\n s1 = X.shape[0]\n s2_cat = sum([el[2] for el in cat_and_cont_embed.cat_embed_input])\n s2_cont = len(continuous_cols)\n assert x_cat.size() == torch.Size((s1, s2_cat)) and x_cont.size() == torch.Size(\n (s1, s2_cont)\n )\n if setup == \"w_both_and_embed_cont\":\n s1 = X.shape[0]\n s2_cat = sum([el[2] for el in 
cat_and_cont_embed.cat_embed_input])\n s2_cont = len(continuous_cols) * cat_and_cont_embed.cont_embed_dim\n assert x_cat.size() == torch.Size((s1, s2_cat)) and x_cont.size() == torch.Size(\n (s1, s2_cont)\n )\n\n\n###############################################################################\n# Test Feature Dsitribution Smoothing\n###############################################################################\n\n\n@pytest.mark.parametrize(\n \"with_lds\",\n [True, False],\n)\ndef test_fds(with_lds):\n # lds with model\n model = WideDeep(\n deeptabular=tabmlp,\n with_fds=True,\n momentum=None,\n clip_min=0,\n clip_max=10,\n )\n trainer = Trainer(model, objective=\"regression\", everbose=0)\n # n_epochs=2 to run self._calibrate_mean_var\n trainer.fit(X_tab=X_deep, target=target, n_epochs=3, with_lds=with_lds)\n # simply checking that runs and produces outputs\n preds = trainer.predict(X_tab=X_deep)\n module_names = list(model.named_modules())\n assert module_names[-2][0] == \"fds_layer\"\n assert module_names[-1][0] == \"fds_layer.pred_layer\"\n assert preds.shape[0] == 10 and \"train_loss\" in trainer.history\n\n trainer.model.fds_layer.reset()\n assert float(trainer.model.fds_layer.num_samples_tracked.sum()) == 0\n", "import torch\nfrom torch import nn\n\nfrom pytorch_widedeep.wdtypes import * # noqa: F403\nfrom pytorch_widedeep.models.tabular.tabnet._layers import (\n TabNetEncoder,\n initialize_non_glu,\n)\nfrom pytorch_widedeep.models.tabular._base_tabular_model import (\n BaseTabularModelWithoutAttention,\n)\n\n\nclass TabNet(BaseTabularModelWithoutAttention):\n\n r\"\"\"Defines a `TabNet model <https://arxiv.org/abs/1908.07442>`_ that\n can be used as the ``deeptabular`` component of a Wide & Deep model or\n independently by itself.\n\n The implementation in this library is fully based on that `here\n <https://github.com/dreamquark-ai/tabnet>`_, simply adapted so that it\n can work within the ``WideDeep`` frame. Therefore, **all credit to the\n dreamquark-ai team**\n\n Parameters\n ----------\n column_idx: Dict\n Dict containing the index of the columns that will be passed through\n the ``TabMlp`` model. Required to slice the tensors. e.g. {'education':\n 0, 'relationship': 1, 'workclass': 2, ...}\n cat_embed_input: List, Optional, default = None\n List of Tuples with the column name, number of unique values and\n embedding dimension. e.g. [(education, 11, 32), ...]\n cat_embed_dropout: float, default = 0.1\n Categorical embeddings dropout\n use_cat_bias: bool, default = False,\n Boolean indicating if bias will be used for the categorical embeddings\n cat_embed_activation: Optional, str, default = None,\n Activation function for the categorical embeddings, if any. `'tanh'`,\n `'relu'`, `'leaky_relu'` and `'gelu'` are supported.\n continuous_cols: List, Optional, default = None\n List with the name of the numeric (aka continuous) columns\n cont_norm_layer: str, default = \"batchnorm\"\n Type of normalization layer applied to the continuous features. Options\n are: 'layernorm', 'batchnorm' or None.\n embed_continuous: bool, default = False,\n Boolean indicating if the continuous columns will be embedded\n (i.e. 
passed each through a linear layer with or without activation)\n cont_embed_dim: int, default = 32,\n Size of the continuous embeddings\n cont_embed_dropout: float, default = 0.1,\n Dropout for the continuous embeddings\n use_cont_bias: bool, default = True,\n Boolean indicating if bias will be used for the continuous embeddings\n cont_embed_activation: Optional, str, default = None,\n Activation function for the continuous embeddings, if any. `'tanh'`,\n `'relu'`, `'leaky_relu'` and `'gelu'` are supported.\n n_steps: int, default = 3\n number of decision steps\n step_dim: int, default = 8\n Step's output dimension. This is the output dimension that\n ``WideDeep`` will collect and connect to the output neuron(s). For\n a better understanding of the function of this and the upcoming\n parameters, please see the `paper\n <https://arxiv.org/abs/1908.07442>`_.\n attn_dim: int, default = 8\n Attention dimension\n dropout: float, default = 0.0\n GLU block's internal dropout\n n_glu_step_dependent: int, default = 2\n number of GLU Blocks [FC -> BN -> GLU] that are step dependent\n n_glu_shared: int, default = 2\n number of GLU Blocks [FC -> BN -> GLU] that will be shared\n across decision steps\n ghost_bn: bool, default=True\n Boolean indicating if `Ghost Batch Normalization\n <https://arxiv.org/abs/1705.08741>`_ will be used.\n virtual_batch_size: int, default = 128\n Batch size when using Ghost Batch Normalization\n momentum: float, default = 0.02\n Ghost Batch Normalization's momentum. The dreamquark-ai advises for\n very low values. However high values are used in the original\n publication. During our tests higher values lead to better results\n gamma: float, default = 1.3\n Relaxation parameter in the paper. When gamma = 1, a feature is\n enforced to be used only at one decision step. As gamma increases,\n more flexibility is provided to use a feature at multiple decision\n steps\n epsilon: float, default = 1e-15\n Float to avoid log(0). Always keep low\n mask_type: str, default = \"sparsemax\"\n Mask function to use. Either \"sparsemax\" or \"entmax\"\n\n Attributes\n ----------\n cat_and_cont_embed: ``nn.Module``\n This is the module that processes the categorical and continuous columns\n tabnet_encoder: ``nn.Module``\n ``Module`` containing the TabNet encoder. See the `paper\n <https://arxiv.org/abs/1908.07442>`_.\n output_dim: int\n The output dimension of the model. 
This is a required attribute\n neccesary to build the ``WideDeep`` class\n\n Example\n --------\n >>> import torch\n >>> from pytorch_widedeep.models import TabNet\n >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)\n >>> colnames = ['a', 'b', 'c', 'd', 'e']\n >>> cat_embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]\n >>> column_idx = {k:v for v,k in enumerate(colnames)}\n >>> model = TabNet(column_idx=column_idx, cat_embed_input=cat_embed_input, continuous_cols = ['e'])\n >>> out = model(X_tab)\n \"\"\"\n\n def __init__(\n self,\n column_idx: Dict[str, int],\n cat_embed_input: Optional[List[Tuple[str, int, int]]] = None,\n cat_embed_dropout: float = 0.1,\n use_cat_bias: bool = False,\n cat_embed_activation: Optional[str] = None,\n continuous_cols: Optional[List[str]] = None,\n cont_norm_layer: str = None,\n embed_continuous: bool = False,\n cont_embed_dim: int = 32,\n cont_embed_dropout: float = 0.1,\n use_cont_bias: bool = True,\n cont_embed_activation: Optional[str] = None,\n n_steps: int = 3,\n step_dim: int = 8,\n attn_dim: int = 8,\n dropout: float = 0.0,\n n_glu_step_dependent: int = 2,\n n_glu_shared: int = 2,\n ghost_bn: bool = True,\n virtual_batch_size: int = 128,\n momentum: float = 0.02,\n gamma: float = 1.3,\n epsilon: float = 1e-15,\n mask_type: str = \"sparsemax\",\n ):\n super(TabNet, self).__init__(\n column_idx=column_idx,\n cat_embed_input=cat_embed_input,\n cat_embed_dropout=cat_embed_dropout,\n use_cat_bias=use_cat_bias,\n cat_embed_activation=cat_embed_activation,\n continuous_cols=continuous_cols,\n cont_norm_layer=cont_norm_layer,\n embed_continuous=embed_continuous,\n cont_embed_dim=cont_embed_dim,\n cont_embed_dropout=cont_embed_dropout,\n use_cont_bias=use_cont_bias,\n cont_embed_activation=cont_embed_activation,\n )\n\n self.n_steps = n_steps\n self.step_dim = step_dim\n self.attn_dim = attn_dim\n self.dropout = dropout\n self.n_glu_step_dependent = n_glu_step_dependent\n self.n_glu_shared = n_glu_shared\n self.ghost_bn = ghost_bn\n self.virtual_batch_size = virtual_batch_size\n self.momentum = momentum\n self.gamma = gamma\n self.epsilon = epsilon\n self.mask_type = mask_type\n\n # Embeddings are instantiated at the base model\n self.embed_out_dim = self.cat_and_cont_embed.output_dim\n\n # TabNet\n self.tabnet_encoder = TabNetEncoder(\n self.embed_out_dim,\n n_steps,\n step_dim,\n attn_dim,\n dropout,\n n_glu_step_dependent,\n n_glu_shared,\n ghost_bn,\n virtual_batch_size,\n momentum,\n gamma,\n epsilon,\n mask_type,\n )\n self.output_dim: int = step_dim\n\n def forward(self, X: Tensor) -> Tuple[Tensor, Tensor]:\n x = self._get_embeddings(X)\n steps_output, M_loss = self.tabnet_encoder(x)\n res = torch.sum(torch.stack(steps_output, dim=0), dim=0)\n return (res, M_loss)\n\n def forward_masks(self, X: Tensor) -> Tuple[Tensor, Dict[int, Tensor]]:\n x = self._get_embeddings(X)\n return self.tabnet_encoder.forward_masks(x)\n\n\nclass TabNetPredLayer(nn.Module):\n def __init__(self, inp, out):\n r\"\"\"This class is a 'hack' required because TabNet is a very particular\n model within ``WideDeep``.\n\n TabNet's forward method within ``WideDeep`` outputs two tensors, one\n with the last layer's activations and the sparse regularization\n factor. Since the output needs to be collected by ``WideDeep`` to then\n Sequentially build the output layer (connection to the output\n neuron(s)) I need to code a custom TabNetPredLayer that accepts two\n inputs. 
This will be used by the ``WideDeep`` class.\n \"\"\"\n super(TabNetPredLayer, self).__init__()\n self.pred_layer = nn.Linear(inp, out, bias=False)\n initialize_non_glu(self.pred_layer, inp, out)\n\n def forward(self, tabnet_tuple: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tensor]:\n res, M_loss = tabnet_tuple[0], tabnet_tuple[1]\n return self.pred_layer(res), M_loss\n" ]
[ [ "torch.nn.Sequential" ], [ "torch.nn.Linear", "torch.nn.Identity", "torch.nn.LSTM", "torch.nn.GRU", "torch.nn.ModuleList", "torch.tensor", "torch.Tensor", "torch.nn.Embedding" ], [ "torch.nn.Dropout", "torch.nn.LayerNorm" ], [ "torch.Size", "numpy.random.choice", "numpy.random.rand", "numpy.arange", "numpy.vstack" ], [ "torch.nn.Linear", "torch.stack" ] ]
dczifra/lightly
[ "d8bff271c6951da5b1b28c5d4c31ceba41aead80" ]
[ "docs/source/getting_started/benchmarks/cifar10_benchmark.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nBenchmark Results\n\nUpdated: 18.02.2022 (6618fa3c36b0c9f3a9d7a21bcdb00bf4fd258ee8))\n\n------------------------------------------------------------------------------------------\n| Model | Batch Size | Epochs | KNN Test Accuracy | Time | Peak GPU Usage |\n------------------------------------------------------------------------------------------\n| BarlowTwins | 128 | 200 | 0.835 | 193.4 Min | 2.2 GByte |\n| BYOL | 128 | 200 | 0.872 | 217.0 Min | 2.3 GByte |\n| DINO | 128 | 200 | 0.868 | 220.7 Min | 2.3 GByte |\n| Moco | 128 | 200 | 0.838 | 229.5 Min | 2.3 GByte |\n| NNCLR | 128 | 200 | 0.838 | 198.7 Min | 2.2 GByte |\n| SimCLR | 128 | 200 | 0.822 | 182.7 Min | 2.2 GByte |\n| SimSiam | 128 | 200 | 0.779 | 182.6 Min | 2.3 GByte |\n| SwaV | 128 | 200 | 0.806 | 182.4 Min | 2.2 GByte |\n------------------------------------------------------------------------------------------\n| BarlowTwins | 512 | 200 | 0.827 | 160.7 Min | 7.5 GByte |\n| BYOL | 512 | 200 | 0.872 | 188.5 Min | 7.7 GByte |\n| DINO | 512 | 200 | 0.862 | 191.1 Min | 7.5 GByte |\n| Moco (*) | 512 | 200 | 0.850 | 196.8 Min | 7.8 GByte |\n| NNCLR (*) | 512 | 200 | 0.836 | 164.7 Min | 7.6 GByte |\n| SimCLR | 512 | 200 | 0.828 | 158.2 Min | 7.5 GByte |\n| SimSiam | 512 | 200 | 0.814 | 159.0 Min | 7.6 GByte |\n| SwaV | 512 | 200 | 0.833 | 158.4 Min | 7.5 GByte |\n------------------------------------------------------------------------------------------\n| BarlowTwins | 512 | 800 | 0.857 | 641.5 Min | 7.5 GByte |\n| BYOL | 512 | 800 | 0.911 | 754.2 Min | 7.8 GByte |\n| DINO | 512 | 800 | 0.884 | 765.5 Min | 7.6 GByte |\n| Moco (*) | 512 | 800 | 0.900 | 787.7 Min | 7.8 GByte |\n| NNCLR (*) | 512 | 800 | 0.896 | 659.2 Min | 7.6 GByte |\n| SimCLR | 512 | 800 | 0.875 | 632.5 Min | 7.5 GByte |\n| SimSiam | 512 | 800 | 0.906 | 636.5 Min | 7.6 GByte |\n| SwaV | 512 | 800 | 0.881 | 634.9 Min | 7.5 GByte |\n------------------------------------------------------------------------------------------\n\n(*): Increased size of memory bank from 4096 to 8192 to avoid too quickly \nchanging memory bank due to larger batch size.\n\nThe benchmarks were created on a single NVIDIA RTX A6000.\n\nNote that this benchmark also supports a multi-GPU setup. If you run it on\na system with multiple GPUs make sure that you kill all the processes when\nkilling the application. Due to the way we setup this benchmark the distributed\nprocesses might continue the benchmark if one of the nodes is killed.\nIf you know how to fix this don't hesitate to create an issue or PR :)\n\n\"\"\"\nimport copy\nimport os\n\nimport time\nimport lightly\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom lightly.models import modules\nfrom lightly.models.modules import heads\nfrom lightly.models import utils\nfrom lightly.utils import BenchmarkModule\nfrom pytorch_lightning.loggers import TensorBoardLogger\n\nlogs_root_dir = os.path.join(os.getcwd(), 'benchmark_logs')\n\n# set max_epochs to 800 for long run (takes around 10h on a single V100)\nmax_epochs = 1\nnum_workers = 8\nknn_k = 200\nknn_t = 0.1\nclasses = 10\n\n# Set to True to enable Distributed Data Parallel training.\ndistributed = True\n\n# Set to True to enable Synchronized Batch Norm (requires distributed=True). 
\n# If enabled the batch norm is calculated over all gpus, otherwise the batch\n# norm is only calculated from samples on the same gpu.\nsync_batchnorm = False\n\n# Set to True to gather features from all gpus before calculating \n# the loss (requires distributed=True).\n# If enabled then the loss on every gpu is calculated with features from all \n# gpus, otherwise only features from the same gpu are used.\ngather_distributed = True \n\n# benchmark\nn_runs = 1 # optional, increase to create multiple runs and report mean + std\nbatch_size = 512\nlr_factor = batch_size / 128 # scales the learning rate linearly with batch size\n\n# use a GPU if available\n#gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0\ngpus = 4 if torch.cuda.is_available() else 0\nprint(gpus)\nif distributed:\n distributed_backend = 'ddp'\n # reduce batch size for distributed training\n batch_size = batch_size // gpus\nelse:\n distributed_backend = None\n # limit to single gpu if not using distributed training\n gpus = min(gpus, 1)\n\n# Adapted from our MoCo Tutorial on CIFAR-10\n#\n# Replace the path with the location of your CIFAR-10 dataset.\n# We assume we have a train folder with subfolders\n# for each class and .png images inside.\n#\n# You can download `CIFAR-10 in folders from kaggle \n# <https://www.kaggle.com/swaroopkml/cifar10-pngs-in-folders>`_.\n\n# The dataset structure should be like this:\n# cifar10/train/\n# L airplane/\n# L 10008_airplane.png\n# L ...\n# L automobile/\n# L bird/\n# L cat/\n# L deer/\n# L dog/\n# L frog/\n# L horse/\n# L ship/\n# L truck/\npath_to_train = './data/cifar10/train/'\npath_to_test = './data/cifar10/test/'\n\n# Use SimCLR augmentations, additionally, disable blur for cifar10\ncollate_fn = lightly.data.SimCLRCollateFunction(\n input_size=32,\n gaussian_blur=0.,\n)\n\n# Multi crop augmentation for SwAV, additionally, disable blur for cifar10\nswav_collate_fn = lightly.data.SwaVCollateFunction(\n crop_sizes=[32],\n crop_counts=[2], # 2 crops @ 32x32px\n crop_min_scales=[0.14],\n gaussian_blur=0,\n)\n\n# Multi crop augmentation for DINO, additionally, disable blur for cifar10\ndino_collate_fn = lightly.data.DINOCollateFunction(\n global_crop_size=32,\n n_local_views=0,\n gaussian_blur=(0, 0, 0),\n)\n\n# No additional augmentations for the test set\ntest_transforms = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n mean=lightly.data.collate.imagenet_normalize['mean'],\n std=lightly.data.collate.imagenet_normalize['std'],\n )\n])\n\ndataset_train_ssl = lightly.data.LightlyDataset(\n input_dir=path_to_train\n)\n\n# we use test transformations for getting the feature for kNN on train data\ndataset_train_kNN = lightly.data.LightlyDataset(\n input_dir=path_to_train,\n transform=test_transforms\n)\n\ndataset_test = lightly.data.LightlyDataset(\n input_dir=path_to_test,\n transform=test_transforms\n)\n\ndef get_data_loaders(batch_size: int, model):\n \"\"\"Helper method to create dataloaders for ssl, kNN train and kNN test\n\n Args:\n batch_size: Desired batch size for all dataloaders\n \"\"\"\n col_fn = collate_fn\n if isinstance(model, SwaVModel):\n col_fn = swav_collate_fn\n elif isinstance(model, DINOModel):\n col_fn = dino_collate_fn\n dataloader_train_ssl = torch.utils.data.DataLoader(\n dataset_train_ssl,\n batch_size=batch_size,\n shuffle=True,\n collate_fn=col_fn,\n drop_last=True,\n num_workers=num_workers\n )\n\n dataloader_train_kNN = torch.utils.data.DataLoader(\n dataset_train_kNN,\n 
batch_size=batch_size,\n shuffle=False,\n drop_last=False,\n num_workers=num_workers\n )\n\n dataloader_test = torch.utils.data.DataLoader(\n dataset_test,\n batch_size=batch_size,\n shuffle=False,\n drop_last=False,\n num_workers=num_workers\n )\n\n return dataloader_train_ssl, dataloader_train_kNN, dataloader_test\n\nclass MocoModel(BenchmarkModule):\n def __init__(self, dataloader_kNN, num_classes):\n super().__init__(dataloader_kNN, num_classes)\n\n # create a ResNet backbone and remove the classification head\n num_splits = 0 if sync_batchnorm else 8\n resnet = lightly.models.ResNetGenerator('resnet-18', num_splits=num_splits)\n self.backbone = nn.Sequential(\n *list(resnet.children())[:-1],\n nn.AdaptiveAvgPool2d(1)\n )\n\n # create a moco model based on ResNet\n self.projection_head = heads.MoCoProjectionHead(512, 512, 128)\n self.backbone_momentum = copy.deepcopy(self.backbone)\n self.projection_head_momentum = copy.deepcopy(self.projection_head)\n utils.deactivate_requires_grad(self.backbone_momentum)\n utils.deactivate_requires_grad(self.projection_head_momentum)\n\n # create our loss with the optional memory bank\n self.criterion = lightly.loss.NTXentLoss(\n temperature=0.1,\n memory_bank_size=4096,\n )\n \n def forward(self, x):\n x = self.backbone(x).flatten(start_dim=1)\n return self.projection_head(x)\n\n def training_step(self, batch, batch_idx):\n (x0, x1), _, _ = batch\n\n # update momentum\n utils.update_momentum(self.backbone, self.backbone_momentum, 0.99)\n utils.update_momentum(self.projection_head, self.projection_head_momentum, 0.99)\n\n def step(x0_, x1_):\n x1_, shuffle = utils.batch_shuffle(x1_, distributed=distributed)\n x0_ = self.backbone(x0_).flatten(start_dim=1)\n x0_ = self.projection_head(x0_)\n\n x1_ = self.backbone_momentum(x1_).flatten(start_dim=1)\n x1_ = self.projection_head_momentum(x1_)\n x1_ = utils.batch_unshuffle(x1_, shuffle, distributed=distributed)\n return x0_, x1_\n\n # We use a symmetric loss (model trains faster at little compute overhead)\n # https://colab.research.google.com/github/facebookresearch/moco/blob/colab-notebook/colab/moco_cifar10_demo.ipynb\n loss_1 = self.criterion(*step(x0, x1))\n loss_2 = self.criterion(*step(x1, x0))\n\n loss = 0.5 * (loss_1 + loss_2)\n self.log('train_loss_ssl', loss)\n return loss\n\n def configure_optimizers(self):\n params = list(self.backbone.parameters()) + list(self.projection_head.parameters())\n optim = torch.optim.SGD(\n params, \n lr=6e-2 * lr_factor,\n momentum=0.9, \n weight_decay=5e-4,\n )\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)\n return [optim], [scheduler]\n\n\nclass SimCLRModel(BenchmarkModule):\n def __init__(self, dataloader_kNN, num_classes):\n super().__init__(dataloader_kNN, num_classes)\n # create a ResNet backbone and remove the classification head\n resnet = lightly.models.ResNetGenerator('resnet-18')\n self.backbone = nn.Sequential(\n *list(resnet.children())[:-1],\n nn.AdaptiveAvgPool2d(1)\n )\n self.projection_head = heads.SimCLRProjectionHead(512, 512, 128)\n self.criterion = lightly.loss.NTXentLoss()\n\n def forward(self, x):\n x = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(x)\n return z\n\n def training_step(self, batch, batch_index):\n (x0, x1), _, _ = batch\n z0 = self.forward(x0)\n z1 = self.forward(x1)\n loss = self.criterion(z0, z1)\n self.log('train_loss_ssl', loss)\n return loss\n\n def configure_optimizers(self):\n optim = torch.optim.SGD(\n self.parameters(), \n lr=6e-2 * lr_factor,\n momentum=0.9, \n 
weight_decay=5e-4\n )\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)\n return [optim], [scheduler]\n\n\nclass SimSiamModel(BenchmarkModule):\n def __init__(self, dataloader_kNN, num_classes):\n super().__init__(dataloader_kNN, num_classes)\n # create a ResNet backbone and remove the classification head\n resnet = lightly.models.ResNetGenerator('resnet-18')\n self.backbone = nn.Sequential(\n *list(resnet.children())[:-1],\n nn.AdaptiveAvgPool2d(1)\n )\n self.prediction_head = heads.SimSiamPredictionHead(2048, 512, 2048)\n # use a 2-layer projection head for cifar10 as described in the paper\n self.projection_head = heads.ProjectionHead([\n (\n 512,\n 2048,\n nn.BatchNorm1d(2048),\n nn.ReLU(inplace=True)\n ),\n (\n 2048,\n 2048,\n nn.BatchNorm1d(2048),\n None\n )\n ])\n self.criterion = lightly.loss.NegativeCosineSimilarity()\n \n def forward(self, x):\n f = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(f)\n p = self.prediction_head(z)\n z = z.detach()\n return z, p\n\n def training_step(self, batch, batch_idx):\n (x0, x1), _, _ = batch\n z0, p0 = self.forward(x0)\n z1, p1 = self.forward(x1)\n loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))\n self.log('train_loss_ssl', loss)\n return loss\n\n def configure_optimizers(self):\n optim = torch.optim.SGD(\n self.parameters(), \n lr=6e-2, # no lr-scaling, results in better training stability\n momentum=0.9,\n weight_decay=5e-4\n )\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)\n return [optim], [scheduler]\n\nclass BarlowTwinsModel(BenchmarkModule):\n def __init__(self, dataloader_kNN, num_classes):\n super().__init__(dataloader_kNN, num_classes)\n # create a ResNet backbone and remove the classification head\n resnet = lightly.models.ResNetGenerator('resnet-18')\n self.backbone = nn.Sequential(\n *list(resnet.children())[:-1],\n nn.AdaptiveAvgPool2d(1)\n )\n # use a 2-layer projection head for cifar10 as described in the paper\n self.projection_head = heads.ProjectionHead([\n (\n 512,\n 2048,\n nn.BatchNorm1d(2048),\n nn.ReLU(inplace=True)\n ),\n (\n 2048,\n 2048,\n None,\n None\n )\n ])\n\n self.criterion = lightly.loss.BarlowTwinsLoss(gather_distributed=gather_distributed)\n\n def forward(self, x):\n x = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(x)\n return z\n\n def training_step(self, batch, batch_index):\n (x0, x1), _, _ = batch\n z0 = self.forward(x0)\n z1 = self.forward(x1)\n loss = self.criterion(z0, z1)\n self.log('train_loss_ssl', loss)\n return loss\n\n def configure_optimizers(self):\n optim = torch.optim.SGD(\n self.parameters(), \n lr=6e-2 * lr_factor,\n momentum=0.9, \n weight_decay=5e-4\n )\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)\n return [optim], [scheduler]\n\nclass BYOLModel(BenchmarkModule):\n def __init__(self, dataloader_kNN, num_classes):\n super().__init__(dataloader_kNN, num_classes)\n # create a ResNet backbone and remove the classification head\n resnet = lightly.models.ResNetGenerator('resnet-18')\n self.backbone = nn.Sequential(\n *list(resnet.children())[:-1],\n nn.AdaptiveAvgPool2d(1)\n )\n\n # create a byol model based on ResNet\n self.projection_head = heads.BYOLProjectionHead(512, 1024, 256)\n self.prediction_head = heads.BYOLProjectionHead(256, 1024, 256)\n\n self.backbone_momentum = copy.deepcopy(self.backbone)\n self.projection_head_momentum = copy.deepcopy(self.projection_head)\n\n utils.deactivate_requires_grad(self.backbone_momentum)\n 
utils.deactivate_requires_grad(self.projection_head_momentum)\n\n self.criterion = lightly.loss.NegativeCosineSimilarity()\n\n def forward(self, x):\n y = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(y)\n p = self.prediction_head(z)\n return p\n\n def forward_momentum(self, x):\n y = self.backbone_momentum(x).flatten(start_dim=1)\n z = self.projection_head_momentum(y)\n z = z.detach()\n return z\n\n def training_step(self, batch, batch_idx):\n utils.update_momentum(self.backbone, self.backbone_momentum, m=0.99)\n utils.update_momentum(self.projection_head, self.projection_head_momentum, m=0.99)\n (x0, x1), _, _ = batch\n p0 = self.forward(x0)\n z0 = self.forward_momentum(x0)\n p1 = self.forward(x1)\n z1 = self.forward_momentum(x1)\n loss = 0.5 * (self.criterion(p0, z1) + self.criterion(p1, z0))\n self.log('train_loss_ssl', loss)\n return loss\n\n def configure_optimizers(self):\n params = list(self.backbone.parameters()) \\\n + list(self.projection_head.parameters()) \\\n + list(self.prediction_head.parameters())\n optim = torch.optim.SGD(\n params, \n lr=6e-2 * lr_factor,\n momentum=0.9, \n weight_decay=5e-4,\n )\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)\n return [optim], [scheduler]\n\nclass SwaVModel(BenchmarkModule):\n def __init__(self, dataloader_kNN, num_classes):\n super().__init__(dataloader_kNN, num_classes)\n # create a ResNet backbone and remove the classification head\n resnet = lightly.models.ResNetGenerator('resnet-18')\n self.backbone = nn.Sequential(\n *list(resnet.children())[:-1],\n nn.AdaptiveAvgPool2d(1)\n )\n\n self.projection_head = heads.SwaVProjectionHead(512, 512, 128)\n self.prototypes = heads.SwaVPrototypes(128, 512) # use 512 prototypes\n\n self.criterion = lightly.loss.SwaVLoss(sinkhorn_gather_distributed=gather_distributed)\n\n def forward(self, x):\n x = self.backbone(x).flatten(start_dim=1)\n x = self.projection_head(x)\n x = nn.functional.normalize(x, dim=1, p=2)\n return self.prototypes(x)\n\n def training_step(self, batch, batch_idx):\n # normalize the prototypes so they are on the unit sphere\n self.prototypes.normalize()\n\n # the multi-crop dataloader returns a list of image crops where the\n # first two items are the high resolution crops and the rest are low\n # resolution crops\n multi_crops, _, _ = batch\n multi_crop_features = [self.forward(x) for x in multi_crops]\n\n # split list of crop features into high and low resolution\n high_resolution_features = multi_crop_features[:2]\n low_resolution_features = multi_crop_features[2:]\n\n # calculate the SwaV loss\n loss = self.criterion(\n high_resolution_features,\n low_resolution_features\n )\n\n self.log('train_loss_ssl', loss)\n return loss\n\n def configure_optimizers(self):\n optim = torch.optim.Adam(\n self.parameters(),\n lr=1e-3 * lr_factor,\n weight_decay=1e-6,\n )\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)\n return [optim], [scheduler]\n\n\nclass NNCLRModel(BenchmarkModule):\n def __init__(self, dataloader_kNN, num_classes):\n super().__init__(dataloader_kNN, num_classes)\n # create a ResNet backbone and remove the classification head\n resnet = lightly.models.ResNetGenerator('resnet-18')\n self.backbone = nn.Sequential(\n *list(resnet.children())[:-1],\n nn.AdaptiveAvgPool2d(1)\n )\n self.prediction_head = heads.NNCLRPredictionHead(256, 4096, 256)\n # use only a 2-layer projection head for cifar10\n self.projection_head = heads.ProjectionHead([\n (\n 512,\n 2048,\n nn.BatchNorm1d(2048),\n 
nn.ReLU(inplace=True)\n ),\n (\n 2048,\n 256,\n nn.BatchNorm1d(256),\n None\n )\n ])\n\n self.criterion = lightly.loss.NTXentLoss()\n self.memory_bank = modules.NNMemoryBankModule(size=4096)\n\n def forward(self, x):\n y = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(y)\n p = self.prediction_head(z)\n z = z.detach()\n return z, p\n\n def training_step(self, batch, batch_idx):\n (x0, x1), _, _ = batch\n z0, p0 = self.forward(x0)\n z1, p1 = self.forward(x1)\n z0 = self.memory_bank(z0, update=False)\n z1 = self.memory_bank(z1, update=True)\n loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))\n return loss\n\n def configure_optimizers(self):\n optim = torch.optim.SGD(\n self.parameters(), \n lr=6e-2 * lr_factor,\n momentum=0.9, \n weight_decay=5e-4,\n )\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)\n return [optim], [scheduler]\n\n\nclass DINOModel(BenchmarkModule):\n def __init__(self, dataloader_kNN, num_classes):\n super().__init__(dataloader_kNN, num_classes)\n # create a ResNet backbone and remove the classification head\n resnet = lightly.models.ResNetGenerator('resnet-18')\n self.backbone = nn.Sequential(\n *list(resnet.children())[:-1],\n nn.AdaptiveAvgPool2d(1)\n )\n self.head = self._build_projection_head()\n self.teacher_backbone = copy.deepcopy(self.backbone)\n self.teacher_head = self._build_projection_head()\n\n utils.deactivate_requires_grad(self.teacher_backbone)\n utils.deactivate_requires_grad(self.teacher_head)\n\n self.criterion = lightly.loss.DINOLoss(output_dim=2048)\n\n def _build_projection_head(self):\n head = heads.DINOProjectionHead(512, 2048, 256, 2048, batch_norm=True)\n # use only 2 layers for cifar10\n head.layers = heads.ProjectionHead([\n (512, 2048, nn.BatchNorm1d(2048), nn.GELU()),\n (2048, 256, None, None),\n ]).layers\n return head\n\n def forward(self, x):\n y = self.backbone(x).flatten(start_dim=1)\n z = self.head(y)\n return z\n\n def forward_teacher(self, x):\n y = self.teacher_backbone(x).flatten(start_dim=1)\n z = self.teacher_head(y)\n return z\n\n def training_step(self, batch, batch_idx):\n utils.update_momentum(self.backbone, self.teacher_backbone, m=0.99)\n utils.update_momentum(self.head, self.teacher_head, m=0.99)\n views, _, _ = batch\n views = [view.to(self.device) for view in views]\n global_views = views[:2]\n teacher_out = [self.forward_teacher(view) for view in global_views]\n student_out = [self.forward(view) for view in views]\n loss = self.criterion(teacher_out, student_out, epoch=self.current_epoch)\n self.log('train_loss_ssl', loss)\n return loss\n\n def configure_optimizers(self):\n param = list(self.backbone.parameters()) \\\n + list(self.head.parameters())\n optim = torch.optim.SGD(\n param,\n lr=6e-2 * lr_factor,\n momentum=0.9,\n weight_decay=5e-4,\n )\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, max_epochs)\n return [optim], [scheduler]\n\nmodels = [\n BarlowTwinsModel, \n BYOLModel,\n DINOModel,\n MocoModel,\n NNCLRModel,\n SimCLRModel,\n SimSiamModel,\n SwaVModel,\n]\nbench_results = dict()\n\nexperiment_version = None\n# loop through configurations and train models\nfor BenchmarkModel in models:\n runs = []\n model_name = BenchmarkModel.__name__.replace('Model', '')\n for seed in range(n_runs):\n pl.seed_everything(seed)\n dataloader_train_ssl, dataloader_train_kNN, dataloader_test = get_data_loaders(\n batch_size=batch_size, \n model=BenchmarkModel,\n )\n benchmark_model = BenchmarkModel(dataloader_train_kNN, classes)\n\n # Save logs to: 
{CWD}/benchmark_logs/cifar10/{experiment_version}/{model_name}/\n # If multiple runs are specified a subdirectory for each run is created.\n sub_dir = model_name if n_runs <= 1 else f'{model_name}/run{seed}'\n logger = TensorBoardLogger(\n save_dir=os.path.join(logs_root_dir, 'cifar10'),\n name='',\n sub_dir=sub_dir,\n version=experiment_version,\n )\n if experiment_version is None:\n # Save results of all models under same version directory\n experiment_version = logger.version\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n dirpath=os.path.join(logger.log_dir, 'checkpoints')\n )\n trainer = pl.Trainer(\n max_epochs=max_epochs, \n gpus=gpus,\n default_root_dir=logs_root_dir,\n strategy=distributed_backend,\n sync_batchnorm=sync_batchnorm,\n logger=logger,\n callbacks=[checkpoint_callback]\n )\n start = time.time()\n trainer.fit(\n benchmark_model,\n train_dataloaders=dataloader_train_ssl,\n val_dataloaders=dataloader_test\n )\n end = time.time()\n run = {\n 'model': model_name,\n 'batch_size': batch_size,\n 'epochs': max_epochs,\n 'max_accuracy': benchmark_model.max_accuracy,\n 'runtime': end - start,\n 'gpu_memory_usage': torch.cuda.max_memory_allocated(),\n 'seed': seed,\n }\n runs.append(run)\n print(run)\n\n # delete model and trainer + free up cuda memory\n del benchmark_model\n del trainer\n torch.cuda.reset_peak_memory_stats()\n torch.cuda.empty_cache()\n \n bench_results[model_name] = runs\n\n# print results table\nheader = (\n f\"| {'Model':<13} | {'Batch Size':>10} | {'Epochs':>6} \"\n f\"| {'KNN Test Accuracy':>18} | {'Time':>10} | {'Peak GPU Usage':>14} |\"\n)\nprint('-' * len(header))\nprint(header)\nprint('-' * len(header))\nfor model, results in bench_results.items():\n runtime = np.array([result['runtime'] for result in results])\n runtime = runtime.mean() / 60 # convert to min\n accuracy = np.array([result['max_accuracy'] for result in results])\n gpu_memory_usage = np.array([result['gpu_memory_usage'] for result in results])\n gpu_memory_usage = gpu_memory_usage.max() / (1024**3) # convert to gbyte\n\n if len(accuracy) > 1:\n accuracy_msg = f\"{accuracy.mean():>8.3f} +- {accuracy.std():>4.3f}\"\n else:\n accuracy_msg = f\"{accuracy.mean():>18.3f}\"\n\n print(\n f\"| {model:<13} | {batch_size:>10} | {max_epochs:>6} \"\n f\"| {accuracy_msg} | {runtime:>6.1f} Min \"\n f\"| {gpu_memory_usage:>8.1f} GByte |\",\n flush=True\n )\nprint('-' * len(header))\n" ]
[ [ "torch.nn.functional.normalize", "numpy.array", "torch.cuda.reset_peak_memory_stats", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.cuda.max_memory_allocated", "torch.optim.SGD", "torch.nn.ReLU", "torch.cuda.empty_cache", "torch.cuda.is_available", "torch.nn.BatchNorm1d", "torch.utils.data.DataLoader", "torch.nn.AdaptiveAvgPool2d", "torch.nn.GELU" ] ]
calispac/cta-lstchain
[ "83c7058a291c55f5841e984b99f22ac49e759d0f" ]
[ "lstchain/image/pdf.py" ]
[ "import numpy as np\n\n\ndef log_gaussian(x, mean, sigma):\n \"\"\"\n Computes the log-probability of X=x for a Gaussian of mean=mean and sigma=sigma\n Parameters\n ----------\n x\n mean\n sigma\n\n Returns\n -------\n\n \"\"\"\n\n log_pdf = -(x - mean) ** 2 / (2 * sigma ** 2)\n log_pdf = log_pdf - np.log((np.sqrt(2 * np.pi) * sigma))\n\n return log_pdf\n\n\ndef log_gaussian2d(size, x, y, x_cm, y_cm, width, length, psi):\n\n scale_w = 1. / (2. * width ** 2)\n scale_l = 1. / (2. * length ** 2)\n a = np.cos(psi) ** 2 * scale_l + np.sin(psi) ** 2 * scale_w\n b = np.sin(2 * psi) * (scale_w - scale_l) / 2.\n c = np.cos(psi) ** 2 * scale_w + np.sin(psi) ** 2 * scale_l\n\n norm = 1. / (2 * np.pi * width * length)\n\n log_pdf = - (a * (x - x_cm) ** 2 - 2 * b * (x - x_cm) * (y - y_cm) + c * (\n y - y_cm) ** 2)\n\n log_pdf += np.log(norm) + np.log(size)\n\n return log_pdf" ]
[ [ "numpy.sin", "numpy.sqrt", "numpy.log", "numpy.cos" ] ]
cpfpengfei/deepchem
[ "a3d827ddeaa181157237894abe5055e200cfd27e" ]
[ "deepchem/utils/test/test_molecule_graph.py" ]
[ "import unittest\nimport pytest\nimport numpy as np\nfrom deepchem.utils.molecule_graph import MoleculeGraphData, BatchMoleculeGraphData\n\n\nclass TestMoleculeGraph(unittest.TestCase):\n\n def test_molecule_graph_data(self):\n num_nodes, num_node_features = 4, 32\n num_edges, num_edge_features = 6, 32\n node_features = np.random.random_sample((num_nodes, num_node_features))\n edge_features = np.random.random_sample((num_edges, num_edge_features))\n targets = np.random.random_sample(5)\n edge_index = np.array([\n [0, 1, 2, 2, 3, 4],\n [1, 2, 0, 3, 4, 0],\n ])\n graph_features = None\n\n mol_graph = MoleculeGraphData(\n node_features=node_features,\n edge_index=edge_index,\n targets=targets,\n edge_features=edge_features,\n graph_features=graph_features)\n\n assert mol_graph.num_nodes == num_nodes\n assert mol_graph.num_node_features == num_node_features\n assert mol_graph.num_edges == num_edges\n assert mol_graph.num_edge_features == num_edge_features\n assert mol_graph.targets.shape == (5,)\n\n def test_invalid_molecule_graph_data(self):\n with pytest.raises(ValueError):\n invalid_node_features_type = list(np.random.random_sample((5, 5)))\n edge_index = np.array([\n [0, 1, 2, 2, 3, 4],\n [1, 2, 0, 3, 4, 0],\n ])\n targets = np.random.random_sample(5)\n mol_graph = MoleculeGraphData(\n node_features=invalid_node_features_type,\n edge_index=edge_index,\n targets=targets,\n )\n\n with pytest.raises(ValueError):\n node_features = np.random.random_sample((5, 5))\n invalid_edge_index_shape = np.array([\n [0, 1, 2, 2, 3, 4],\n [1, 2, 0, 3, 4, 0],\n [2, 2, 1, 4, 0, 3],\n ])\n targets = np.random.random_sample(5)\n mol_graph = MoleculeGraphData(\n node_features=node_features,\n edge_index=invalid_edge_index_shape,\n targets=targets,\n )\n\n with pytest.raises(TypeError):\n node_features = np.random.random_sample((5, 5))\n mol_graph = MoleculeGraphData(node_features=node_features)\n\n def test_batch_molecule_graph_data(self):\n num_nodes_list, num_edge_list = [3, 4, 5], [2, 4, 5]\n num_node_features, num_edge_features = 32, 32\n edge_index_list = [\n np.array([[0, 1], [1, 2]]),\n np.array([[0, 1, 2, 3], [1, 2, 0, 2]]),\n np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]])\n ]\n targets = np.random.random_sample(5)\n\n molecule_graphs = [\n MoleculeGraphData(\n node_features=np.random.random_sample((num_nodes_list[i],\n num_node_features)),\n edge_index=edge_index_list[i],\n targets=targets,\n edge_features=np.random.random_sample((num_edge_list[i],\n num_edge_features)),\n graph_features=None) for i in range(len(num_edge_list))\n ]\n batch = BatchMoleculeGraphData(molecule_graphs)\n\n assert batch.num_nodes == sum(num_nodes_list)\n assert batch.num_node_features == num_node_features\n assert batch.num_edges == sum(num_edge_list)\n assert batch.num_edge_features == num_edge_features\n assert batch.targets.shape == (3, 5)\n assert batch.graph_index.shape == (sum(num_nodes_list),)\n" ]
[ [ "numpy.array", "numpy.random.random_sample" ] ]
gizatt/pose_estimation_segmentation_analysis
[ "5356fd85a01aff5b2284126f7349d9fc7f33c4b6" ]
[ "src/utils.py" ]
[ "import numpy as np\nimport os\nfrom sklearn.neighbors import NearestNeighbors\n\nfrom pydrake.multibody.rigid_body import RigidBody\nfrom pydrake.all import (\n AddFlatTerrainToWorld,\n AddModelInstancesFromSdfString,\n AddModelInstanceFromUrdfFile,\n FindResourceOrThrow,\n FloatingBaseType,\n InputPort,\n Isometry3,\n OutputPort,\n RgbdCamera,\n RigidBodyPlant,\n RigidBodyTree,\n RigidBodyFrame,\n RollPitchYaw,\n RollPitchYawFloatingJoint,\n RotationMatrix,\n Value,\n VisualElement,\n )\n\nimport meshcat\nimport meshcat.transformations as tf\nimport meshcat.geometry as g\n\n\n# From\n# https://www.opengl.org/discussion_boards/showthread.php/197893-View-and-Perspective-matrices\ndef normalize(x):\n return x / np.linalg.norm(x)\n\n\ndef save_pointcloud(pc, normals, path):\n joined = np.hstack([pc.T, normals.T])\n np.savetxt(path, joined)\n\n\ndef load_pointcloud(path):\n joined = np.loadtxt(path)\n return joined[:, 0:3].T, joined[:, 3:6].T\n\n\ndef translate(x):\n T = np.eye(4)\n T[0:3, 3] = x[:3]\n return T\n\n\ndef get_pose_error(tf_1, tf_2):\n rel_tf = transform_inverse(tf_1).dot(tf_2)\n if np.allclose(np.diag(rel_tf[0:3, 0:3]), [1., 1., 1.]):\n angle_dist = 0.\n else:\n # Angle from rotation matrix\n angle_dist = np.arccos(\n (np.sum(np.diag(rel_tf[0:3, 0:3])) - 1) / 2.)\n euclid_dist = np.linalg.norm(rel_tf[0:3, 3])\n return euclid_dist, angle_dist\n\n\n# If misalignment_tol = None, returns the average\n# distance between the model clouds when transformed\n# by est_tf and gt_tf (using nearest-point lookups\n# for each point in the gt-tf'd model cloud).\n# If misalignment_tol is a number, it returns\n# the percent of points that are misaligned by more\n# than the misalignment error under the same distance\n# metric.\ndef get_earth_movers_error(est_tf, gt_tf, model_cloud,\n misalignment_tol=0.005):\n # Transform the model cloud into both frames\n est_model_cloud = transform_points(est_tf, model_cloud)\n gt_model_cloud = transform_points(gt_tf, model_cloud)\n # For every point in the model cloud, find the distance\n # to the closest point in the estimated model cloud,\n # as a way of finding the swept volume between the\n # models in those poses.\n neigh = NearestNeighbors(n_neighbors=1)\n neigh.fit(gt_model_cloud.T)\n dist, _ = neigh.kneighbors(\n est_model_cloud[0:3, :].T, return_distance=True)\n if misalignment_tol is None:\n return np.mean(dist)\n else:\n return np.mean(dist > misalignment_tol)\n\n\ndef draw_points(vis, vis_prefix, name, points,\n normals=None, colors=None, size=0.001,\n normals_length=0.01):\n vis[vis_prefix][name].set_object(\n g.PointCloud(position=points,\n color=colors,\n size=size))\n n_pts = points.shape[1]\n if normals is not None:\n # Drawing normals for debug\n lines = np.zeros([3, n_pts*2])\n inds = np.array(range(0, n_pts*2, 2))\n lines[:, inds] = points[0:3, :]\n lines[:, inds+1] = points[0:3, :] + \\\n normals * normals_length\n vis[vis_prefix][\"%s_normals\" % name].set_object(\n meshcat.geometry.LineSegmentsGeometry(\n lines, None))\n\n\ndef transform_points(tf, pts):\n return ((tf[:3, :3].dot(pts).T) + tf[:3, 3]).T\n\n\ndef transform_inverse(tf):\n new_tf = np.eye(4)\n new_tf[:3, :3] = tf[:3, :3].T\n new_tf[:3, 3] = -new_tf[:3, :3].dot(tf[:3, 3])\n return new_tf\n\n\ndef lookat(eye, target, up):\n # For a camera with +x right, +y down, and +z forward.\n eye = np.array(eye)\n target = np.array(target)\n up = np.array(up)\n F = target[:3] - eye[:3]\n f = normalize(F)\n U = normalize(up[:3])\n s = np.cross(f, U) # right\n u = np.cross(s, f) # up\n M = 
np.eye(4)\n M[:3, :3] = np.vstack([s, -u, f]).T\n\n # OLD:\n # flip z -> x\n # -x -> y\n # -y -> z\n # CAMERA FORWARD is +x-axis\n # CAMERA RIGHT is -y axis\n # CAMERA UP is +z axis\n # Why does the Drake documentation lie to me???\n T = translate(eye)\n return T.dot(M)\n\n\ndef add_single_instance_to_rbt(\n rbt, config, instance_config, i,\n floating_base_type=FloatingBaseType.kRollPitchYaw):\n class_name = instance_config[\"class\"]\n if class_name not in config[\"objects\"].keys():\n raise ValueError(\"Class %s not in classes.\" % class_name)\n if len(instance_config[\"pose\"]) != 6:\n raise ValueError(\"Class %s has pose size != 6. Use RPY plz\" %\n class_name)\n frame = RigidBodyFrame(\n \"%s_%d\" % (class_name, i), rbt.world(),\n instance_config[\"pose\"][0:3],\n instance_config[\"pose\"][3:6])\n model_path = config[\"objects\"][class_name][\"model_path\"]\n _, extension = os.path.splitext(model_path)\n if extension == \".urdf\":\n AddModelInstanceFromUrdfFile(\n model_path, floating_base_type, frame, rbt)\n elif extension == \".sdf\":\n AddModelInstancesFromSdfString(\n open(model_path).read(), floating_base_type, frame, rbt)\n else:\n raise ValueError(\"Class %s has non-sdf and non-urdf model name.\" %\n class_name)\n\n\ndef setup_scene(rbt, config):\n if config[\"with_ground\"] is True:\n AddFlatTerrainToWorld(rbt)\n\n for i, instance_config in enumerate(config[\"instances\"]):\n add_single_instance_to_rbt(rbt, config, instance_config, i,\n floating_base_type=FloatingBaseType.kFixed)\n # Add camera geometry!\n camera_link = RigidBody()\n camera_link.set_name(\"camera_link\")\n # necessary so this last link isn't pruned by the rbt.compile() call\n camera_link.set_spatial_inertia(np.eye(6))\n camera_link.add_joint(\n rbt.world(),\n RollPitchYawFloatingJoint(\n \"camera_floating_base\",\n np.eye(4)))\n rbt.add_rigid_body(camera_link)\n\n # - Add frame for camera fixture.\n camera_frame = RigidBodyFrame(\n name=\"rgbd_camera_frame\", body=camera_link,\n xyz=[0.0, 0., 0.], rpy=[0., 0., 0.])\n rbt.addFrame(camera_frame)\n rbt.compile()" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.savetxt", "numpy.zeros", "numpy.mean", "numpy.eye", "numpy.loadtxt", "numpy.diag", "numpy.vstack", "numpy.hstack", "sklearn.neighbors.NearestNeighbors", "numpy.cross" ] ]
mbchang/societal-decision-making
[ "23fd6de4df33f985d360330a9d5a2c29faeb8e52" ]
[ "starter_code/modules/value_function.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom starter_code.modules.networks import MLP, MinigridCNN\nfrom mnist.embedded_mnist import MNIST_CNN\n\nclass SimpleValueFn(nn.Module):\n def __init__(self, state_dim, hdim):\n super(SimpleValueFn, self).__init__()\n self.value_net = MLP(dims=[state_dim, *hdim, 1])\n\n def forward(self, state):\n state_values = self.value_net(state)\n return state_values\n\nclass CNNValueFn(nn.Module):\n def __init__(self, state_dim):\n super(CNNValueFn, self).__init__()\n self.state_dim = state_dim\n if self.state_dim == (1, 64, 64):\n self.encoder = MNIST_CNN(1)\n self.decoder = lambda x: x\n elif self.state_dim == (7, 7, 3):\n self.encoder = MinigridCNN(*state_dim[:-1])\n self.decoder = nn.Linear(self.encoder.image_embedding_size, 1)\n else:\n assert False\n\n def forward(self, state):\n state_values = self.decoder(self.encoder(state))\n return state_values" ]
[ [ "torch.nn.Linear" ] ]
youngmg1995/NES-Music-Maker
[ "aeda10a541cfd439cfa46c45e63411e0d98e41c1" ]
[ "VAE/full_model/model_training.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 1 17:14:19 2020\n\n@author: Mitchell\n\nmodel_training.py\n~~~~~~~~~~~~~~~~~\nThis file serves as a script for building and training our VAE model. To do\nso we used the VAE and DataSequence classes defined in the file `VAE.py`, as\nwell as helper functions from the file `dataset_utils` for loading and parsing\nour datasets.\n\nThe user has the the ability to specify several parameters that control the\nloading of our data, the structure of our model, as well as the traininig plan\nfor our model. After training is complete the script also plots metrics tracked\nduring training and saves the final model.\n\n\"\"\"\n\n# Imports\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nfrom dataset_utils import load_training, load_validation\nfrom VAE import VAE, DataSequence\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os, time, json\n\n\n### Load Data\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Parameters for shape of dataset (note these are also used for model def. and\n# training.)\nmeasures = 8\nmeasure_len = 96\n\n# training\ntraining_foldername = '../../nesmdb24_seprsco/train/'\ntrain_save_filename = 'transformed_dataset.json'\ndataset , labels2int_map , int2labels_map = \\\n load_training(training_foldername, train_save_filename,\n measures = measures, measure_len = measure_len)\n\n# validation\nvalidation_foldername = '../../nesmdb24_seprsco/valid/'\nval_save_filename = 'transformed_val_dataset.json'\nval_dataset = load_validation(validation_foldername,\\\n labels2int_map, val_save_filename,\n measures = measures, measure_len = measure_len)\n\n\n### Build Model\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n### Model Parameters\nlatent_dim = 124\ninput_dims = [mapping.shape[0]-1 for mapping in int2labels_map]\ndropout = .1\nmaxnorm = None\nvae_b1 , vae_b2 = .02 , .1\n\n# Build Model\nmodel = VAE(latent_dim, input_dims, measures, measure_len, dropout, \n maxnorm, vae_b1 , vae_b2)\nmodel.build([tf.TensorShape([None, measures, measure_len, input_dims[i]])\n for i in range(4)])\nmodel.summary()\n\n\n### Train Model\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Training Parameters\nbatch_size = 100\nepochs = 10\n\n# Cost Function\ncost_function = model.vae_loss\n\n# Learning_rate schedule\nlr_0 = .001\ndecay_rate = .998\nlr_decay = lambda t: lr_0 * decay_rate**t\nlr_schedule = tf.keras.callbacks.LearningRateScheduler(lr_decay)\n\n# Optimizer\noptimizer = tf.keras.optimizers.Adam()\n\n# Define callbacks\ncallbacks = [lr_schedule]\n\n# Keras Sequences for Datasets (need to use since one-hot datasets too\n# large for storing in memory)\ntraining_seq = DataSequence(dataset, int2labels_map, batch_size)\nvalidation_seq = DataSequence(val_dataset, int2labels_map, batch_size)\n\n# Compile Model\nmodel.compile(optimizer = optimizer,\n loss = cost_function)\n\n# Train model\ntic = time.perf_counter()\nhistory = model.fit_generator(generator = training_seq,\n epochs = epochs)\ntoc = time.perf_counter()\nprint(f\"Trained Model in {(toc - tic)/60:0.1f} minutes\")\n\n\n### Plot Training Metrics\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ntraining_loss = history.history['loss']\n\n# Total Loss\nplt.figure(1)\nplt.plot(training_loss, 'b', label='Training')\nplt.title('Loss vs Time')\nplt.xlabel('Training Epoch')\nplt.ylabel('Avg. 
Total Loss')\nplt.legend()\nplt.show()\n\n\n### Save Model and History\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Save Model Weights\nsave_model = False\nif save_model:\n checkpoint_dir = '.\\\\training_checkpoints'\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model_ckpt\")\n model.save_weights(checkpoint_prefix)\n print('Model weights saved to files: '+checkpoint_prefix+'.*')\n \n# Save Training History\nsave_history = False\nif save_history:\n checkpoint_dir = '.\\\\training_checkpoints'\n history_filename = os.path.join(checkpoint_dir, \"training_history.json\")\n with open(history_filename, 'w') as f:\n json.dump({\n key:[float(value) for value in history.history[key]] \n for key in history.history\n }, f)\n print('Training history saved to file: '+ history_filename)\n\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#----------------------------------END FILE------------------------------------\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "tensorflow.TensorShape", "matplotlib.pyplot.figure", "tensorflow.keras.callbacks.LearningRateScheduler", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "tensorflow.keras.optimizers.Adam" ] ]
aaronchen0316/flare
[ "47a2a89af635dfec6b41a873625ac2411da14ebb", "47a2a89af635dfec6b41a873625ac2411da14ebb" ]
[ "tests/test_mgp.py", "flare/mgp/splines_methods.py" ]
[ "import numpy as np\nimport os\nimport pickle\nimport pytest\nimport re\nimport time\nimport shutil\n\nfrom copy import deepcopy\nfrom numpy import allclose, isclose\n\nfrom flare import struc, env, gp\nfrom flare.parameters import Parameters\nfrom flare.mgp import MappedGaussianProcess\nfrom flare.lammps import lammps_calculator\nfrom flare.utils.element_coder import _Z_to_mass, _Z_to_element, _element_to_Z\nfrom flare.ase.calculator import FLARE_Calculator\nfrom flare.ase.atoms import FLARE_Atoms\nfrom ase.calculators.lammpsrun import LAMMPS\n\nfrom .fake_gp import get_gp, get_random_structure\nfrom .mgp_test import clean, compare_triplet, predict_atom_diag_var\n\nbody_list = [\"2\", \"3\"]\nmulti_list = [True, False]\nforce_block_only = False\ncurr_path = os.getcwd()\n\n\n@pytest.mark.skipif(\n not os.environ.get(\"lmp\", False),\n reason=(\n \"lmp not found \"\n \"in environment: Please install LAMMPS \"\n \"and set the $lmp env. \"\n \"variable to point to the executatble.\"\n ),\n)\n@pytest.fixture(scope=\"module\")\ndef all_gp():\n\n allgp_dict = {}\n np.random.seed(123)\n for bodies in body_list:\n for multihyps in multi_list:\n gp_model = get_gp(\n bodies,\n \"mc\",\n multihyps,\n cellabc=[1.5, 1, 2],\n force_only=force_block_only,\n noa=5,\n )\n gp_model.parallel = True\n gp_model.n_cpus = 2\n\n allgp_dict[f\"{bodies}{multihyps}\"] = gp_model\n\n yield allgp_dict\n del allgp_dict\n\n\n@pytest.fixture(scope=\"module\")\ndef all_mgp():\n\n allmgp_dict = {}\n for bodies in [\"2\", \"3\", \"2+3\"]:\n for multihyps in [False, True]:\n allmgp_dict[f\"{bodies}{multihyps}\"] = None\n\n yield allmgp_dict\n del allmgp_dict\n\n\n@pytest.fixture(scope=\"module\")\ndef all_lmp():\n\n all_lmp_dict = {}\n species = [\"H\", \"He\"]\n specie_symbol_list = \" \".join(species)\n masses = [\n f\"{i} {_Z_to_mass[_element_to_Z[species[i]]]}\" for i in range(len(species))\n ]\n parameters = {\n \"command\": os.environ.get(\"lmp\"), # set up executable for ASE\n \"newton\": \"off\",\n \"pair_style\": \"mgp\",\n \"mass\": masses,\n }\n\n # set up input params\n for bodies in body_list:\n for multihyps in multi_list:\n # create ASE calc\n label = f\"{bodies}{multihyps}\"\n files = [f\"{label}.mgp\"]\n by = \"yes\" if bodies == \"2\" else \"no\"\n ty = \"yes\" if bodies == \"3\" else \"no\"\n parameters[\"pair_coeff\"] = [\n f\"* * {label}.mgp {specie_symbol_list} {by} {ty}\"\n ]\n\n lmp_calc = LAMMPS(\n label=label,\n keep_tmp_files=True,\n tmp_dir=\"./tmp/\",\n parameters=parameters,\n files=files,\n specorder=species,\n )\n all_lmp_dict[f\"{bodies}{multihyps}\"] = lmp_calc\n\n yield all_lmp_dict\n del all_lmp_dict\n\n\n@pytest.mark.parametrize(\"bodies\", body_list)\n@pytest.mark.parametrize(\"multihyps\", multi_list)\ndef test_init(bodies, multihyps, all_mgp, all_gp):\n \"\"\"\n test the init function\n \"\"\"\n\n clean()\n\n gp_model = all_gp[f\"{bodies}{multihyps}\"]\n\n # grid parameters\n grid_params = {}\n if \"2\" in bodies:\n grid_params[\"twobody\"] = {\"grid_num\": [160], \"lower_bound\": [0.02]}\n if \"3\" in bodies:\n grid_params[\"threebody\"] = {\"grid_num\": [31, 32, 33], \"lower_bound\": [0.02] * 3}\n\n lammps_location = f\"{bodies}{multihyps}\"\n data = gp_model.training_statistics\n\n try:\n mgp_model = MappedGaussianProcess(\n grid_params=grid_params,\n unique_species=data[\"species\"],\n n_cpus=1,\n lmp_file_name=lammps_location,\n var_map=\"simple\",\n )\n except:\n mgp_model = MappedGaussianProcess(\n grid_params=grid_params,\n unique_species=data[\"species\"],\n n_cpus=1,\n 
lmp_file_name=lammps_location,\n var_map=None,\n )\n\n all_mgp[f\"{bodies}{multihyps}\"] = mgp_model\n\n\n@pytest.mark.parametrize(\"bodies\", body_list)\n@pytest.mark.parametrize(\"multihyps\", multi_list)\ndef test_build_map(all_gp, all_mgp, bodies, multihyps):\n \"\"\"\n test the mapping for mc_simple kernel\n \"\"\"\n gp_model = all_gp[f\"{bodies}{multihyps}\"]\n mgp_model = all_mgp[f\"{bodies}{multihyps}\"]\n mgp_model.build_map(gp_model)\n\n\n# with open(f'grid_{bodies}_{multihyps}.pickle', 'wb') as f:\n# pickle.dump(mgp_model, f)\n\n\n@pytest.mark.parametrize(\"bodies\", body_list)\n@pytest.mark.parametrize(\"multihyps\", multi_list)\ndef test_write_model(all_mgp, bodies, multihyps):\n \"\"\"\n test the mapping for mc_simple kernel\n \"\"\"\n mgp_model = all_mgp[f\"{bodies}{multihyps}\"]\n mgp_model.write_model(f\"my_mgp_{bodies}_{multihyps}\")\n\n mgp_model.write_model(f\"my_mgp_{bodies}_{multihyps}\", format=\"pickle\")\n\n # Ensure that user is warned when a non-mean_only\n # model is serialized into a Dictionary\n with pytest.warns(Warning):\n mgp_model.var_map = \"pca\"\n mgp_model.as_dict()\n\n mgp_model.var_map = \"simple\"\n mgp_model.as_dict()\n\n\n@pytest.mark.parametrize(\"bodies\", body_list)\n@pytest.mark.parametrize(\"multihyps\", multi_list)\ndef test_load_model(all_mgp, bodies, multihyps):\n \"\"\"\n test the mapping for mc_simple kernel\n \"\"\"\n name = f\"my_mgp_{bodies}_{multihyps}.json\"\n all_mgp[f\"{bodies}{multihyps}\"] = MappedGaussianProcess.from_file(name)\n os.remove(name)\n\n name = f\"my_mgp_{bodies}_{multihyps}.pickle\"\n all_mgp[f\"{bodies}{multihyps}\"] = MappedGaussianProcess.from_file(name)\n os.remove(name)\n\n\n@pytest.mark.parametrize(\"bodies\", body_list)\n@pytest.mark.parametrize(\"multihyps\", multi_list)\ndef test_cubic_spline(all_gp, all_mgp, bodies, multihyps):\n \"\"\"\n test the predict for mc_simple kernel\n \"\"\"\n\n mgp_model = all_mgp[f\"{bodies}{multihyps}\"]\n delta = 1e-4\n\n if \"3\" in bodies:\n body_name = \"threebody\"\n elif \"2\" in bodies:\n body_name = \"twobody\"\n\n nmap = len(mgp_model.maps[body_name].maps)\n print(\"nmap\", nmap)\n for i in range(nmap):\n maxvalue = np.max(np.abs(mgp_model.maps[body_name].maps[i].mean.__coeffs__))\n if maxvalue > 0:\n comp_code = mgp_model.maps[body_name].maps[i].species_code\n\n if \"3\" in bodies:\n\n c_pt = np.array([[0.3, 0.4, 0.5]])\n c, cderv = (\n mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)\n )\n cderv = cderv.reshape([-1])\n\n for j in range(3):\n a_pt = deepcopy(c_pt)\n b_pt = deepcopy(c_pt)\n a_pt[0][j] += delta\n b_pt[0][j] -= delta\n a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]\n b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]\n num_derv = (a - b) / (2 * delta)\n print(\"spline\", comp_code, num_derv, cderv[j])\n assert np.isclose(num_derv, cderv[j], rtol=1e-2)\n\n elif \"2\" in bodies:\n center = np.sum(mgp_model.maps[body_name].maps[i].bounds) / 2.0\n a_pt = np.array([[center + delta]])\n b_pt = np.array([[center - delta]])\n c_pt = np.array([[center]])\n a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]\n b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]\n c, cderv = (\n mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)\n )\n cderv = cderv.reshape([-1])[0]\n num_derv = (a - b) / (2 * delta)\n print(\"spline\", num_derv, cderv)\n assert np.isclose(num_derv, cderv, rtol=1e-2)\n\n\n@pytest.mark.parametrize(\"bodies\", body_list)\n@pytest.mark.parametrize(\"multihyps\", multi_list)\ndef test_predict(all_gp, all_mgp, 
bodies, multihyps):\n \"\"\"\n test the predict for mc_simple kernel\n \"\"\"\n\n gp_model = all_gp[f\"{bodies}{multihyps}\"]\n mgp_model = all_mgp[f\"{bodies}{multihyps}\"]\n\n # # debug\n # filename = f'grid_{bodies}_{multihyps}.pickle'\n # with open(filename, 'rb') as f:\n # mgp_model = pickle.load(f)\n\n nenv = 6\n cell = 1.0 * np.eye(3)\n cutoffs = gp_model.cutoffs\n unique_species = gp_model.training_statistics[\"species\"]\n struc_test, f = get_random_structure(cell, unique_species, nenv)\n test_envi = env.AtomicEnvironment(\n struc_test, 0, cutoffs, cutoffs_mask=gp_model.hyps_mask\n )\n\n if \"2\" in bodies:\n kernel_name = \"twobody\"\n elif \"3\" in bodies:\n kernel_name = \"threebody\"\n # compare_triplet(mgp_model.maps['threebody'], gp_model, test_envi)\n\n assert Parameters.compare_dict(\n gp_model.hyps_mask, mgp_model.maps[kernel_name].hyps_mask\n )\n\n gp_pred_en, gp_pred_envar = gp_model.predict_local_energy_and_var(test_envi)\n gp_pred = np.array([gp_model.predict(test_envi, d + 1) for d in range(3)]).T\n print(\"mgp pred\")\n mgp_pred = mgp_model.predict(test_envi)\n\n # check mgp is within 2 meV/A of the gp\n map_str = \"energy\"\n gp_pred_var = gp_pred_envar\n print(\"mgp_en, gp_en\", mgp_pred[3], gp_pred_en)\n assert np.allclose(mgp_pred[3], gp_pred_en, rtol=2e-3), f\"{bodies} body\" \\\n f\" {map_str} mapping is wrong\"\n\n # if multihyps and ('3' in bodies):\n # pytest.skip()\n\n print(\"mgp_pred\", mgp_pred[0])\n print(\"gp_pred\", gp_pred[0])\n\n print(\"isclose?\", mgp_pred[0] - gp_pred[0], gp_pred[0])\n assert np.allclose(\n mgp_pred[0], gp_pred[0], atol=1e-3\n ), f\"{bodies} body {map_str} mapping is wrong\"\n\n if mgp_model.var_map == \"simple\":\n print(bodies, multihyps)\n for i in range(struc_test.nat):\n test_envi = env.AtomicEnvironment(\n struc_test, i, cutoffs, cutoffs_mask=gp_model.hyps_mask\n )\n mgp_pred = mgp_model.predict(test_envi)\n mgp_var = mgp_pred[1]\n gp_var = predict_atom_diag_var(test_envi, gp_model, kernel_name)\n print(\"mgp_var, gp_var\", mgp_var, gp_var)\n assert np.allclose(mgp_var, gp_var, rtol=1e-2)\n\n print(\"struc_test positions\", struc_test.positions, struc_test.species_labels)\n\n\n@pytest.mark.skipif(\n not os.environ.get(\"lmp\", False),\n reason=(\n \"lmp not found \"\n \"in environment: Please install LAMMPS \"\n \"and set the $lmp env. 
\"\n \"variable to point to the executatble.\"\n ),\n)\n@pytest.mark.parametrize(\"bodies\", body_list)\n@pytest.mark.parametrize(\"multihyps\", multi_list)\ndef test_lmp_predict(all_lmp, all_gp, all_mgp, bodies, multihyps):\n \"\"\"\n test the lammps implementation\n \"\"\"\n\n # pytest.skip()\n\n prefix = f\"{bodies}{multihyps}\"\n\n mgp_model = all_mgp[prefix]\n gp_model = all_gp[prefix]\n lmp_calculator = all_lmp[prefix]\n ase_calculator = FLARE_Calculator(gp_model, mgp_model, par=False, use_mapping=True)\n\n # create test structure\n np.random.seed(1)\n cell = np.diag(np.array([1, 1, 1])) * 4\n nenv = 10\n unique_species = gp_model.training_statistics[\"species\"]\n cutoffs = gp_model.cutoffs\n struc_test, f = get_random_structure(cell, unique_species, nenv)\n\n # build ase atom from struc\n ase_atoms_flare = struc_test.to_ase_atoms()\n ase_atoms_flare = FLARE_Atoms.from_ase_atoms(ase_atoms_flare)\n ase_atoms_flare.set_calculator(ase_calculator)\n\n ase_atoms_lmp = deepcopy(struc_test).to_ase_atoms()\n ase_atoms_lmp.set_calculator(lmp_calculator)\n\n try:\n lmp_en = ase_atoms_lmp.get_potential_energy()\n flare_en = ase_atoms_flare.get_potential_energy()\n\n lmp_stress = ase_atoms_lmp.get_stress()\n flare_stress = ase_atoms_flare.get_stress()\n\n lmp_forces = ase_atoms_lmp.get_forces()\n flare_forces = ase_atoms_flare.get_forces()\n except Exception as e:\n os.chdir(curr_path)\n print(e)\n raise e\n\n os.chdir(curr_path)\n\n # check that lammps agrees with mgp to within 1 meV/A\n print(\"energy\", lmp_en - flare_en, flare_en)\n assert np.isclose(lmp_en, flare_en, atol=1e-3)\n print(\"force\", lmp_forces - flare_forces, flare_forces)\n assert np.isclose(lmp_forces, flare_forces, atol=1e-3).all()\n print(\"stress\", lmp_stress - flare_stress, flare_stress)\n assert np.isclose(lmp_stress, flare_stress, atol=1e-3).all()\n\n # check the lmp var\n # mgp_std = np.sqrt(mgp_pred[1])\n # print(\"isclose? diff:\", lammps_stds[atom_num]-mgp_std, \"mgp value\", mgp_std)\n # assert np.isclose(lammps_stds[atom_num], mgp_std, rtol=1e-2)\n\n clean(prefix=prefix)\n", "\"\"\"Cubic spline functions used for interpolation. \n\"\"\"\nimport numpy as np\nimport numpy\nfrom flare.mgp.cubic_splines_numba import *\n\n\nclass PCASplines:\n \"\"\"\n Build splines for PCA decomposition, mainly used for the mapping of the variance\n\n :param l_bounds: lower bound for the interpolation. \\\n E.g. 1-d for two-body, 3-d for three-body.\n :type l_bounds: numpy array\n :param u_bounds: upper bound for the interpolation.\n :type u_bounds: numpy array\n :param orders: grid numbers in each dimension. 
E.g, 1-d for two-body, \\\n 3-d for three-body, should be positive integers.\n :type orders: numpy array\n :param svd_rank: rank for decomposition of variance matrix,\\\n also equal to the number of mappings constructed for mapping variance.\\\n For two-body `svd_rank<=min(grid_num, train_size*3)`, \\\n for three-body `svd_rank<=min(grid_num_in_cube, train_size*3)`\n :type svd_rank: int\n \"\"\"\n\n def __init__(self, l_bounds, u_bounds, orders, svd_rank):\n self.svd_rank = svd_rank\n self.models = []\n for r in range(svd_rank):\n spline_u = CubicSpline(l_bounds, u_bounds, orders)\n self.models.append(spline_u)\n\n def build_cubic(self, y, u_bounds, l_bounds, orders):\n dim_0 = 1\n for d in range(len(y.shape) - 1):\n dim_0 *= y.shape[d]\n dim_1 = y.shape[-1]\n\n var_matr = np.reshape(y, (dim_0, dim_1))\n models = []\n for r in range(self.svd_rank):\n spline_u = CubicSpline(l_bounds, u_bounds, orders, var_matr[:, r])\n models.append(spline_u)\n return models\n\n def set_values(self, y):\n dim_0 = 1\n for d in range(len(y.shape) - 1):\n dim_0 *= y.shape[d]\n dim_1 = y.shape[-1]\n\n var_matr = np.reshape(y, (dim_0, dim_1))\n U, S, Vh = np.linalg.svd(var_matr, full_matrices=False)\n self.V = Vh[: self.svd_rank, :].T\n for r in range(self.svd_rank):\n self.models[r].set_values(S[r] * U[:, r])\n\n def __call__(self, x):\n y_pred = []\n rank = self.svd_rank\n for r in range(rank):\n y_pred.append(self.models[r](x))\n return np.array(y_pred)\n\n\nclass CubicSpline:\n\n \"\"\"\n Forked from Github repository: https://github.com/EconForge/interpolation.py.\\\n High-level API for cubic splines. \\\n Class representing a cubic spline interpolator on a regular cartesian grid.\n\n Creates a cubic spline interpolator on a regular cartesian grid.\n\n Args:\n a (numpy array of size d (float)): Lower bounds of the cartesian grid.\n b (numpy array of size d (float)): Upper bounds of the cartesian grid.\n orders (numpy array of size d (int)): Number of nodes along each \\\n dimension (=(n1,...,nd) )\n\n Other Parameters:\n values (numpy array (float)): (optional, (n1 x ... x nd) array). 
\\\n Values on the nodes of the function to interpolate.\n \"\"\"\n\n __grid__ = None\n __values__ = None\n __coeffs__ = None\n\n def __init__(self, a, b, orders, values=None):\n\n self.d = len(a)\n assert len(b) == self.d\n assert len(orders) == self.d\n self.a = np.array(a, dtype=float)\n self.b = np.array(b, dtype=float)\n self.orders = np.array(orders, dtype=int)\n self.dtype = self.a.dtype\n self.__coeffs__ = None\n\n if values is not None:\n self.set_values(values)\n\n def set_values(self, values):\n \"\"\"Set values on the nodes for the function to interpolate.\"\"\"\n\n values = np.array(values, dtype=float)\n\n if not np.all(np.isfinite(values)):\n raise Exception(\"Trying to interpolate non-finite values\")\n\n sh = self.orders.tolist()\n sh2 = [e + 2 for e in self.orders]\n\n values = values.reshape(sh)\n\n self.__values__ = values\n\n # this should be done without temporary memory allocation\n self.__coeffs__ = filter_coeffs(self.a, self.b, self.orders, values)\n\n def interpolate(self, points, values=None, with_derivatives=False):\n \"\"\"\n Interpolate spline at a list of points.\n\n :param points: (array-like) list of point where the spline is evaluated.\n :param values: (optional) container for inplace computation.\n :return values: (array-like) list of point where the spline is evaluated.\n \"\"\"\n\n if not np.all(np.isfinite(points)):\n raise Exception(\"Spline interpolator evaluated at non-finite points.\")\n\n if not with_derivatives:\n if points.ndim == 1:\n # evaluate only on one point\n points = np.array([points])\n N, d = points.shape\n assert d == self.d\n if values is None:\n values = np.empty(N, dtype=self.dtype)\n vec_eval_cubic_spline(\n self.a, self.b, self.orders, self.__coeffs__, points, values\n )\n return values\n else:\n N, d = points.shape\n assert d == self.d\n values, dvalues = vec_eval_cubic_splines_G(\n self.a,\n self.b,\n self.orders,\n self.__coeffs__,\n points,\n values,\n dvalues=None,\n )\n\n return values, dvalues\n\n @property\n def grid(self):\n \"\"\"Cartesian enumeration of all nodes.\"\"\"\n\n if self.__grid__ is None:\n self.__grid__ = mlinspace(self.a, self.b, self.orders)\n return self.__grid__\n\n def __call__(self, s, with_derivatives=False):\n \"\"\"Interpolate the spline at one or many points\"\"\"\n\n if s.ndim == 1:\n res = self.__call__(numpy.atleast_2d(s))\n return res[0]\n\n return self.interpolate(s, with_derivatives=with_derivatives)\n\n\ndef vec_eval_cubic_spline(a, b, orders, coefs, points, values=None):\n \"\"\"\n Forked from Github repository: https://github.com/EconForge/interpolation.py.\\\n Evaluates a cubic spline at many points\n\n :param a: Lower bounds of the cartesian grid.\n :type a: numpy array of size d (float)\n :param b: Upper bounds of the cartesian grid.\n :type b: numpy array of size d (float)\n :param orders: Number of nodes along each dimension (=(n1,...,nd) )\n :type orders: numpy array of size d (int)\n :param coefs: Filtered coefficients.\n :type coefs: array of dimension d, and size (n1+2, ..., nd+2)\n :param point: List of points where the splines must be interpolated.\n :type point: array of size N x d\n :param values: (optional) If not None, contains the result.\n :type values: array of size N\n\n :return value: Interpolated values. 
values[i] contains spline evaluated at point points[i,:].\n :type value: array of size N\n \"\"\"\n\n a = numpy.array(a, dtype=float)\n b = numpy.array(b, dtype=float)\n orders = numpy.array(orders, dtype=int)\n\n d = a.shape[0]\n\n if values is None:\n N = points.shape[0]\n values = numpy.empty(N)\n\n if d == 1:\n vec_eval_cubic_spline_1(a, b, orders, coefs, points, values)\n elif d == 2:\n vec_eval_cubic_spline_2(a, b, orders, coefs, points, values)\n elif d == 3:\n vec_eval_cubic_spline_3(a, b, orders, coefs, points, values)\n elif d == 4:\n vec_eval_cubic_spline_4(a, b, orders, coefs, points, values)\n\n return values\n\n\ndef vec_eval_cubic_splines_G(a, b, orders, mcoefs, points, values=None, dvalues=None):\n\n a = numpy.array(a, dtype=float)\n b = numpy.array(b, dtype=float)\n orders = numpy.array(orders, dtype=int)\n\n d = a.shape[0]\n N = points.shape[0]\n # n_sp = mcoefs.shape[-1]\n n_sp = 1\n\n if values is None:\n values = numpy.empty((N, n_sp))\n\n if dvalues is None:\n dvalues = numpy.empty((N, d, n_sp))\n\n if d == 1:\n vec_eval_cubic_splines_G_1(a, b, orders, mcoefs, points, values, dvalues)\n\n elif d == 2:\n vec_eval_cubic_splines_G_2(a, b, orders, mcoefs, points, values, dvalues)\n\n elif d == 3:\n vec_eval_cubic_splines_G_3(a, b, orders, mcoefs, points, values, dvalues)\n\n elif d == 4:\n vec_eval_cubic_splines_G_4(a, b, orders, mcoefs, points, values, dvalues)\n\n return [values, dvalues]\n" ]
[ [ "numpy.array", "numpy.isclose", "numpy.random.seed", "numpy.sum", "numpy.eye", "numpy.allclose", "numpy.abs" ], [ "numpy.array", "numpy.empty", "numpy.reshape", "numpy.isfinite", "numpy.linalg.svd", "numpy.atleast_2d" ] ]
csala/SDGym
[ "1e5177a67e897dd097e21aa3e81aad708330c38f" ]
[ "sdgym/benchmark.py" ]
[ "import logging\nimport os\nimport types\nfrom datetime import datetime\n\nimport pandas as pd\n\nfrom sdgym.data import load_dataset\nfrom sdgym.evaluate import compute_scores\nfrom sdgym.synthesizers import BaseSynthesizer\n\nLOGGER = logging.getLogger(__name__)\n\nBASE_DIR = os.path.dirname(__file__)\nLEADERBOARD_PATH = os.path.join(BASE_DIR, 'leaderboard.csv')\n\nDEFAULT_DATASETS = [\n \"adult\",\n \"alarm\",\n \"asia\",\n \"census\",\n \"child\",\n \"covtype\",\n \"credit\",\n \"grid\",\n \"gridr\",\n \"insurance\",\n \"intrusion\",\n \"mnist12\",\n \"mnist28\",\n \"news\",\n \"ring\"\n]\n\n\ndef compute_benchmark(synthesizer, datasets=DEFAULT_DATASETS, iterations=3):\n \"\"\"Compute the scores of a synthesizer over a list of datasets.\n\n The results are returned in a raw format as a ``pandas.DataFrame`` containing:\n - One row for each dataset+scoring method (for example, a classifier)\n - One column for each computed metric\n - The columns:\n - dataset\n - distance\n - name (of the scoring method)\n - iteration\n\n For example, evaluating a synthesizer on the ``adult`` and ``asia`` datasets with 2\n iterations produces a table similar to this::\n\n dataset name iter distance accuracy f1 syn_likelihood test_likelihood\n adult DecisionTree... 0 0.0 0.79 0.65 NaN NaN\n adult AdaBoost... 0 0.0 0.85 0.67 NaN NaN\n adult Logistic... 0 0.0 0.79 0.66 NaN NaN\n adult MLP... 0 0.0 0.84 0.67 NaN NaN\n adult DecisionTree... 1 0.0 0.80 0.66 NaN NaN\n adult AdaBoost... 1 0.0 0.86 0.68 NaN NaN\n adult Logistic... 1 0.0 0.79 0.65 NaN NaN\n adult MLP... 1 0.0 0.84 0.64 NaN NaN\n asia Bayesian ... 0 0.0 NaN NaN -2.23 -2.24\n asia Bayesian ... 1 0.0 NaN NaN -2.23 -2.24\n \"\"\"\n results = list()\n for dataset_name in datasets:\n LOGGER.info('Evaluating dataset %s', dataset_name)\n train, test, meta, categoricals, ordinals = load_dataset(dataset_name, benchmark=True)\n\n for iteration in range(iterations):\n try:\n synthesized = synthesizer(train, categoricals, ordinals)\n scores = compute_scores(train, test, synthesized, meta)\n scores['dataset'] = dataset_name\n scores['iteration'] = iteration\n results.append(scores)\n except Exception:\n LOGGER.exception('Error computing scores for %s on dataset %s - iteration %s',\n _get_synthesizer_name(synthesizer), dataset_name, iteration)\n\n return pd.concat(results, sort=False)\n\n\ndef _dataset_summary(grouped_df):\n dataset = grouped_df.name\n scores = grouped_df.mean().dropna()\n scores.index = dataset + '/' + scores.index\n\n return scores\n\n\ndef _summarize_scores(scores):\n \"\"\"Computes a summary of the scores obtained by a synthesizer.\n\n The raw scores returned by the ``compute_benchmark`` function are summarized\n by grouping them by dataset and computing the average.\n\n The results are then put in a ``pandas.Series`` object with one value per\n dataset and metric.\n\n As an example, the summary of a synthesizer that has been evaluated on the\n ``adult`` and the ``asia`` datasets produces the following output::\n\n adult/accuracy 0.8765\n adult/f1_micro 0.7654\n adult/f1_macro 0.7654\n asia/syn_likelihood -2.5364\n asia/test_likelihood -2.4321\n dtype: float64\n\n Args:\n scores (pandas.DataFrame):\n Raw Scores dataframe as returned by the ``compute_benchmark`` function.\n\n Returns:\n pandas.Series:\n Summarized scores series in the format described above.\n \"\"\"\n scores = scores.drop(['distance', 'iteration', 'name'], axis=1, errors='ignore')\n\n grouped = scores.groupby('dataset').apply(_dataset_summary)\n if isinstance(grouped, 
pd.Series):\n # If more than one dataset, grouped result is a series\n # with a multilevel index.\n return grouped.droplevel(0)\n\n # Otherwise, if there is only one dataset, it is DataFrame\n return grouped.iloc[0]\n\n\ndef _get_synthesizer_name(synthesizer):\n \"\"\"Get the name of the synthesizer function or class.\n\n If the given synthesizer is a function, return its name.\n If it is a method, return the name of the class to which\n the method belongs.\n\n Args:\n synthesizer (function or method):\n The synthesizer function or method.\n\n Returns:\n str:\n Name of the function or the class to which the method belongs.\n \"\"\"\n if isinstance(synthesizer, types.MethodType):\n synthesizer_name = synthesizer.__self__.__class__.__name__\n else:\n synthesizer_name = synthesizer.__name__\n\n return synthesizer_name\n\n\ndef _get_synthesizers(synthesizers):\n \"\"\"Get the dict of synthesizers from the input value.\n\n If the input is a synthesizer or an iterable of synthesizers, get their names\n and put them on a dict.\n\n Args:\n synthesizers (function, class, list, tuple or dict):\n A synthesizer (function or method or class) or an iterable of synthesizers\n or a dict containing synthesizer names as keys and synthesizers as values.\n\n Returns:\n dict[str, function]:\n dict containing synthesizer names as keys and function as values.\n\n Raises:\n TypeError:\n if neither a synthesizer or an iterable or a dict is passed.\n \"\"\"\n if callable(synthesizers):\n synthesizers = {_get_synthesizer_name(synthesizers): synthesizers}\n if isinstance(synthesizers, (list, tuple)):\n synthesizers = {\n _get_synthesizer_name(synthesizer): synthesizer\n for synthesizer in synthesizers\n }\n elif not isinstance(synthesizers, dict):\n raise TypeError('`synthesizers` can only be a function, a class, a list or a dict')\n\n for name, synthesizer in synthesizers.items():\n # If the synthesizer is one of the SDGym Synthesizer classes,\n # create and instance and replace it with its fit_sample method.\n if isinstance(synthesizer, type) and issubclass(synthesizer, BaseSynthesizer):\n synthesizers[name] = synthesizer().fit_sample\n\n return synthesizers\n\n\ndef benchmark(synthesizers, datasets=DEFAULT_DATASETS, iterations=3, add_leaderboard=True,\n leaderboard_path=LEADERBOARD_PATH, replace_existing=True):\n \"\"\"Compute the benchmark scores for the synthesizers and return a leaderboard.\n\n The ``synthesizers`` object can either be a single synthesizer or, an iterable of\n synthesizers or a dict containing synthesizer names as keys and synthesizers as values.\n\n If ``add_leaderboard`` is ``True``, append the obtained scores to the leaderboard\n stored in the ``lederboard_path``. By default, the leaderboard used is the one which\n is included in the package, which contains the scores obtained by the SDGym Synthesizers.\n\n If ``replace_existing`` is ``True`` and any of the given synthesizers already existed\n in the leaderboard, the old rows are dropped.\n\n Args:\n synthesizers (function, class, list, tuple or dict):\n The synthesizer or synthesizers to evaluate. It can be a single synthesizer\n (function or method or class), or an iterable of synthesizers, or a dict\n containing synthesizer names as keys and synthesizers as values. If the input\n is not a dict, synthesizer names will be extracted from the given object.\n datasets (list[str]):\n Names of the datasets to use for the benchmark. 
Defaults to all the ones available.\n iterations (int):\n Number of iterations to perform over each dataset and synthesizer. Defaults to 3.\n add_leaderboard (bool):\n Whether to append the obtained scores to the previous leaderboard or not. Defaults\n to ``True``.\n leaderboard_path (str):\n Path to where the leaderboard is stored. Defaults to the leaderboard included\n with the package, which contains the scores obtained by the SDGym synthesizers.\n replace_existing (bool):\n Whether to replace old scores or keep them in the returned leaderboard. Defaults\n to ``True``.\n\n Returns:\n pandas.DataFrame:\n Table containing one row per synthesizer and one column for each dataset and metric.\n \"\"\"\n synthesizers = _get_synthesizers(synthesizers)\n\n scores = list()\n for synthesizer_name, synthesizer in synthesizers.items():\n synthesizer_scores = compute_benchmark(synthesizer, datasets, iterations)\n summary_row = _summarize_scores(synthesizer_scores)\n summary_row.name = synthesizer_name\n scores.append(summary_row)\n\n leaderboard = pd.DataFrame(scores)\n leaderboard['timestamp'] = datetime.utcnow()\n\n if add_leaderboard:\n old_leaderboard = pd.read_csv(\n leaderboard_path,\n index_col=0,\n parse_dates=['timestamp']\n )[leaderboard.columns]\n if replace_existing:\n old_leaderboard.drop(labels=[leaderboard.index], errors='ignore', inplace=True)\n\n leaderboard = old_leaderboard.append(leaderboard, sort=False)\n\n return leaderboard\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.concat" ] ]
lkorczowski/cleandata
[ "dadfc2768c3b980ab0e4871ebd86eaeeb4b7eaaf" ]
[ "notebooks/template_preprocessing_columns.py" ]
[ "import pandas as pd\nfrom utils.config import Config\nimport numpy as np\nimport pandas as pd\n\n\ndef fun_clean_categogy1(array, keyw1, index, BOW):\n compty = 0\n c = 0\n for elm in array:\n if elm == \"oui\" or elm == \"parfois\":\n BOW[c].append(keyw1[index])\n compty += 1\n c += 1\n # print(compty)\n return BOW\n\n#Ajout des keywords de la catégorie 2 ATTENTION, ici j'ajoute tout le contenu des colonnes, donc il peut y avoir\n# une grande variété de mots qui sugissent à cause d'ici. De plus, ce sont souvent des mots composés ou des\n# séquences de mots. On peut envisager de ne sélectionner que le premier mot par exemple.\ndef fun_clean_categogy2(array, BOW):\n compty = 0\n c = 0\n for elm in array:\n if not elm == \"\":\n if not BOW[c].__contains__(elm):\n BOW[c].append(elm)\n compty += 1\n c += 1\n # print(compty)\n return BOW\n\n\ndef fun_clean_categogy3(array, keyw3, index, BOW, list_THR):\n compty = 0\n c = 0\n for elm in array:\n # print(elm)\n if not np.isnan(float(str(elm).replace(\",\", \".\"))):\n if float(str(elm).replace(\",\", \".\")) > list_THR[index]:\n if not BOW[c].__contains__(elm):\n BOW[c].append(keyw3[index])\n compty += 1\n c += 1\n print(compty)\n return BOW\n\n\nif __name__ == '__main__':\n # %%\n df = pd.read_csv(Config.csv_files[-1], sep=';', encoding='ISO-8859-1')\n df.columns\n #\n # d = {'col1': [1, 2], 'col2': [3, 4]}\n # df = pd.DataFrame(data=d)\n\n\n List_cat1 = [\"difficulté endormisst\", \"fatigue au reveil\", \"hyperacousie\", \"surdité\", \"SDE\", \"vertiges\",\n \"depression\", \"anxiété\"]\n\n #Keywords à associer aux colonnes de la catégorie 1\n keyw1 = [\"endormissement\", \"fatigue\", \"hyperacousie\", \"surdité\", \"somnolence\", \"vertige\", \"dépression\", \"anxiété\"]\n\n List_cat2 = [\"timbre acouphène\", \"type de douleurs\", \"type otalgie\", \"type de vertiges\",\n \"caractere particulier\", \"mode apparition\"]\n\n List_cat3 = [\"EVA depression\", \"epworth\", \"EVA anxiété\", \"EVA douleurs\", \"EVA hyperac\", \"EVA hypoac\",\n \"EVA Otalgie 1\", \"EVA SADAM\", \"EVA vertiges\", \"ISI\", \"score khalfa hyperacousie\", \"EVA concentration\"]\n\n # Keywords à associer aux colonnes de la catégorie 3\n keyw3 = [\"dépression\", \"somnolence\", \"anxiété\", \"douleurs\", \"hyperacousie\", \"hypoacousie\", \"otalgie\", \"mâchoire\",\n \"vertige\", \"sommeil\", \"hyperacousie\", \"concentration\"]\n\n # seuils de sélections à associer aux colonnes de la catégorie 3\n List_THR = [5, 10, 5, 5, 5, 5, 4, 3, 3, 12, 20, 5]\n\n cat4 = [\"intensité ac\"]\n\n compt = 0\n #Liste de mots clés associés à chaque patient. 
Une liste par patient\n BOW = [[] for i in range(len(df[df.columns[0]]))]\n\n #ajout des keywords de la categorie 1 à la liste des bag of words BOW\n for colname in List_cat1:\n # print(df[colname]) # show value before\n print(colname)\n BOW = fun_clean_categogy1(df[colname], keyw1, compt, BOW)\n compt += 1\n\n # ajout des keywords de la categorie 2 à la liste des bag of words BOW\n compt=0\n for colname in List_cat2:\n print(colname)\n BOW = fun_clean_categogy2(df[colname], BOW)\n compt += 1\n\n # ajout des keywords de la categorie 3 à la liste des bag of words BOW\n compt=0\n for colname in List_cat3:\n print(colname)\n BOW = fun_clean_categogy3(df[colname], keyw3, compt, BOW, List_THR)\n compt += 1\n\n #Nettoyage des valeurs \"NaN\" copiées par erreur par la catégorie 2\n for elm in BOW:\n if elm.__contains__(np.nan):\n elm.pop(elm.index(np.nan))\n\n print(BOW[:200]) # petit extrait de la liste des bag of words\n BOW2=[]\n for elm in BOW:\n stri=\"\"\n for st in elm:\n stri = stri + \" \" + st\n BOW2.append(stri)\n\n df2 = pd.DataFrame(BOW2)\n df2.to_csv('lettres_persanes.csv', sep=';', encoding='ISO-8859-1')\n print(df2)\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
GBruening/succes_predictor
[ "ae60bdf4e7c21228d35411f7e1527b78f34dd7ad" ]
[ "Pulling data/apiv2_pull.py" ]
[ "#%% First\nimport numpy as np\nimport json\nimport os\nimport pandas as pd\nimport requests\nfrom contextlib import closing\nimport time\nfrom datetime import datetime\nfrom requests.models import HTTPBasicAuth\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom requests import get\nfrom requests_futures.sessions import FuturesSession\nfrom bs4 import BeautifulSoup\n\nfrom dotenv import load_dotenv, dotenv_values\nfrom requests_oauthlib import OAuth2, OAuth2Session\n\n#%%\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\n\nenv_vars = dotenv_values('config.env')\nclient_id = env_vars['id']\nclient_secret = env_vars['secret']\ncode = env_vars['code']\n\ncallback_uri = \"http://localhost:8080\"\nauthorize_url = \"https://www.warcraftlogs.com/oauth/authorize\"\ntoken_url = \"https://www.warcraftlogs.com/oauth/token\"\n\n# warcraftlogs = OAuth2Session(client_id, redirect_uri=callback_uri)\n# authorization_url, state = warcraftlogs.authorization_url(authorize_url,\n# access_type=\"offline\")\n\n# token = warcraftlogs.fetch_token(token_url = token_url,\n# auth = HTTPBasicAuth(client_id, client_secret),\n# code = code)\n# access_token = token['access_token']\n# refresh_token = token['refresh_token']\n# with open('refresh_token.env', 'w') as f:\n# f.write('refresh_token = '+str(refresh_token)+'\\nacces_token = '+str(access_token))\n\nif os.path.isfile('refresh_token.env'):\n env_vars = dotenv_values('refresh_token.env')\n refresh_token = env_vars['refresh_token']\n access_token = env_vars['access_token']\nelse:\n raise 'Get your fresh token dumby'\n\n# print(refresh_token)\ntry:\n warcraftlogs = OAuth2Session(client_id = client_id)\n graphql_endpoint = \"https://www.warcraftlogs.com/api/v2/client\"\n headers = {\"Authorization\": f\"Bearer {access_token}\"}\n\n query = \"\"\"{\n reportData{\n reports(guildID: 95321, endTime: 1622872800000.0, startTime: 1605855600000.0){\n data{\n fights(difficulty: 5){\n name \n averageItemLevel\n # friendlyPlayers\n id\n }\n }\n }\n }\n }\"\"\"\n\n r = requests.post(graphql_endpoint, json={\"query\": query}, headers=headers) \nexcept:\n token = warcraftlogs.refresh_token(token_url = token_url,\n auth = HTTPBasicAuth(client_id, client_secret),\n refresh_token = refresh_token)\n access_token = token['access_token']\n refresh_token = token['refresh_token']\n with open('refresh_token.env', 'w') as f:\n f.write('refresh_token = '+str(refresh_token)+'\\naccess_token = '+str(access_token))\n \n warcraftlogs = OAuth2Session(client_id = client_id)\n graphql_endpoint = \"https://www.warcraftlogs.com/api/v2/client\"\n headers = {\"Authorization\": f\"Bearer {access_token}\"}\n\n query = \"\"\"{\n reportData{\n reports(guildID: 95321, endTime: 1622872800000.0, startTime: 1605855600000.0){\n data{\n fights(difficulty: 5){\n name \n averageItemLevel\n # friendlyPlayers\n id\n }\n }\n }\n }\n }\"\"\"\n\n r = requests.post(graphql_endpoint, json={\"query\": query}, headers=headers) \n\nwith open('..//get_guild_list/guild_list_hungering.json', encoding='utf-8') as f:\n guilds = json.load(f)\n\n#%%\n\ndef is_good_response_json(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('json') > -1)\n\ndef get_guild_id(guild):\n try:\n guild_id = int(guild['id'])\n except:\n query = \"\"\" \n {\n guildData{\n guild(name: \"%s\", serverSlug: \"%s\", 
serverRegion: \"%s\"){\n id\n }\n }\n }\n \"\"\" % (guild['name'], guild['realm'].replace(' ', '-'), guild['region'])\n r = requests.post(graphql_endpoint, json={\"query\": query}, headers=headers)\n guild_id = r.json()['data']['guildData']['guild']['id']\n return guild_id\n\ndef get_log_list(guild):\n guild['id'] = get_guild_id(guild)\n query = (\"{\"\n f\"reportData{{\"\n f\" reports(guildID: {guild['id']}, zoneID: 26){{\"\n f\" data{{\"\n f\" code\"\n f\" startTime\"\n f\" endTime\"\n f\" }}\"\n f\" }}\"\n f\"}}\"\n f\"}}\")\n r = requests.post(graphql_endpoint, json={\"query\": query}, headers=headers)\n log_list = r.json()['data']['reportData']['reports']['data']\n\n return log_list\n\ndef get_log_list_apiv1(guild):\n with open('..//..//Warcraftlogs//api_key.txt.') as f:\n api_key = f.readlines()[0]\n \n link = \"https://www.warcraftlogs.com:443/v1/reports/guild/\" + \\\n guild['name'] + \"/\" + guild['realm'].replace(' ', '-').replace(\"'\",\"\")+ \"/\" + \\\n guild['region'] + \"?api_key=\" + api_key\n\n guild_logs = requests.get(link)\n log_list = guild_logs.json()\n\n log_list_new = []\n for item in log_list:\n if item['zone'] == 26:\n log_list_new.append({'code': item['id'],\n 'startTime': item['start'],\n 'endTime': item['end']})\n \n return log_list_new\n\ndef get_pulls(log, guild):\n log_id = log['code']\n query = \"\"\"\n {\n reportData{\n report(code: \"%s\"){\n fights(difficulty: 5){\n name\n id\n averageItemLevel\n bossPercentage\n kill\n startTime\n endTime\n }\n }\n }\n }\n \"\"\" % (log_id)\n\n r = requests.post(graphql_endpoint, json={\"query\": query}, headers=headers)\n fight_list = r.json()['data']['reportData']['report']['fights']\n for k in range(len(fight_list)):\n fight_list[k].update({'log_code': log_id}) \n return fight_list\n\ndef get_fight_info(fight, guild, unique_id):\n code = fight['log_code']\n fight_ID = fight['id']\n start_time = fight['start_time']\n end_time = fight['end_time']\n query = \"\"\"\n {\n reportData{\n report(code: \"%s\"){\n table(fightIDs: %s, startTime: %s, endTime: %s)\n }\n }\n }\n \"\"\" % (code, fight_ID, str(start_time), str(end_time))\n r = requests.post(graphql_endpoint, json={\"query\": query}, headers=headers)\n table = r.json()['data']['reportData']['report']['table']['data']\n comp = table['composition']\n roles = table['playerDetails']\n player_list = []\n for role in roles:\n players = roles[role]\n for player in players:\n try:\n gear_ilvl = [piece['itemLevel'] for piece in player['combatantInfo']['gear']]\n ilvl = np.mean(gear_ilvl)\n except:\n try:\n ilvl = player['minItemLevel']\n except:\n ilvl = np.NaN\n\n try:\n covenant = player['combatantInfo']['covenantID']\n except:\n covenant = np.NaN\n\n try:\n spec = player['specs'][0]\n except:\n spec = np.NaN\n\n try:\n stats = player['combatantInfo']['stats']\n primaries = ['Agility','Intellect','Strength']\n for primary in primaries:\n if primary in stats.keys():\n break\n primary= stats[primary]['min']\n mastery= stats['Mastery']['min']\n crit= stats['Crit']['min']\n haste= stats['Haste']['min']\n vers= stats['Versatility']['min']\n stamina= stats['Stamina']['min']\n except:\n primary = np.NaN\n mastery = np.NaN\n crit = np.NaN\n haste = np.NaN\n vers = np.NaN\n stamina = np.NaN\n \n player_info= {'unique_id': unique_id,\n 'class': player['type'],\n 'spec': spec,\n 'role': role,\n 'ilvl': ilvl,\n 'covenant': covenant,\n 'primary': primary,\n 'mastery': mastery,\n 'crit': crit,\n 'haste': haste,\n 'vers': vers,\n 'stamina': stamina,\n 'boss_name': fight['name']}\n 
player_list.append(player_info)\n return player_list\n\n\n# %% Setup the SQL Stuff\nfrom sqlalchemy import create_engine\nimport psycopg2\nserver = 'localhost'\ndatabase = 'nathria_prog'\nusername = 'postgres'\npassword = 'postgres'\n\nif 'conn' in locals():\n conn.close()\nengine = create_engine('postgresql://postgres:postgres@localhost:5432/nathria_prog')\nconn = psycopg2.connect('host='+server+' dbname='+database+' user='+username+' password='+password)\ncurs = conn.cursor()\n\ncurs.execute(\"select exists(select * from information_schema.tables where table_name=%s)\",\\\n ('nathria_prog_v2',))\nif curs.fetchone()[0]:\n curs.execute('select distinct guild_name from nathria_prog_v2')\n already_added_guilds = [item[0] for item in curs.fetchall()]\n already_added_length = len(already_added_guilds)\nelse:\n already_added_guilds = []\n already_added_length = 0\n\ndef check_in_sql(fight):\n unique_id = fight['unique_id']\n curs.execute(\"select * from nathria_prog_v2 where unique_id = '%s'\" % (unique_id))\n if curs.fetchone() is None:\n check_one = False\n else:\n check_one = True\n\n curs.execute(\"select * from nathria_prog_v2 where start_time > %s and end_time < %s and guild_name = '%s';\" \\\n % (fight['start_time']-60, fight['end_time']+60, fight['guild_name']))\n if curs.fetchone() is None:\n check_two = False\n else:\n check_two = True\n check = check_one or check_two\n return check\n\ndef add_to_sql(curs, table, info):\n placeholders = ', '.join(['%s'] * len(info))\n columns = ', '.join(info.keys())\n sql = \"INSERT INTO %s ( %s ) VALUES ( %s )\" % (str(table), columns, placeholders)\n curs.execute(sql, list(info.values()))\n\n#%% This is for futures use\n \ndef make_logs_query(log):\n log_id = log['code']\n query = \"\"\"\n {\n reportData{\n report(code: \"%s\"){\n fights(difficulty: 5){\n name\n id\n averageItemLevel\n bossPercentage\n kill\n startTime\n endTime\n }\n }\n }\n }\n \"\"\" % (log_id)\n\n return query\n\ndef get_log_args(log, graphql_endpoint, headers):\n args = {'url': graphql_endpoint,\n 'json': {'query': make_logs_query(log)},\n 'headers': headers}\n return args\n\ndef get_fight_list(log_list, graphql_endpoint, headers):\n session = FuturesSession(max_workers = 2)\n futures = [session.post(**get_log_args(log, graphql_endpoint, headers)) for log in log_list]\n\n fights_list = []\n for q, item in enumerate(futures):\n result = item.result()\n if result.status_code!=200:\n print(result.status_code)\n fights = result.json()['data']['reportData']['report']['fights']\n for k, fight in enumerate(fights):\n fight['log_code'] = log_list[q]['code']\n fight['log_start'] = log_list[q]['startTime']\n fight['log_end'] = log_list[q]['endTime']\n fight['unique_id'] = log_list[q]['code'] + '_' + str(fight['id'])\n fights_list.extend([fight])\n \n return fights_list\n\ndef get_prog_pulls(df, boss_name):\n if type(df.iloc[0]['start_time']) != 'int':\n df['start_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['start_time']]\n df['end_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['end_time']]\n kills_df = df.query('name == \"'+boss_name+'\"').query('zoneDifficulty == 5').query('kill == True')\n first_kill_time = min(kills_df['start_time'])\n return df.query('name == \"'+boss_name+'\"').query('zoneDifficulty == 5').query('start_time <= '+str(first_kill_time))\n\ndef add_pull_num(df):\n df = df.sort_values(by = ['start_time'])\n df.insert(loc = 0, column = 'pull_num', value = np.arange(len(df))+1)\n return df\n\ndef combine_boss_df(df):\n boss_names = 
[\n 'Shriekwing', \\\n 'Huntsman Altimor',\n 'Hungering Destroyer', \\\n \"Sun King's Salvation\",\n \"Artificer Xy'mox\", \\\n 'Lady Inerva Darkvein', \\\n 'The Council of Blood', \\\n 'Sludgefist', \\\n 'Stone Legion Generals', \\\n 'Sire Denathrius']\n combine_df = pd.DataFrame()\n for k, boss_name in enumerate(np.unique(df['name'])):\n if boss_name in boss_names and boss_name in np.unique(df['name']):\n combine_df = combine_df.append(add_pull_num(df.copy(deep = True).query('name == \"'+boss_name+'\"')))\n combine_df = combine_df.reset_index().drop(columns = 'index')\n return combine_df\n\nn_start = 3500\nfor gnum, guild in enumerate(guilds[n_start:]):\n if guild['name'] in already_added_guilds:\n continue\n # log_list = get_log_list(guild)\n try:\n log_list = get_log_list_apiv1(guild)\n if len(log_list) == 0:\n print(f'Log list empty for {guild[\"name\"]}')\n fightdf = pd.DataFrame()\n playerdf = pd.DataFrame()\n print(f'Parsing guild {guild[\"name\"]} (#{gnum+1+n_start} of {len(guilds)})')\n fight_list = get_fight_list(log_list, graphql_endpoint, headers)\n fightdf = pd.DataFrame()\n for q, fight in enumerate(fight_list):\n fight['boss_perc'] = fight.pop('bossPercentage')\n fight['average_item_level'] = fight.pop('averageItemLevel')\n fight['unique_id'] = fight['log_code'] + '_' + str(fight['id'])\n fight['start_time'] = fight.pop('startTime')\n fight['end_time'] = fight.pop('endTime')\n fight['guild_name'] = guild['name']\n fight['guild_realm'] = guild['realm']\n fight['guild_region'] = guild['region']\n fightdf = fightdf.append(pd.DataFrame(fight, index=['i',]))\n fightdf = combine_boss_df(fightdf.copy(deep = True))\n fightdf.to_sql('nathria_prog_v2', engine, if_exists='append')\n if len(fightdf)>1:\n print(f'Adding to SQL guild {guild[\"name\"]}')\n time.sleep(3)\n except:\n continue\n\n#%%\nasdfasdf\nfrom sqlalchemy import create_engine\nimport psycopg2\nserver = 'localhost'\ndatabase = 'nathria_prog'\nusername = 'postgres'\npassword = 'postgres'\n\nif 'conn' in locals():\n conn.close()\nengine = create_engine('postgresql://postgres:postgres@localhost:5432/nathria_prog')\nconn = psycopg2.connect('host='+server+' dbname='+database+' user='+username+' password='+password)\ncurs = conn.cursor()\n\ncurs.execute(\"select exists(select * from information_schema.tables where table_name=%s)\",\\\n ('nathria_prog_v2',))\nif curs.fetchone()[0]:\n curs.execute('select distinct guild_name from nathria_prog_v2')\n logged_guilds = [item[0] for item in curs.fetchall()]\nelse:\n logged_guilds = []\n \ndef make_fights_query(fight):\n code = fight['log_code']\n fight_ID = fight['id']\n start_time = fight['start_time']\n end_time = fight['end_time']\n query = \"\"\"\n {\n reportData{\n report(code: \"%s\"){\n table(fightIDs: %s, startTime: %s, endTime: %s)\n }\n }\n }\n \"\"\" % (code, fight_ID, str(start_time), str(end_time))\n\n return query\n\ndef get_fight_args(log, graphql_endpoint, headers):\n args = {'url': graphql_endpoint,\n 'json': {'query': make_fights_query(log)},\n 'headers': headers}\n return args\n\ndef get_fight_table(fights_list, graphql_endpoint, headers):\n session = FuturesSession(max_workers = 2)\n futures = [session.post(**get_fight_args(fight, graphql_endpoint, headers)) for fight in fights_list]\n\n fights_tables = []\n for k, item in enumerate(futures):\n result = item.result()\n if result.status_code!=200:\n print(result.status_code)\n # if is_good_response_json(item.result()):\n try:\n fights_tables.append(result.json()['data']['reportData']['report']['table']['data'])\n 
except:\n pass\n return fights_tables\n\ndef parse_fight_table(table, boss_name, unique_id, guild_name):\n\n comp = table['composition']\n roles = table['playerDetails']\n player_list = []\n for role in roles:\n players = roles[role]\n for player in players:\n try:\n gear_ilvl = [piece['itemLevel'] for piece in player['combatantInfo']['gear']]\n ilvl = np.mean(gear_ilvl)\n except:\n try:\n ilvl = player['minItemLevel']\n except:\n ilvl = np.NaN\n\n try:\n covenant = player['combatantInfo']['covenantID']\n except:\n covenant = np.NaN\n\n try:\n spec = player['specs'][0]\n except:\n spec = np.NaN\n\n try:\n stats = player['combatantInfo']['stats']\n primaries = ['Agility','Intellect','Strength']\n for primary in primaries:\n if primary in stats.keys():\n break\n primary= stats[primary]['min']\n mastery= stats['Mastery']['min']\n crit= stats['Crit']['min']\n haste= stats['Haste']['min']\n vers= stats['Versatility']['min']\n stamina= stats['Stamina']['min']\n except:\n primary = np.NaN\n mastery = np.NaN\n crit = np.NaN\n haste = np.NaN\n vers = np.NaN\n stamina = np.NaN\n \n player_info= {'unique_id': unique_id,\n 'name': player['name'],\n 'guild_name': guild_name,\n 'server': player['server'],\n 'class': player['type'],\n 'spec': spec,\n 'role': role,\n 'ilvl': ilvl,\n 'covenant': covenant,\n 'primary': primary,\n 'mastery': mastery,\n 'crit': crit,\n 'haste': haste,\n 'vers': vers,\n 'stamina': stamina,\n 'boss_name': boss_name}\n player_list.append(player_info)\n return player_list\n\nfor guild_name in logged_guilds:\n curs.execute(f\"select * from nathria_prog_v2 where guild_name = '{guild_name}'\")\n pulls = pd.DataFrame(curs.fetchall())\n pulls.columns = [desc[0] for desc in curs.description]\n fights_list = pulls.to_dict('records')\n\n curs.execute(f\"select distinct unique_id from nathria_prog_v2_players where guild_name = '{guild_name}'\")\n added_fights = [item[0] for item in curs.fetchall()]\n fight_list = [fight for fight in fights_list if fight['unique_id'] not in added_fights]\n \n if len(fight_list)>1:\n fights_tables = get_fight_table(fights_list, graphql_endpoint, headers)\n\n playerdf = pd.DataFrame()\n for q, table in enumerate(fights_tables):\n unique_id = fights_list[q]['unique_id']\n guild_name = guild_name\n player_info = parse_fight_table(table, fights_list[q]['name'], unique_id, guild_name)\n for player in player_info:\n for player in player_info:\n playerdf = playerdf.append(pd.DataFrame(player, index=['i',]))\n if len(playerdf)>1:\n print(f'Adding to SQL guild player info {guild[\"name\"]}')\n playerdf.to_sql('nathria_prog_v2_players', engine, if_exists='append')" ]
[ [ "pandas.DataFrame", "numpy.mean", "numpy.unique" ] ]
Monika199211/olympics-data-analysis
[ "bfd0d1ff2b2283d838314754ff21cdd753ae0fb3" ]
[ "code.py" ]
[ "# --------------\n#Importing header files\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Path of the file is stored in the variable path\ndata=pd.read_csv(path)\n#Code starts here\ndata.rename(columns={'Total':'Total_Medals'},inplace=True)\n# Data Loading \ndata['Better_Event'] = np.where(data['Total_Summer']> data['Total_Winter'], 'Summer', 'Winter')\ndata['Better_Event'] =np.where(data['Total_Summer'] ==data['Total_Winter'],'Both',data['Better_Event'])\nbetter_event=data['Better_Event'].value_counts().idxmax()\n\ndata.head()\n\n# Summer or Winter\n\n\n# Top 10\n\ndata.head(10)\n\n# Plotting top 10\n\n\n# Top Performing Countries\ntop_countries=data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]\ntop_countries=top_countries[:-1]\ntop_countries\n# Best in the world \ndef top_ten(df,col):\n country_list=[]\n country_list= list((top_countries.nlargest(10,col)['Country_Name']))\n return country_list\ntop_10_summer=top_ten(top_countries,'Total_Summer')\ntop_10_winter=top_ten(top_countries,'Total_Winter')\ntop_10=top_ten(top_countries,'Total_Medals')\na=set(top_10_summer).intersection(set(top_10_winter))\nb=a.intersection(set(top_10))\ncommon=list(b)\nsummer_df= data[data['Country_Name'].isin(top_10_summer)]\nsummer_df.head()\nwinter_df= data[data['Country_Name'].isin(top_10_winter)]\nwinter_df.head()\ntop_df= data[data['Country_Name'].isin(top_10)]\ntop_df.head()\nplt.figure(figsize=(10,10))\nplt.bar(summer_df['Country_Name'],summer_df['Total_Summer'])\nplt.xticks(rotation=30)\nplt.show()\nsummer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']\nsummer_max_ratio=max(summer_df['Golden_Ratio'])\nsummer_max_ratio\nsummer_country_gold=summer_df.loc[summer_df['Gold_Summer'].idxmax(),'Country_Name']\nsummer_country_gold\nwinter_df['Golden_Ratio']=summer_df['Gold_Winter']/summer_df['Total_Winter']\nwinter_max_ratio=max(winter_df['Golden_Ratio'])\nwinter_country_gold=winter_df.loc[winter_df['Gold_Winter'].idxmax(),'Country_Name']\nwinter_country_gold\ntop_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals']\ntop_max_ratio=max(top_df['Golden_Ratio'])\ntop_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']\ntop_country_gold\ndata_1=data[:-1]\ndata_1['Total_Points']=data_1['Gold_Total']*3+data_1['Silver_Total']*2+data_1['Bronze_Total']*1\nmost_points=max(data_1['Total_Points'])\nmost_points\nbest_country=data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']\nbest_country\n# Plotting the best\nbest=data[data['Country_Name']==best_country]\nbest\nbest=best[['Gold_Total','Silver_Total','Bronze_Total']]\nbest\nbest.plot.bar()\nplt.xlabel(\"United States\")\nplt.ylabel(\"Medals\")\nplt.xticks(rotation=45)\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figure", "numpy.where", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.bar", "matplotlib.pyplot.xticks" ] ]
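The analysis above derives Better_Event with two stacked numpy.where calls and then computes per-event gold-to-total ratios; note that its winter ratio is taken from the summer frame, which looks like a slip, since the intended pattern divides a frame's own columns. A small self-contained sketch of both steps on made-up medal counts:

import numpy as np
import pandas as pd

# Made-up medal counts; column names follow the snippet above.
df = pd.DataFrame({
    'Country_Name': ['A', 'B', 'C'],
    'Gold_Winter': [1, 2, 5],
    'Total_Summer': [10, 5, 7],
    'Total_Winter': [3, 5, 9],
})

# Label the larger event first, then overwrite ties with 'Both'.
df['Better_Event'] = np.where(df['Total_Summer'] > df['Total_Winter'], 'Summer', 'Winter')
df['Better_Event'] = np.where(df['Total_Summer'] == df['Total_Winter'], 'Both', df['Better_Event'])

# Golden ratio computed from the same frame's own columns.
df['Golden_Ratio_Winter'] = df['Gold_Winter'] / df['Total_Winter']
print(df)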
iBobbyTS/Colab-DAIN
[ "29469e64db5d45e824def45769a2d7c7b72c431c" ]
[ "my_package/Interpolation/setup.py" ]
[ "#!/content/Python/bin/python3.6\nimport os\nimport torch\n\nfrom setuptools import setup, find_packages\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\nfrom compiler_args import nvcc_args, cxx_args\n\nsetup(\n name='interpolation_cuda',\n ext_modules=[\n CUDAExtension('interpolation_cuda', [\n 'interpolation_cuda.cc',\n 'interpolation_cuda_kernel.cu'\n ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args})\n ],\n cmdclass={\n 'build_ext': BuildExtension\n })\n" ]
[ [ "torch.utils.cpp_extension.CUDAExtension" ] ]
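The setup.py above builds interpolation_cuda ahead of time with setuptools and CUDAExtension. As a hedged alternative, the same two source files can be JIT-compiled at import time with torch.utils.cpp_extension.load, which is often more convenient in a Colab session; the flag list below is a placeholder, since the repository keeps its real nvcc and cxx flags in compiler_args:

from torch.utils.cpp_extension import load

# JIT-compile the extension from the same source files (assumed to sit next to
# this script); the resulting module is used like the installed one.
interpolation_cuda = load(
    name='interpolation_cuda',
    sources=['interpolation_cuda.cc', 'interpolation_cuda_kernel.cu'],
    extra_cuda_cflags=['-O2'],  # placeholder flags, not the repo's nvcc_args
    verbose=True,
)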
bartdavids/machine-learning-and-simulation
[ "4a4ca74e2252fa8311112e38b46ed46da3c105e2" ]
[ "english/simulation_scripts/D3Q19_lattice_boltzmann_method_python_jax.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nAdjustment from the 2D version from Machine Learning & Simulation code and video:\n https://www.youtube.com/watch?v=ZUXmO4hu-20&list=LL&index=1&ab_channel=MachineLearning%26Simulation\n https://github.com/Ceyron/machine-learning-and-simulation/blob/main/english/simulation_scripts/lattice_boltzmann_method_python_jax.py\n\nby Bart Davids. Originally made in Google Colab:\nhttps://colab.research.google.com/drive/1F3EH9_2N3lkEpgQXOScR3lcQ6oqCARPk?usp=sharing\n\nAdditional notes and figures for clarification can be found there.\n\n\"\"\"\n\n# Import dependancies\nimport jax\nimport jax.numpy as jnp\nimport matplotlib.pyplot as plt\nimport cmasher as cmr\nfrom tqdm import tqdm\n\nif __name__ == '__main__':\n \n # Enable 64bit JAX\n jax.config.update(\"jax_enable_x64\", True)\n \n # Radius of the cylinder\n radius = 5.5\n \n # Dimensions of domain\n ny = 50\n nz = 60\n nx = 300\n \n KINEMATIC_VISCOSITY = 0.0025 \n HORIZONTAL_INFLOW_VELOCITY = 0.04 \n \n reynolds_number = (HORIZONTAL_INFLOW_VELOCITY * radius) / KINEMATIC_VISCOSITY \n \n RELAXATION_OMEGA = (1.0 / (3.0 * KINEMATIC_VISCOSITY + 0.5))\n \n PLOT_EVERY_N_STEPS = 100\n SKIP_FIRS_N_ITERATIONS = 5000 \n N_ITERATIONS = 20000\n print('Reynolds number:', reynolds_number)\n \n # Define a mesh for the obstacle mask\n x = jnp.arange(nx)\n y = jnp.arange(ny)\n z = jnp.arange(nz)\n X, Y, Z = jnp.meshgrid(x, y, z, indexing=\"ij\")\n \n cylinder = jnp.sqrt((X - nx//5)**2 + (Y - ny//2)**2)\n obstacle_mask = cylinder < radius\n \n # Show topview of the cylinder:\n plt.imshow(obstacle_mask[:, :, nz//2].T)\n plt.show()\n \n # Front view:\n plt.imshow(obstacle_mask[nx//5, :, :].T)\n plt.show()\n \n # Side View:\n plt.imshow(obstacle_mask[:, ny//2, :].T)\n plt.show()\n \n def get_density(discrete_velocities):\n density = jnp.sum(discrete_velocities, axis=-1)\n return density\n \n def get_macroscopic_velocities(discrete_velocities, density):\n return jnp.einsum(\"NMLQ,dQ->NMLd\", discrete_velocities, LATTICE_VELOCITIES) / density[..., jnp.newaxis]\n \n def get_equilibrium_discrete_velocities(macroscopic_velocities, density):\n projected_discrete_velocities = jnp.einsum(\"dQ,NMLd->NMLQ\", LATTICE_VELOCITIES, macroscopic_velocities)\n macroscopic_velocity_magnitude = jnp.linalg.norm(macroscopic_velocities, axis=-1, ord=2)\n equilibrium_discrete_velocities = (density[..., jnp.newaxis] * LATTICE_WEIGHTS[jnp.newaxis, jnp.newaxis, jnp.newaxis, :] *\n (1 + 3 * projected_discrete_velocities + 9/2 * projected_discrete_velocities**2 -\n 3/2 * macroscopic_velocity_magnitude[..., jnp.newaxis]**2)) \n return equilibrium_discrete_velocities\n \n N_DISCRETE_VELOCITIES = 19\n \n # 3D lattice velocities and numbering used as in: \n # https://www.researchgate.net/publication/290158292_An_introduction_to_Lattice-Boltzmann_methods\n LATTICE_INDICES = jnp.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18])\n LATICE_VELOCITIES_X = jnp.array([ 0, 1, 0,-1, 0, 0, 0, 1,-1,-1, 1, 1,-1,-1, 1, 0, 0, 0, 0])\n LATICE_VELOCITIES_Y = jnp.array([ 0, 0, 1, 0,-1, 0, 0, 1, 1,-1,-1, 0, 0, 0, 0, 1,-1,-1, 1])\n LATICE_VELOCITIES_Z = jnp.array([ 0, 0, 0, 0, 0, 1,-1, 0, 0, 0, 0, 1, 1,-1,-1, 1, 1,-1,-1])\n \n OPPOSITE_LATTICE_INDICES = jnp.array([ 0, 3, 4, 1, 2, 6, 5, 9,10, 7, 8,13,14,11,12,17,18,15,16])\n \n LATTICE_VELOCITIES = jnp.array([LATICE_VELOCITIES_X,\n LATICE_VELOCITIES_Y,\n LATICE_VELOCITIES_Z])\n \n LATTICE_WEIGHTS = jnp.array([# rest particle\n 1/3, \n \n # face-connected neighbors\n 1/18, 1/18, 1/18, 1/18, 1/18, 1/18,\n \n # 
edge-connected neighbors\n 1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36, 1/36])\n \n # Velocity directions/planes\n RIGHT_VELOCITIES = jnp.array([1, 7, 10, 11, 14]) # LATICE_VELOCITIES_X = 1\n LEFT_VELOCITIES = jnp.array([3, 8, 9, 12, 13]) # LATICE_VELOCITIES_X =-1\n YZ_VELOCITIES = jnp.array([0, 2, 4, 5, 6, 15, 16, 17, 18]) # LATICE_VELOCITIES_X = 0\n \n \n VELOCITY_PROFILE = jnp.zeros((nx, ny, nz, 3))\n VELOCITY_PROFILE = VELOCITY_PROFILE.at[:, :, :, 0].set(HORIZONTAL_INFLOW_VELOCITY)\n discrete_velocities_prev = get_equilibrium_discrete_velocities(VELOCITY_PROFILE, \n jnp.ones((nx, ny, nz)))\n \n @jax.jit\n def update(discrete_velocities_prev):\n # (1) Prescribe the outflow BC on the right boundary. Flow can go out, but not back in.\n discrete_velocities_prev = discrete_velocities_prev.at[-1, :, :, LEFT_VELOCITIES].set(discrete_velocities_prev[-2, :, :, LEFT_VELOCITIES])\n \n # (2) Determine macroscopic velocities\n density_prev = get_density(discrete_velocities_prev)\n macroscopic_velocities_prev = get_macroscopic_velocities(\n discrete_velocities_prev,\n density_prev)\n \n # (3) Prescribe Inflow Dirichlet BC using Zou/He scheme in 3D: \n # https://arxiv.org/pdf/0811.4593.pdf\n # https://terpconnect.umd.edu/~aydilek/papers/LB.pdf\n macroscopic_velocities_prev = macroscopic_velocities_prev.at[0, 1:-1, 1:-1, :].set(VELOCITY_PROFILE[0, 1:-1, 1:-1, :])\n lateral_densities = get_density(jnp.transpose(discrete_velocities_prev[0, :, :, YZ_VELOCITIES], axes = (1, 2, 0)))\n left_densities = get_density(jnp.transpose(discrete_velocities_prev[0, :, :, LEFT_VELOCITIES], axes = (1, 2, 0)))\n density_prev = density_prev.at[0, :, :].set((lateral_densities + 2 * left_densities) / \n (1 - macroscopic_velocities_prev[0, :, :, 0]))\n \n # (4) Compute discrete Equilibria velocities\n equilibrium_discrete_velocities = get_equilibrium_discrete_velocities(\n macroscopic_velocities_prev,\n density_prev)\n \n # (3) Belongs to the Zou/He scheme\n discrete_velocities_prev =\\\n discrete_velocities_prev.at[0, :, :, RIGHT_VELOCITIES].set(\n equilibrium_discrete_velocities[0, :, :, RIGHT_VELOCITIES])\n \n # (5) Collide according to BGK\n discrete_velocities_post_collision = (discrete_velocities_prev - RELAXATION_OMEGA *\n (discrete_velocities_prev - equilibrium_discrete_velocities))\n \n # (6) Bounce-Back Boundary Conditions to enfore the no-slip \n for i in range(N_DISCRETE_VELOCITIES):\n discrete_velocities_post_collision = discrete_velocities_post_collision.at[obstacle_mask, LATTICE_INDICES[i]].set(\n discrete_velocities_prev[obstacle_mask, OPPOSITE_LATTICE_INDICES[i]])\n \n \n # (7) Stream alongside lattice velocities\n discrete_velocities_streamed = discrete_velocities_post_collision\n for i in range(N_DISCRETE_VELOCITIES):\n discrete_velocities_streamed = discrete_velocities_streamed.at[:, :, :, i].set(\n jnp.roll(\n jnp.roll(\n jnp.roll(\n discrete_velocities_post_collision[:, :, :, i], LATTICE_VELOCITIES[0, i], axis = 0),\n \t LATTICE_VELOCITIES[1, i], axis = 1),\n LATTICE_VELOCITIES[2, i], axis = 2))\n \n return discrete_velocities_streamed\n \n def run(discrete_velocities_prev): \n for i in tqdm(range(N_ITERATIONS)):\n discrete_velocities_next = update(discrete_velocities_prev)\n discrete_velocities_prev = discrete_velocities_next\n \n if i % PLOT_EVERY_N_STEPS == 0 and i > SKIP_FIRS_N_ITERATIONS - PLOT_EVERY_N_STEPS:\n density = get_density(discrete_velocities_next)\n macroscopic_velocities = get_macroscopic_velocities(\n discrete_velocities_next,\n density)\n print('\\n', 
jnp.max(macroscopic_velocities))\n velocity_magnitude = jnp.linalg.norm(\n macroscopic_velocities,\n axis=-1,\n ord=2)\n fig = plt.figure(figsize = (15, 3))\n cont = plt.contourf(X[:, :, nz//2], Y[:, :, nz//2], jnp.flip(velocity_magnitude[:, :, nz//2], axis = 1), alpha=0.8, cmap=cmr.iceburn) \n plt.axis('scaled')\n plt.axis('off')\n plt.show()\n \n return \n \n run(discrete_velocities_prev)\n\n" ]
[ [ "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "matplotlib.pyplot.figure", "matplotlib.pyplot.imshow" ] ]
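The D3Q19 stencil in the script above can be sanity-checked quickly: its weights must sum to one, the first moment of the velocity set must vanish, and the diagonal second moment must equal the lattice speed of sound squared, 1/3. A short numpy check of those identities, copying the same stencil values:

import numpy as np

# D3Q19 stencil exactly as listed in the script above.
cx = np.array([0, 1, 0, -1, 0, 0, 0, 1, -1, -1, 1, 1, -1, -1, 1, 0, 0, 0, 0])
cy = np.array([0, 0, 1, 0, -1, 0, 0, 1, 1, -1, -1, 0, 0, 0, 0, 1, -1, -1, 1])
cz = np.array([0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, -1])
w = np.array([1/3] + [1/18] * 6 + [1/36] * 12)

assert np.isclose(w.sum(), 1.0)                                            # zeroth moment
assert np.allclose([(w * cx).sum(), (w * cy).sum(), (w * cz).sum()], 0.0)  # first moment
assert np.isclose((w * cx * cx).sum(), 1/3)                                # diagonal second moment = c_s**2
assert np.isclose((w * cx * cy).sum(), 0.0)                                # off-diagonal second moment
print('D3Q19 weights and velocities pass the standard moment checks')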
cmoestl/heliocats
[ "5e2b054990319e14859669561a361cc0c7ca4295" ]
[ "sircat.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# ## sircat\n# \n# Makes a catalog of solar wind stream interaction regions (SIRs) and high speed solar wind streams (HSS) for the Wind, STEREO and MAVEN spacecraft since 2007.\n# \n# Authors: [C. Möstl](https://www.iwf.oeaw.ac.at/en/user-site/christian-moestl/) (twitter @chrisoutofspace), A. J. Weiss, R. L. Bailey, IWF Graz, Austria; Lan Jian, NASA, USA, Maxim Grandin, University of Helsinki, Finland; Hui Huang, Beijing University, China.\n# \n# \n# **current status: work in progress** \n# \n# If you want to use parts of this code for generating results for peer-reviewed scientific publications, please contact us per email (christian.moestl@oeaw.ac.at, lan.jian@nasa.gov, maxime.grandin@helsinki.fi) for co-authorships.\n# \n# \n# part of https://github.com/cmoestl/heliocats, last update June 2020\n# \n# ---\n# \n# ### Installation \n# In a command line, do: \"git clone https://github.com/cmoestl/heliocats\".\n# \n# Install a specific conda environment to run this code, see README at https://github.com/cmoestl/heliocats\n# \n# Download the files from https://doi.org/10.6084/m9.figshare.11973693 and place them in the /data folder.\n# \n# \n# \n# ### Updates\n# \n# Adding a new SIR event: change the source files, or add the sir and hss times in section 2 before the master file sircat/HELIO4CAST_SIRCAT_v10_master.xlsx is produced. Then delete the file for the respective spacecraft under sircat/indices_sircat, and run this notebook or script.\n# \n# Convert this notebook to a script with \"jupyter nbconvert --to script sircat.ipynb\" in a command line\n# \n# ---\n# \n# \n# ### Data sources\n# \n# \n# **PSP SIR list**: Allen et al. 2021: https://www.aanda.org/articles/aa/full_html/2021/06/aa39833-20/aa39833-20.html, list at https://sppgway.jhuapl.edu/event_list\n# \n# \n# **STEREO SIR list**: Lan Jian, https://stereo-ssc.nascom.nasa.gov/data/ins_data/impact/level3/\n# published in: L. K. Jian et al. https://doi.org/10.1007/s11207-019-1416-8, 2019.\n# \n# This catalog contains the SIR start and end times, as well as the Pt max time for the stream interface. We use their SIR start and ends time as our *sir_start_time* and *sir_end_time*, and set the *hss_start_time* with the Pt max time. For 4 Pt max times that were nan in the Jian et al. list, the *hss_start_time* has been set similar to the *sir_end_time*.\n# \n# **To do**: create our own *hss_end_time* by setting it as the first time when the total bulk speed drops below 450 km/s after *sir_end_time*. Lan: For the STEREO HSS catalog, you can opt to list only the events with the fastest speed reaching at least 500 km/s, to be consistent with Grandin et al. (2019).\"\n# \n# \n# **Earth SIR/HSS list**: Maxim Grandin et al., 2018, https://doi.org/10.1029/2018JA026396\n# \n# This catalog directly gives the *hss_start_time* and the *hss_end_time*. This list was determined by an algorithm and there are no specifics about the the SIR times, instead the start time is determined as the start of the increasing speed and is thus is likely closer to an SIR start time than to a stream interface time, which we use as a *hss_start_time*. For simplicity, we have nevertheless taken the given start time as the hss_start_time. \n# The times in the Earth SIR/HSS list have been modified to 1 hour earlier as these times were originally given for the magnetopause, but the Wind spacecraft is located at the L1 point. 
One hour is practically equivalent to the propagation time of a 400 km/s slow solar wind from the L1 point to the magnetopause.\n# \n# **To do**: In future updates, we may change hss_start_time to the sir_start_time and add a proper hss_start_time by searching for ptmax after a new sir_start_time. The Grandin et al. (2019) catalogue only contains events for which the solar wind speed reached at least 500 km/s. Lan: \"For Grandin et al. (2019), you can use the peak of total pressure to approximate the stream interface time.\"\n# \n# \n# **MARS SIR/HSS list**: Hui Huang et al., 2019, https://doi.org/10.3847/1538-4357/ab25e9 (open access not available).\n# This catalog gives the sir_start_time, hss_start_time (=stream interface time) and the sir_end_time. \n# \n# **To do**: Similar to the STEREO-list, with have added the hss_end_time.\n# \n# \n# All other parameters are calculated from scratch from the spacecraft data via this notebook or script.\n# \n# ---\n# \n# ### Other resourcess\n# \n# \n# **Great review on SIRs** by Ian G. Richardson: https://link.springer.com/article/10.1007/s41116-017-0011-z\n# \n# \n# ---\n# \n# \n# \n# \n# \n# \n\n# start with importing packages, get paths from config.py file and make directories \n\n# In[405]:\n\n\nlast_update='2021-July-13'\n\n\n# In[11]:\n\n\nimport numpy as np\nimport scipy.io\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.dates import DateFormatter\nfrom datetime import timedelta\nimport seaborn as sns\nimport datetime\nimport astropy\nimport astropy.constants as const\nfrom sunpy.time import parse_time\nimport time\nimport pickle\nimport sys\nimport os\nimport urllib\nimport json\nimport importlib\nimport pandas as pd\nimport copy\nimport openpyxl\nimport h5py\n\nfrom heliocats import plot as hp\nimportlib.reload(hp) #reload again while debugging\n\nfrom heliocats import data as hd\nimportlib.reload(hd) #reload again while debugging\n\nfrom heliocats import cats as hc\nimportlib.reload(hc) #reload again while debugging\n\nfrom heliocats import stats as hs\nimportlib.reload(hs) #reload again while debugging\n\n#where the in situ data files are located is read \n#from config.py \nimport config\nimportlib.reload(config)\nfrom config import data_path\nfrom config import data_path_ML\n\n\n########### make directories first time if not there\n\nresdir='results'\nif os.path.isdir(resdir) == False: os.mkdir(resdir)\n\ndatadir='data'\nif os.path.isdir(datadir) == False: os.mkdir(datadir)\n\nindexdir='sircat/indices_sircat' \nif os.path.isdir(indexdir) == False: os.mkdir(indexdir) \n\ncatdir='sircat'\nif os.path.isdir(catdir) == False: os.mkdir(catdir)\n\nsirplotsdir='sircat/plots_sircat/' \nif os.path.isdir(sirplotsdir) == False: os.mkdir(sirplotsdir) \n\n#Convert this notebook to a script with jupyter nbconvert --to script icmecat.ipynb\nos.system('jupyter nbconvert --to script sircat.ipynb') \n\n#in situ data files are updated via the icmecat.ipynb notebook \n \n\n## (1) load data \n\n\n# ## (1) load data from STEREO-B, STEREO-A, Wind, PSP, and MAVEN\n# \n\n# In[2]:\n\n\nload_data=1\n\nif load_data > 0: \n \n #print('load Ulysses RTN') #made with heliocats.data.save_ulysses_data\n #fileuly='ulysses_1990_2009_rtn.p'\n #[uly,huly]=pickle.load(open(data_path+fileuly, \"rb\" ) ) \n \n print('load STEREO-B data SCEQ') #yearly magplasma files from stereo science center, conversion to SCEQ \n filestb='stereob_2007_2014_sceq.p'\n [stb,hstb]=pickle.load(open(data_path+filestb, \"rb\" ) ) \n \n\n 
########### CURRENT ACTIVE SPACECRAFT \n\n \n # ADD BepiColombo \n \n \n # ADD Solar Orbiter\n \n \n print('load MAVEN data MSO') #removed magnetosphere by C. Simon Wedlund, 1 data point per orbit, MSO \n #filemav='maven_2014_2018.p'\n #[mav,hmav]=pickle.load(open(filemav, 'rb' ) )\n #filemav='maven_2014_2018_removed.p'\n #[mav,hmav]=pickle.load(open(filemav, 'rb' ) ) \n filemav='maven_2014_2018_removed_smoothed.p'\n [mav,hmav]=pickle.load(open(data_path+filemav, 'rb' ) )\n \n #print('load MSL RAD')\n #MSL RAD\n #rad=hd.load_msl_rad()#, rad.time,rad.dose_sol\n\n \n ##############################################\n print('load PSP data SCEQ') #from heliosat, converted to SCEQ similar to STEREO-A/B\n filepsp='psp_2018_2021_sceq.p'\n [psp,hpsp]=pickle.load(open(data_path+filepsp, \"rb\" ) ) \n \n \n \n ########### STA\n \n print('load and merge STEREO-A data SCEQ') #yearly magplasma files from stereo science center, conversion to SCEQ \n filesta1='stereoa_2007_2020_sceq.p'\n sta1=pickle.load(open(data_path+filesta1, \"rb\" ) ) \n \n #beacon data\n #filesta2=\"stereoa_2019_2020_sceq_beacon.p\"\n #filesta2='stereoa_2019_2020_sept_sceq_beacon.p'\n #filesta2='stereoa_2019_now_sceq_beacon.p'\n #filesta2=\"stereoa_2020_august_november_sceq_beacon.p\" \n filesta2='stereoa_2020_now_sceq_beacon.p'\n \n [sta2,hsta2]=pickle.load(open(data_path+filesta2, \"rb\" ) ) \n #cutoff with end of science data\n sta2=sta2[np.where(sta2.time >= parse_time('2020-Aug-01 00:00').datetime)[0]]\n\n #make array\n sta=np.zeros(np.size(sta1.time)+np.size(sta2.time),dtype=[('time',object),('bx', float),('by', float), ('bz', float),('bt', float),('vt', float),('np', float),('tp', float), ('x', float),('y', float),('z', float), ('r', float),('lat', float),('lon', float)]) \n\n #convert to recarray\n sta = sta.view(np.recarray) \n sta.time=np.hstack((sta1.time,sta2.time))\n sta.bx=np.hstack((sta1.bx,sta2.bx))\n sta.by=np.hstack((sta1.by,sta2.by))\n sta.bz=np.hstack((sta1.bz,sta2.bz))\n sta.bt=np.hstack((sta1.bt,sta2.bt))\n sta.vt=np.hstack((sta1.vt,sta2.vt))\n sta.np=np.hstack((sta1.np,sta2.np))\n sta.tp=np.hstack((sta1.tp,sta2.tp))\n sta.x=np.hstack((sta1.x,sta2.x))\n sta.y=np.hstack((sta1.y,sta2.y))\n sta.z=np.hstack((sta1.z,sta2.z))\n sta.r=np.hstack((sta1.r,sta2.r))\n sta.lon=np.hstack((sta1.lon,sta2.lon))\n sta.lat=np.hstack((sta1.lat,sta2.lat))\n print('STA Merging done')\n\n\n ########### Wind\n print('load and merge Wind data HEEQ') \n #from HELCATS HEEQ until 2018 1 1 + new self-processed data with heliosat and hd.save_wind_data\n filewin=\"wind_2007_2018_heeq_helcats.p\" \n [win1,hwin1]=pickle.load(open(data_path+filewin, \"rb\" ) ) \n \n filewin2=\"wind_2018_now_heeq.p\" \n [win2,hwin2]=pickle.load(open(data_path+filewin2, \"rb\" ) ) \n \n #function for spike removal, see list with times in that function\n win2=hd.remove_wind_spikes_gaps(win2)\n\n #merge Wind old and new data \n #cut off HELCATS data at end of 2017, win2 begins exactly after this\n win1=win1[np.where(win1.time < parse_time('2018-Jan-01 00:00').datetime)[0]]\n #make array\n win=np.zeros(np.size(win1.time)+np.size(win2.time),dtype=[('time',object),('bx', float),('by', float), ('bz', float),('bt', float),('vt', float),('np', float),('tp', float), ('x', float),('y', float),('z', float), ('r', float),('lat', float),('lon', float)]) \n\n #convert to recarray\n win = win.view(np.recarray) \n win.time=np.hstack((win1.time,win2.time))\n win.bx=np.hstack((win1.bx,win2.bx))\n win.by=np.hstack((win1.by,win2.by))\n win.bz=np.hstack((win1.bz,win2.bz))\n 
win.bt=np.hstack((win1.bt,win2.bt))\n win.vt=np.hstack((win1.vt,win2.vt))\n win.np=np.hstack((win1.np,win2.np))\n win.tp=np.hstack((win1.tp,win2.tp))\n win.x=np.hstack((win1.x,win2.x))\n win.y=np.hstack((win1.y,win2.y))\n win.z=np.hstack((win1.z,win2.z))\n win.r=np.hstack((win1.r,win2.r))\n win.lon=np.hstack((win1.lon,win2.lon))\n win.lat=np.hstack((win1.lat,win2.lat))\n\n print('Wind merging done')\n \n \n \nprint()\n \nprint() \nprint('time ranges of the in situ data: ') \nprint()\nprint('active spacecraft:')\nprint('Wind ',str(win.time[0])[0:10],str(win.time[-1])[0:10])\nprint('STEREO-A ',str(sta.time[0])[0:10],str(sta.time[-1])[0:10])\nprint('Parker Solar Probe ',str(psp.time[0])[0:10],str(psp.time[-1])[0:10])\nprint('MAVEN ',str(mav.time[0])[0:10],str(mav.time[-1])[0:10])\n#print('MSL/RAD ',str(rad.time[0])[0:10],str(rad.time[-1])[0:10])\nprint()\nprint('missions finished:')\n#print('VEX ',str(vex.time[0])[0:10],str(vex.time[-1])[0:10])\n#print('MESSENGER ',str(mes.time[0])[0:10],str(mes.time[-1])[0:10])\nprint('STEREO-B ',str(stb.time[0])[0:10],str(stb.time[-1])[0:10])\n#print('Ulysses ',str(uly.time[0])[0:10],str(uly.time[-1])[0:10])\nprint()\n# print('catalogs:')\n# print()\n# print('HELCATS HIGeoCAT ',str(higeocat_time[0])[0:10],str(higeocat_time[-1])[0:10])\n\n\n\nprint('done')\n\n\n# ## (2) make SIRCAT masterfile from STEREO and Wind catalogs\n\n# Here we read raw STEREO SIR and Earth SIR catalogs from Robert Allen, Lan Jian, Maxim Grandin, and Hui Huang et al. and convert to master catalog xlsx file that contains all times in a consistent way.\n\n# In[302]:\n\n\n#make list for all basic times, ids etc. for master file\nrows_list = []\n\ndef convert_time(p_time):\n \n #from Allen catalog format to datetime object\n \n p_time_obj=[]\n for i in np.arange(0,len(p_time)):\n p_str=p_time[i][0:10]+'T'+p_time[i][11:16]+'Z'\n p_time_obj.append(parse_time(p_str).datetime) \n #print(p_time_obj[i])\n \n #dates with year 1 set to nan:\n if mdates.date2num(p_time_obj[i])< 10: p_time_obj[i]=np.nan\n \n return p_time_obj\n\n#read all Allen catalogs\npsp_sir_file='sircat/sources/SIR_CIR_List_PSP.csv'\npsp_l1_sir_file='sircat/sources/SIR_CIR_List_L1_corr_PSP.csv'\npsp_sta_sir_file='sircat/sources/SIR_CIR_List_STA_corr_PSP.csv'\n\n#psp\np_raw=pd.read_csv(psp_sir_file, header=49)\n#wind\npw_raw=pd.read_csv(psp_l1_sir_file, header=51)\n#sta\npa_raw=pd.read_csv(psp_sta_sir_file, header=51)\n\nprint(p_raw.keys())\nprint()\n\n#################################\n\n############ PSP\nprint()\np_raw['Start time']=convert_time(p_raw['Start time'])\np_raw['End time']=convert_time(p_raw['End time'])\np_raw['Time of max P']=convert_time(p_raw['Time of max P'])\n#print(p_raw['Start time'])\n#print(p_raw['End time'])\n#print(p_raw['Time of max P'])\n\n\n\nfor i in np.arange(0,len(p_raw)):\n \n #make id for event \n id_time=parse_time(p_raw['Start time'][i]).isot\n sc_idstring='SIR_PSP_ALLEN_'\n sc_string='PSP' \n sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'\n \n #put all data for this event in a list\n list1 = [sircat_id,sc_string,np.nan,parse_time(p_raw['Start time'][i]).isot, np.nan, parse_time(p_raw['End time'][i]).isot, np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]\n \n \n #print(list1) \n #append to full list\n rows_list.append(list1)\n\n\n\nprint(rows_list[1])\n\n\n############ Wind\nprint()\npw_raw['Start time']=convert_time(pw_raw['Start time'])\npw_raw['End 
time']=convert_time(pw_raw['End time'])\npw_raw['Time of max P']=convert_time(pw_raw['Time of max P'])\n#print(pw_raw['Start time'])\n#print(pw_raw['End time'])\n#print(pw_raw['Time of max P'])\n\n\n\n\nfor i in np.arange(0,len(pw_raw)):\n \n #make id for event \n id_time=parse_time(pw_raw['Start time'][i]).isot\n sc_idstring='SIR_WIND_ALLEN_'\n sc_string='Wind' \n sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'\n \n #put all data for this event in a list\n list2 = [sircat_id,sc_string,np.nan,parse_time(pw_raw['Start time'][i]).isot, np.nan, parse_time(pw_raw['End time'][i]).isot, np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]\n \n #print(list1) \n #append to full list\n rows_list.append(list2)\n\n\n\nprint(rows_list[-1])\n\n\n\n\n############STA\nprint()\npa_raw['Start time']=convert_time(pa_raw['Start time'])\npa_raw['End time']=convert_time(pa_raw['End time'])\npa_raw['Time of max P']=convert_time(pa_raw['Time of max P'])\n#print(pa_raw['Start time'])\n#print(pa_raw['End time'])\n#print(pa_raw['Time of max P'])\n\n\nfor i in np.arange(0,len(pa_raw)):\n \n #make id for event \n id_time=parse_time(pa_raw['Start time'][i]).isot\n sc_idstring='SIR_STEREO_A_ALLEN_'\n sc_string='STEREO-A' \n sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'\n \n #put all data for this event in a list\n list3 = [sircat_id,sc_string,np.nan,parse_time(pa_raw['Start time'][i]).isot, np.nan, parse_time(pa_raw['End time'][i]).isot, np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]\n \n \n #print(list1) \n #append to full list\n rows_list.append(list3)\n\n\n\nprint(rows_list[-1])\n\n \n#\n#pw_raw['Start time']\n#ptime=parse_time(p_raw['Start time']).datetime\n\n\n\n###################### read raw STEREO SIR catalog\n\nfile='sircat/sources/STEREO_Level3_SIR_data.xlsx'\nprint('load Jian STEREO catalog from excel file:', file)\nsraw=pd.read_excel(file)\n\n#get 2 times: HSS start (equivalent to SIR start as defined in the L. 
Jian catalog), HSS end (where speed again < 450km/s)\n\nprint('Events in STEREO SIR cat:', sraw.shape[0])\nprint()\n\n\nsc=sraw.loc[:,'spacecraft']\nyear_start=sraw.loc[:,'year_start']\nstime=sraw.loc[:,'start_time']\n\nyear_end=sraw.loc[:,'year_end']\netime=sraw.loc[:,'end_time']\n\nyear_pt=sraw.loc[:,'year_pt']\nptime=sraw.loc[:,'pt_time']\n\n\nfor i in np.arange(0,sraw.shape[0]):\n \n \n\n s=stime[i] \n y=year_start[i]\n doy=int(s[0:3])\n hour=int(s[-5:-3])\n minute=int(s[-2:])\n #print(y,doy,hour, min)\n sir_start_time=datetime.datetime(y,1,1)+timedelta(days=doy-1)+timedelta(hours=hour)+timedelta(minutes=minute)\n\n e=etime[i] \n y=year_end[i]\n doy=int(e[0:3])\n hour=int(e[-5:-3])\n minute=int(e[-2:])\n #print(y,doy,hour, min)\n sir_end_time=datetime.datetime(y,1,1)+timedelta(days=doy-1)+timedelta(hours=hour)+timedelta(minutes=minute)\n\n #print(i)\n p=ptime[i] \n #print(ptime[i])\n y=year_pt[i]\n doy=int(p[0:3])\n hour=int(p[-5:-3])\n minute=int(p[-2:])\n #print(y,doy,hour, min)\n hss_start_time=datetime.datetime(y,1,1)+timedelta(days=doy-1)+timedelta(hours=hour)+timedelta(minutes=minute)\n \n \n #make id for event \n id_time=parse_time(hss_start_time).isot\n if sc[i]=='A': sc_idstring='SIR_STEREO_A_JIAN_'\n if sc[i]=='B': sc_idstring='SIR_STEREO_B_JIAN_'\n\n if sc[i]=='A': sc_string='STEREO-A'\n if sc[i]=='B': sc_string='STEREO-B'\n \n sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'\n \n #put all data for this event in a list\n list4 = [sircat_id,sc_string,parse_time(sir_start_time).isot,parse_time(hss_start_time).isot, parse_time(sir_end_time).isot,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]\n \n #print(list1) \n #append to full list\n rows_list.append(list4)\n \n\n########################## read raw Wind catalog\n\n#Grandin et al. 
2018 - OMNI\n#removed 2 SIRs due to data gap of Wind in oct 2014\nfilewin='sircat/sources/grandin_2018_list_modified.txt'\nwraw=np.loadtxt(filewin,skiprows=9)\nprint('load Grandin Earth HSS catalog from:', filewin)\nprint('Events in Wind SIR/HSS cat:', wraw.shape[0])\nprint()\n\n#2 times: SIR/HSS start, HSS end (where speed again < 450km/s)\n\n#begin with 2007\nbegin2007=np.where(wraw[:,1]>=2007)[0][0]\n\n\nfor i in np.arange(begin2007,len(wraw),1):\n\n \n #SIR HSS start time y,m,d,h,m - minus 1 hour for Wind at L1, not magnetopause\n wstart=datetime.datetime(wraw[i,1].astype(int),wraw[i,2].astype(int), wraw[i,3].astype(int),wraw[i,4].astype(int), 0)-datetime.timedelta(hours=1) \n #SIR HSS end time y,m,d,h,m - minus 1 hour for Wind at L1, not magnetopause\n wend=datetime.datetime(wraw[i,11].astype(int),wraw[i,12].astype(int), wraw[i,13].astype(int),wraw[i,14].astype(int), 0)-datetime.timedelta(hours=1)\n\n\n sc_idstring='SIR_WIND_GRANDIN_'\n id_time=parse_time(wstart).isot\n sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'\n sc_string='Wind'\n \n list5 = [sircat_id,sc_string,np.nan,parse_time(wstart).isot,np.nan,parse_time(wend).isot,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]\n \n #print(list2)\n\n rows_list.append(list5)\n\n\n \n \n########################## read MAVEN catalog \n\nfrom heliocats import data as hd\nimportlib.reload(hd) #reload again while debugging\n\n#this is a recarray \nmavsir_all=hd.load_maven_sir_huang()\n\n#check which events overlap with the available MAVEN data\nmavsir_ind=np.where(mavsir_all.start < mav.time[-1])[0]\nmavsir=mavsir_all[mavsir_ind]\n \nprint('Events in MAVEN SIR/HSS cat:', mavsir.shape[0])\nprint()\n\n\n#go through all events\nfor i in mavsir_ind:\n \n sc_idstring='SIR_MAVEN_HUANG_'\n id_time=parse_time(mavsir.start[i][0]).isot\n sircat_id=sc_idstring+id_time[0:4]+id_time[5:7]+id_time[8:10]+'_01'\n sc_string='MAVEN'\n \n list6 = [sircat_id,sc_string,parse_time(mavsir.start[i][0]).isot,parse_time(mavsir.si[i][0]).isot,parse_time(mavsir.end[i][0]).isot,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan, np.nan,np.nan,np.nan]\n \n #print(list3)\n\n rows_list.append(list6)\n\n\n \n \n################################### add new events **** to be done\n#for measuring new events use this function from heliocats.plot \n#plt.close('all')\n#works in jupyter notebooks\n\n#works in scripts\n#matplotlib.use('qt5agg') \n#plt.ion()\n\n#STEREO-A\n#hp.plot_insitu_measure(sta, '2018-Jan-01 12:00','2018-Feb-01 12:00', 'STEREO-A', 'results/')\n\n#Wind\n#hp.plot_insitu_measure(win, '2019-Jan-29','2019-Feb-28', 'Wind', 'results/')\n\n \n \n \n################ make pandas data frame for master file\n \nparameters =['sircat_id','sc_insitu','sir_start_time','hss_start_time','sir_end_time', 'hss_end_time','hss_vtmax_time','sc_heliodistance', 'sc_long_heeq', 'sc_lat_heeq', \n 'hss_vtmax','hss_vtmean','hss_vtstd','hss_btmax','hss_btmean',\\\n 'hss_btstd','hss_bzmin', 'hss_bzmean','hss_bzstd','hss_duration',\\\n 'sir_vtmax','sir_vtmean', 'sir_vtstd','sir_btmax','sir_btmean',\\\n 'sir_btstd','sir_bzmin', 'sir_bzmean','sir_bzstd','sir_duration']\n\n\nmaster=pd.DataFrame(rows_list,columns=parameters)\n\n#sort by spacecraft indicator and start time\nmaster=master.sort_values(['sc_insitu','hss_start_time'])\nmaster = 
master.reset_index(drop=True) #drop extra index value\n\nmaster\n\n\n#save master file as Excel\nfile='sircat/HELIO4CAST_SIRCAT_v10_master.xlsx'\nmaster.to_excel(file,sheet_name='SIRCATv1.0')\nprint()\nprint('SIRCAT master saved as '+file)\nprint('total events', master.shape[0])\nprint('done')\n\n\n# ## (3) make SIRCAT \n\n# In[418]:\n\n\nfrom heliocats import cats as hc\nimportlib.reload(hc) #reload again while debugging\n\nfrom heliocats import plot as hp\nimportlib.reload(hp) #reload again while debugging\n\n#load master file\nscat=hc.load_helio4cast_sircat_master_from_excel('sircat/HELIO4CAST_SIRCAT_v10_master.xlsx')\nscat\n\n\n####### 3a get indices for all spacecraft\nwini=np.where(scat.sc_insitu == 'Wind')[:][0] \npspi=np.where(scat.sc_insitu == 'PSP')[:][0] \nstai=np.where(scat.sc_insitu == 'STEREO-A')[:][0] \nstbi=np.where(scat.sc_insitu == 'STEREO-B')[:][0] \nmavi=np.where(scat.sc_insitu == 'MAVEN')[:][0] \n\nprint('done')\n\n####### 3b get parameters for all spacecraft one after another\n# remove indices if the events in the master file have changed\n#os.system('rm sircat/indices_sircat/SIRCAT_indices_Wind.p')\n#os.system('rm sircat/indices_sircat/SIRCAT_indices_STEREO-A.p')\n#os.system('rm sircat/indices_sircat/SIRCAT_indices_STEREO-B.p')\n#os.system('rm sircat/indices_sircat/SIRCAT_indices_MAVEN.p')\n#os.system('rm sircat/indices_sircat/SIRCAT_indices_PSP.p')\n\n\n#hss times\nscat=hc.get_sircat_parameters(psp,pspi,scat,'PSP')\nscat=hc.get_sircat_parameters(win,wini,scat,'Wind')\n\n#sir times\nscat=hc.get_sircat_parameters(mav,mavi,scat,'MAVEN')\nscat=hc.get_sircat_parameters(stb,stbi,scat,'STEREO-B')\n\n#both allen and jian cats\nscat=hc.get_sircat_parameters(sta,stai,scat,'STEREO-A')\n\n\n# ###### 3c make all plots if wanted\n#matplotlib.use('Agg')\n#hp.plot_sircat_events(sta,stai,scat,'STEREO-A',sirplotsdir)\n#hp.plot_sircat_events(stb,stbi,scat,'STEREO-B',sirplotsdir)\n#hp.plot_sircat_events(win,wini,scat,'Wind',sirplotsdir)\n#hp.plot_sircat_events(mav,mavi,scat,'MAVEN',sirplotsdir)\n\nprint('done')\n\n\n#kick out MAVEN events without data\n\n\n############### sort SIRCAt by date\nscat = scat.sort_values(by='hss_start_time',ascending=False)\nscat = ic.reset_index(drop=True)\n\n\n# ### (4) save SIRCAT \n\n# ### 4a save header\n\n# In[410]:\n\n\n#save header and parameters as text file and prepare for html website\nheader='SIR CATALOGUE v1.0 \\n\\nThis is the HELIO4CAST stream interaction region (SIR) and high speed stream (HSS) catalog,\\nbased on in situ magnetic field and bulk plasma observations in the heliosphere. \\nIt is a merged catalog created from individual ones made by Robert Allen et al., Lan Jian et al., Maxim Grandin et al. and Hui Huang et al. (see references).\\n\\nThis is version 1.0, released 2020-06-10, updated '+last_update+' doi: 10.6084/m9.figshare.12416906 \\n\\nThe catalog is available as python pandas dataframe (pickle), json, csv, xlsx, txt, html at \\nhttps://helioforecast.space/sircat \\n\\nNumber of events in SIRCAT: '+str(len(scat))+' \\nICME observatories: Parker Solar Probe, Wind, STEREO-A, STEREO-B, MAVEN \\nTime ranges: Parker Solar Probe: Oct 2018 - May 2020, Wind: Jan 2007 - Sep 2019, STEREO-A/B: Jan 2007 - Sep 2019, MAVEN: Dec 2014 - Jan 2018. \\n\\nAuthors: Christian Moestl, Andreas J. Weiss, R. L. Bailey, Martin A. Reiss, Space Research Institute, Austrian Academy of Sciences, Graz, Austria. \\nRobert Allen, JHU/APL, USA; Lan Jian, NASA, USA; Maxim Grandin, University of Helsinki, Finland; Hui Huang, Beijing University, China. 
\\n\\nRules: If results are produced with this catalog for peer-reviewed scientific publications, \\nplease contact christian.moestl@oeaw.ac.at, robert.allen@jhuapl.edu, lan.jian@nasa.gov, maxime.grandin@helsinki.fi for possible co-authorships. \\n\\nThis catalog has been made by getting the start and end times of each high speed stream from the \\nindividual catalogs, and then calculating all parameters again consistently from the data by us. \\nThe in situ data that were used for the creating catalog, with a size of 8 GB in total, including extra data \\nfiles with magnetic field components in RTN coordinates and other spacecrat that are not used for producing this catalog, \\ncan be downloaded in python pickle format as recarrays from https://doi.org/10.6084/m9.figshare.11973693.v7 \\nThe python code for producing this catalog is available at https://github.com/cmoestl/heliocats sircat.ipynb \\n\\nEach sircat_id has a tag in it that indicates from which catalog the ICME times were taken: \\n\\nParker Solar Probe: Allen et al. 2021, tag: ALLEN, \\nWind: Grandin et al. (2019), tag: GRANDIN \\nSTEREO-A: Jian et al. (2019), tag: JIAN. \\nSTEREO-B: Jian et al. (2019), tag: JIAN. \\nMAVEN: Huang et al. (2019), tag: HUANG. \\n\\nReferences \\nAllen et al. (2021), https://doi.org/10.1051/0004-6361/202039833 \\nGrandin, M. et al. (2019), https://doi.org/10.1029/2018JA026396 \\nJian, L. et al. (2019), https://doi.org/10.1007/s11207-019-1416-8 \\nHuang, H. et al. (2019), https://doi.org/10.3847/1538-4357/ab25e9 \\n\\nComments: \\n- The STEREO catalog contains the SIR start, stream interface and SIR end times. We use their stream interface time as our hss_start_time. \\n- The MAVEN catalog has similar times as the STEREO catalog.\\n- Earth SIR/HSS list: This catalog directly gives the hss_start_time and the hss_end_time, but no SIR times. \\n- The times in the Earth SIR/HSS list have been modified to 1 hour earlier as these times were \\noriginally given for the magnetopause, but the Wind spacecraft is located at the L1 point. \\nOne hour is practically equivalent to the propagation time of a 400 km/s slow solar wind \\nfrom the L1 point to the magnetopause.\\n- Spacecraft positions are given in Heliocentric Earth Equatorial Coordinates (HEEQ) coordinates. \\n- The coordinate system for all magnetic field components is SCEQ, except for Wind (HEEQ, which is the equivalent for SCEQ for Earth). \\n Definition of SpaceCraft Equatorial Coordinates (SCEQ): \\n Z is the solar rotation axis. \\n Y is the cross product of Z and R, with R being the vector that points from the Sun to the spacecraft.\\n X completes the right handed triad (and points away from the Sun). \\nThis system is thus like HEEQ but centered on the respective in situ spacecraft, so the SCEQ X and Y \\nbase vectors are rotated by the HEEQ longitude of the in situ spacecraft from HEEQ X and Y.\\nThe Y vector is similar to the T vector in an RTN system for each spacecraft, but the X and Z vectors \\nare rotated around Y compared to an RTN system. The differences between RTN and SCEQ for spacecraft within \\na few degrees of the solar equatorial plane are very small (within a few 0.1 nT usually).\\nWe choose SCEQ because it has the advantage that a comparison between multipoint CME events \\nand for comparison to simulations there is always a similar reference plane (the solar equatorial plane). \\n\\n ' \n\n\nparameters_text='Parameters:\\n00: sircat_id: The unique identifier for the observed stream interaction region (SIR). unit: string. 
\\n01: sc insitu: The name of the in situ observing spacecraft. unit: string. \\n02: sir_start_time: Stream interaction region start time. unit: UTC. \\n03: hss_start_time: High speed stream start time, equal to the stream interface time (for STEREO, MAVEN catalogs). unit: UTC. \\n04: sir_end_time: End time of the stream interaction region. unit: UTC. \\n05: hss_end_time: High speed stream end time, criterion at Wind: speed < 450 km/s. unit: UTC. \\n06: hss_vtmax_time: High speed stream maxmimum speed time. unit: UTC. \\n07: sc_heliodistance: Heliocentric distance of the spacecraft at hss_start_time. unit: AU.\\n08: sc_long_heeq: Heliospheric longitude of the spacecraft at hss_start_time, range [-180,180]. unit: degree (HEEQ).\\n09: sc_lat_heeq: Heliospheric latitude of the spacecraft at hss_start_time, range [-90,90]. unit: degree (HEEQ).\\n10: hss_vt_max: Maximum proton speed from hss_start_time to hss_end_time. unit: km/s.\\n11: hss_vt_mean: Mean proton speed from hss_start_time to hss_end_time. unit: km/s.\\n12: hss_vt_std: Standard deviation of proton speed from hss_start_time to hss_end_time. unit: km/s.\\n13: hss_vt_mean: Mean proton speed from hss_start_time to hss_end_time. unit: km/s.\\n14: hss_bt_max: Maximum total magnetic field from hss_start_time to hss_end_time. unit: nT.\\n15: hss_bt_mean: Mean total magnetic field from hss_start_time to hss_end_time. unit: nT.\\n16: hss_bt_std: Standard deviation of total magnetic field from hss_start_time to hss_end_time. unit: nT.\\n17: hss_bz_min: Minimum Bz component (SCEQ) from hss_start_time to hss_end_time. unit: nT.\\n18: hss_bz_mean: Mean Bz component (SCEQ) from hss_start_time to hss_end_time. unit: nT.\\n19: hss_bz_std: Standard deviation of Bz component (SCEQ) from hss_start_time to hss_end_time. unit: nT.\\n20: hss_duration: Duration of high speed stream from hss_start_time to hss_end_time. unit: hours.\\n21: sir_vt_mean: Mean proton speed from hss_start_time to sir_end_time. unit: km/s.\\n22: sir_vt_std: Standard deviation of proton speed from sir_start_time to hss_end_time. unit: km/s.\\n23: sir_vt_mean: Mean proton speed from hss_start_time to sir_end_time. unit: km/s.\\n24: sir_bt_max: Maximum total magnetic field from sir_start_time to hss_end_time. unit: nT.\\n25: sir_bt_mean: Mean total magnetic field from sir_start_time to sir_end_time. unit: nT.\\n26: sir_bt_std: Standard deviation of total magnetic field from sir_start_time to sir_end_time. unit: nT.\\n27: sir_bz_min: Minimum Bz component (SCEQ) from sir_start_time to sir_end_time. unit: nT.\\n28: sir_bz_mean: Mean Bz component (SCEQ) from sir_start_time to sir_end_time. unit: nT.\\n29: sir_bz_std: Standard deviation of Bz component (SCEQ) from sir_start_time to sir_end_time. unit: nT.\\n30: sir_duration: Duration of stream interaction region from sir_start_time to sir_end_time. 
unit: hours.\\n\\n\\n'\n\nprint(header)\nprint(parameters_text)\n\n\n#make header file\nfile='sircat/HELIO4CAST_SIRCAT_v10_header.txt'\nwith open(file, \"w\") as text_file:\n text_file.write(header)\n text_file.write(parameters_text)\nprint() \nprint('header saved as '+file)\nprint() \n\n#Convert to html regarding line breaks, paragraph beginning and spaces\nheader_spaces=header.replace(\" \", \"&nbsp;\")\nheader_html= \"<p>\" +header_spaces.replace('\\n', '<br>')+ \"</p>\" \nparameters_spaces=parameters_text.replace(\" \", \"&nbsp;\")\nparameters_html= \"<p>\" +parameters_text.replace('\\n', '<br>')+ \"</p>\"\nprint('header converted to HTML')\nprint() \nprint() \n\n\n# ### 4b save into different formats\n\n# In[413]:\n\n\n########## python formats\n\n# save ICMECAT as pandas dataframe with times as datetime objects as pickle\nfile='sircat/HELIO4CAST_SIRCAT_v10_pandas.p'\npickle.dump([scat,header,parameters], open(file, 'wb'))\nprint('SIRCAT saved as '+file)\n\n\n#load sircat as pandas dataframe\nfile='sircat/HELIO4CAST_SIRCAT_v10_pandas.p'\n[scat_pandas,h,p]=pickle.load( open(file, 'rb')) \nscat.keys()\nscat\n\n\n# # save SIRCAT as numpy array with times as matplotlib datetime as pickle\n# scat_num=copy.deepcopy(scat) \n# scat_num.icme_start_time=parse_time(scat_num.icme_start_time).plot_date\n# scat_num.mo_start_time=parse_time(scat_num.mo_start_time).plot_date\n# scat_num.mo_end_time=parse_time(scat_num.mo_end_time).plot_date\n# #convert to recarray\n# scat_num_rec=scat_num.to_records()\n# #create structured array\n# dtype1=[('index','i8'),('icmecat_id', '<U30'),('sc_insitu', '<U20')] +[(i, '<f8') for i in ic.keys()[2:len(ic.keys())]]\n# scat_num_struct=np.array(scat_num_rec,dtype=dtype1)\n\n\n\n# file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.p'\n# pickle.dump([scat_num,scat_num_struct,header,parameters], open(file, 'wb'))\n# print('ICMECAT saved as '+file)\n\n\n\n\n################ save to different formats\n\n\n\n#get beginning of tags for STA to identify allen and jian events\ntag_list=[]\nfor i in np.arange(0,len(scat)):\n tag_list.append(scat.sircat_id[i][13]) #j\n\nstai_jian=np.where(np.logical_and(scat.sc_insitu == 'STEREO-A',np.array(tag_list)=='J'))[:][0] \nstai_allen=np.where(np.logical_and(scat.sc_insitu == 'STEREO-A',np.array(tag_list)=='A'))[:][0] \n\n#get indices of all SIR spacecraft in SIRCAT\nsir_sc=np.hstack([stai_jian,stbi,mavi])\n\n#get indices of all HSS spacecraft in SIRCAT\nhss_sc=np.hstack([pspi,wini,stai_allen])\n\n#copy pandas dataframe first to change time format consistent with HELIO4CAST\nscat_copy=copy.deepcopy(scat) \nscat_copy.at[sir_sc,'sir_start_time']=parse_time(scat.sir_start_time[sir_sc]).isot\nscat_copy.hss_start_time=parse_time(scat.hss_start_time).isot\nscat_copy.at[sir_sc,'sir_end_time']=parse_time(scat.sir_end_time[sir_sc]).isot\n\nscat_copy.at[hss_sc,'hss_end_time']=parse_time(scat.hss_end_time[hss_sc]).isot\n#scat_copy.at[hss_sc,'hss_vtmax_time']=parse_time(scat.hss_vtmax_time[hss_sc]).isot\n\n#change time format for sir\nfor i in sir_sc:\n dum=scat_copy.sir_start_time[i] \n scat_copy.at[i,'sir_start_time']=dum[0:16]+'Z'\n\n dum=scat_copy.hss_start_time[i] \n scat_copy.at[i,'hss_start_time']=dum[0:16]+'Z'\n\n dum=scat_copy.sir_end_time[i] \n scat_copy.at[i,'sir_end_time']=dum[0:16]+'Z'\n\n\nfor i in hss_sc:\n dum=scat_copy.hss_start_time[i] \n scat_copy.at[i,'hss_start_time']=dum[0:16]+'Z'\n \n dum=scat_copy.hss_end_time[i] \n scat_copy.at[i,'hss_end_time']=dum[0:16]+'Z'\n\n #dum=scat_copy.hss_vtmax_time[i] \n 
#scat_copy.at[i,'hss_vtmax_time']=dum[0:16]+'Z'\n\n\n\n \n\n# for i in stbi:\n# dum=scat_copy.sir_end_time[i] \n# scat_copy.at[i,'sir_end_time']=dum[0:16]+'Z'\n\n# for i in stai:\n# dum=scat_copy.sir_end_time[i] \n# scat_copy.at[i,'sir_end_time']=dum[0:16]+'Z'\n \n\n\n\n\n#save as Excel\nfile='sircat/HELIO4CAST_SIRCAT_v10.xlsx'\nscat_copy.to_excel(file,sheet_name='SIRCATv1.0')\nprint('SIRCAT saved as '+file)\n\n#save as json\nfile='sircat/HELIO4CAST_SIRCAT_v10.json'\nscat_copy.to_json(file)\nprint('SIRCAT saved as '+file)\n\n#save as csv\nfile='sircat/HELIO4CAST_SIRCAT_v10.csv'\nscat_copy.to_csv(file)\nprint('SIRCAT saved as '+file)\n\n#save as txt\nfile='sircat/HELIO4CAST_SIRCAT_v10.txt'\nnp.savetxt(file, scat_copy.values.astype(str), fmt='%s' )\nprint('SIRCAT saved as '+file)\n\n\n# In[415]:\n\n\n#########################\n\n\n# #########save into hdf5 format , use S for strings http://docs.h5py.org/en/stable/strings.html#what-about-numpy-s-u-type\n# dtype2=[('index','i8'),('icmecat_id', 'S30'),('sc_insitu', 'S20')] +[(i, '<f8') for i in ic.keys()[2:len(ic.keys())]]\n# ich5=np.array(scat_num_rec,dtype=dtype2)\n# file='icmecat/HELIO4CAST_ICMECAT_v20.h5'\n# f=h5py.File(file,mode='w')\n# f[\"icmecat\"]= ich5\n# #add attributes\n# #************************\n# #***********************\n\n# print('ICMECAT saved as '+file)\n# f.close()\n\n# #reading h5py files http://docs.h5py.org/en/latest/quick.html\n# #fr = h5py.File('icmecat/HELIO4CAST_ICMECAT_v20.h5', 'r')\n# #list(fr.keys())\n# #ich5=fr['icmecat']\n# #ich5['mo_bstd']\n# #ich5.dtype\n# #fr.close()\n# ##################\n\n\n# #save as .npy without pickle\n# file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.npy'\n# np.save(file,ich5, allow_pickle=False)\n# print('ICMECAT saved as '+file)\n\n# #for loading do:\n# #icnpy=np.load(file)\n# #decode strings:\n# #icnpy['icmecat_id'][0].decode()\n\n#copy pandas dataframe first to change time format consistent with HELIO4CAST\nscat_copy2=copy.deepcopy(scat) \nscat_copy2.at[sir_sc,'sir_start_time']=parse_time(scat.sir_start_time[sir_sc]).iso\nscat_copy2.hss_start_time=parse_time(scat.hss_start_time).iso\nscat_copy2.at[sir_sc,'sir_end_time']=parse_time(scat.sir_end_time[sir_sc]).iso\nscat_copy2.at[hss_sc,'hss_end_time']=parse_time(scat.hss_end_time[hss_sc]).iso\n#scat_copy2.at[hss_sc,'hss_vtmax_time']=parse_time(scat.hss_vtmax_time[hss_sc]).iso\n\n#change time format for sir\nfor i in sir_sc:\n dum=scat_copy2.sir_start_time[i] \n scat_copy2.at[i,'sir_start_time']=dum[0:16]\n\n dum=scat_copy2.hss_start_time[i] \n scat_copy2.at[i,'hss_start_time']=dum[0:16]\n\n dum=scat_copy2.sir_end_time[i] \n scat_copy2.at[i,'sir_end_time']=dum[0:16]\n\n\nfor i in hss_sc:\n dum=scat_copy2.hss_start_time[i] \n scat_copy2.at[i,'hss_start_time']=dum[0:16]\n \n dum=scat_copy2.hss_end_time[i] \n scat_copy2.at[i,'hss_end_time']=dum[0:16]\n\n #dum=scat_copy2.hss_vtmax_time[i] \n #scat_copy2.at[i,'hss_vtmax_time']=dum[0:16]\n\n\n\n#save as json for webpage with different time format\nfile='sircat/HELIO4CAST_SIRCAT_v10_isot.json'\nscat_copy2.to_json(file)\nprint('SIRCAT saved as '+file)\n\n\n\n#save as html no header\nfile='sircat/HELIO4CAST_SIRCAT_v10_simple.html'\nscat_copy.to_html(file)\nprint('SIRCAT saved as '+file)\n\n\n############ save as html file with header\n#save as html\nfile='sircat/HELIO4CAST_SIRCAT_v10.html'\n#ic.to_html(file,justify='center')\n\n#ichtml='{% extends \"_base.html\" %} \\n \\n {% block content %} \\n \\n \\n '\nichtml = header_html\nichtml += parameters_html\nichtml += scat_copy.to_html()\n#ichtml 
+='\\n \\n {% endblock %}'\n\n\nwith open(file,'w') as f:\n f.write(ichtml)\n f.close()\n \nprint('SIRCAT saved as '+file) \n\n\n# ## 4c load ICMECAT pickle files\n\n# In[416]:\n\n\n\n#load sircat as pandas dataframe\nfile='sircat/HELIO4CAST_SIRCAT_v10_pandas.p'\n[scat_pandas,h,p]=pickle.load( open(file, 'rb')) \nscat.keys()\nscat\n\n#load icmecat as numpy array\n# file='icmecat/HELIO4CAST_ICMECAT_v20_numpy.p'\n# [ic_nprec,ic_np,h,p]=pickle.load( open(file, 'rb')) \n\n\n# In[417]:\n\n\nscat_pandas\nscat_pandas.keys()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.array", "pandas.DataFrame", "pandas.read_excel", "numpy.where", "numpy.loadtxt", "numpy.arange", "numpy.size", "numpy.hstack", "pandas.read_csv", "matplotlib.dates.date2num" ] ]
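The catalog script above repeatedly converts day-of-year strings from the Jian et al. STEREO list into datetimes via slicing (and, near its end, calls ic.reset_index where scat.reset_index is presumably meant). A compact sketch of that time conversion, using a hypothetical entry '030 14:25' for day 30 at 14:25 UT:

from datetime import datetime, timedelta

def doy_string_to_datetime(year, doy_hhmm):
    doy = int(doy_hhmm[0:3])      # first three characters: day of year
    hour = int(doy_hhmm[-5:-3])   # same slicing as in the script above
    minute = int(doy_hhmm[-2:])
    return datetime(year, 1, 1) + timedelta(days=doy - 1, hours=hour, minutes=minute)

print(doy_string_to_datetime(2010, '030 14:25'))  # 2010-01-30 14:25:00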
John-ye666/Python-for-Finance-Second-Edition
[ "dabef09bcdd7b0ec2934774741bd0a7e1950de73", "dabef09bcdd7b0ec2934774741bd0a7e1950de73", "dabef09bcdd7b0ec2934774741bd0a7e1950de73", "dabef09bcdd7b0ec2934774741bd0a7e1950de73", "dabef09bcdd7b0ec2934774741bd0a7e1950de73" ]
[ "Chapter09/c9_01_optimize.py", "Chapter08/c8_01_first_one.py", "Chapter13/c13_99_03_geneerate5year.py", "Chapter08/c8_28_52_week_high.py", "Chapter01/c1_20_matrix_dot_product.py" ]
[ "\"\"\"\n Name : c9_01_optimize.py\n Book : Python for Finance (2nd ed.)\n Publisher: Packt Publishing Ltd. \n Author : Yuxing Yan\n Date : 6/6/2017\n email : yany@canisius.edu\n paulyxy@hotmail.com\n\"\"\"\n\nfrom scipy.optimize import minimize\ndef myFunction(x):\n return (3.2+5*x**2)\n\nx0=100\nres = minimize(myFunction,x0,method='nelder-mead',options={'xtol':1e-8,'disp': True})\n\n", "\"\"\"\n Name : c8_01_first_one.py\n Book : Python for Finance (2nd ed.)\n Publisher: Packt Publishing Ltd. \n Author : Yuxing Yan\n Date : 6/6/2017\n email : yany@canisius.edu\n paulyxy@hotmail.com\n\"\"\"\n\nimport pandas as pd \nimport scipy as sp\nmean=0.10\nstd=0.2\nddate = pd.date_range('1/1/2016', periods=252) \nn=len(ddate)\n\ndata = pd.Series(sp.random.normal(mean,std,n), index=ddate)\nprint(data.head())", "\"\"\"\n Name : c13_99_03_generate5year.py\n Book : Python for Finance\n Publisher: Packt Publishing Ltd. \n Author : Yuxing Yan\n Date : 2/27/2017\n email : yany@canisius.edu\n paulyxy@hotmail.com\n\"\"\"\n\nimport pandas as pd\nx=pd.read_csv(\"c:/temp/migration5year.csv\",index_col=0)\ny=x/100\nprint(y)\ny.to_pickle(\"c:/temp/migration5year.pkl\")\n", "\"\"\"\n Name : c8_28_52_week_high.py\n Book : Python for Finance (2nd ed.)\n Publisher: Packt Publishing Ltd. \n Author : Yuxing Yan\n Date : 6/6/2017\n email : yany@canisius.edu\n paulyxy@hotmail.com\n\"\"\"\n\nimport numpy as np\nfrom datetime import datetime \nfrom dateutil.relativedelta import relativedelta \nfrom matplotlib.finance import quotes_historical_yahoo_ochl as getData\n#\nticker='IBM' \nenddate=datetime(2016,12,31)\n#\nbegdate=enddate-relativedelta(years=1) \np =getData(ticker, begdate, enddate,asobject=True, adjusted=True) \nx=p[-1] \ny=np.array(p.tolist())[:,-1] \nhigh=max(y) \nlow=min(y) \nprint(\" Today, Price High Low, % from low \") \nprint(x[0], x[-1], high, low, round((x[-1]-low)/(high-low)*100,2))\n", "# -*- coding: utf-8 -*-\n\"\"\"\n Name : c1_20_matrix_dot_product.py\n Book : Python for Finance (2nd ed.)\n Publisher: Packt Publishing Ltd. \n Author : Yuxing Yan\n Date : 6/6/2017\n email : yany@canisius.edu\n paulyxy@hotmail.com\n\"\"\"\nimport numpy as np\na=np.array([[1,2,3],[4,5,6]],float) # 2 by 3\nb=np.array([[1,2],[3,3],[4,5]],float) # 3 by 2\nnp.dot(a,b) # 2 by 2\nprint(np.dot(a,b))" ]
[ [ "scipy.optimize.minimize" ], [ "pandas.date_range", "scipy.random.normal" ], [ "pandas.read_csv" ], [ "matplotlib.finance.quotes_historical_yahoo_ochl" ], [ "numpy.array", "numpy.dot" ] ]
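The first finance example above minimises the quadratic 3.2 + 5*x**2 with Nelder-Mead, whose true minimum is 3.2 at x = 0. A minimal re-run of that call, using xatol (the current SciPy name for the tolerance the book passes as xtol):

from scipy.optimize import minimize

def my_function(x):
    return 3.2 + 5 * x ** 2

# Start far from the optimum, as in c9_01_optimize.py.
res = minimize(my_function, x0=100, method='nelder-mead',
               options={'xatol': 1e-8, 'disp': True})
print(res.x, res.fun)  # approximately [0.], 3.2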
thispl/tpl-hrpro
[ "3e0e7b16e4e2553490380a29c72bf59a061433a2" ]
[ "hrpro/hrpro/report/continuous_absent_report/continuous_absent_report.py" ]
[ "# Copyright (c) 2013, TeamPRO and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nfrom six.moves import range\nfrom six import string_types\nimport frappe\nimport json\nfrom frappe.utils import (getdate, cint, add_months, date_diff, add_days,\n\tnowdate, get_datetime_str, cstr, get_datetime, now_datetime, format_datetime)\nfrom datetime import datetime\nfrom calendar import monthrange\nfrom frappe import _, msgprint\nfrom frappe.utils import flt\nfrom frappe.utils import cstr, cint, getdate\n\n\n\ndef execute(filters=None):\n if not filters:\n filters = {}\n columns = get_columns()\n data = []\n row = []\n conditions, filters = get_conditions(filters)\n attendance = get_attendance(conditions,filters)\n for att in attendance:\n data.append(att)\n return columns, data\n\n\ndef get_columns():\n columns = [\n _(\"ID\") + \":Data:200\",\n _(\"Attendance Date\") + \":Data:200\",\n _(\"Employee\") + \":Data:120\",\n _(\"Employee Name\") + \":Data:120\",\n _(\"Department\") + \":Data:120\",\n _(\"Status\") + \":Data:120\",\n # _(\"Present Shift\") + \":Data:120\"\n ]\n return columns\n\n\ndef get_attendance(conditions,filters):\n attendance = frappe.db.sql(\"\"\"Select name,employee, employee_name, department,attendance_date, shift,status\n From `tabAttendance` Where status = \"Absent\" and docstatus = 1 and %s group by employee,attendance_date\"\"\"% conditions,filters, as_dict=1)\n employee = frappe.db.get_all(\"Employee\",{\"status\":\"Active\"},[\"name\"])\n row = []\n emp_count = 0\n import pandas as pd\n mydates = pd.date_range(filters.from_date, filters.to_date).tolist()\n absent_date = []\n for emp in employee:\n for date in mydates:\n for att in attendance:\n if emp.name == att.employee:\n if att.attendance_date == date.date():\n att_date = date.date()\n absent_date += [(date.date())]\n emp_count += 1\n if emp_count >= 3:\n for ab_date in absent_date:\n row += [(att.name,ab_date,att.employee,att.employee_name,att.department,att.status)]\n frappe.errprint(row)\n return row\n\ndef get_conditions(filters):\n conditions = \"\"\n if filters.get(\"from_date\"): conditions += \" attendance_date >= %(from_date)s\"\n if filters.get(\"to_date\"): conditions += \" and attendance_date <= %(to_date)s\"\n if filters.get(\"company\"): conditions += \" and company = %(company)s\"\n if filters.get(\"employee\"): conditions += \" and employee = %(employee)s\"\n if filters.get(\"department\"): conditions += \" and department = %(department)s\"\n\n return conditions, filters" ]
[ [ "pandas.date_range" ] ]
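The report above sets out to flag continuous absence, but its emp_count and absent_date accumulators are never reset per employee, so the three-day condition mixes employees and dates. A hedged sketch of one way to detect runs of three or more consecutive absent days per employee, on made-up records:

import pandas as pd

# Made-up absence records.
absences = pd.DataFrame({
    'employee': ['E1', 'E1', 'E1', 'E1', 'E2', 'E2'],
    'attendance_date': pd.to_datetime(['2021-03-01', '2021-03-02', '2021-03-03',
                                       '2021-03-10', '2021-03-01', '2021-03-03']),
})

def consecutive_runs(dates, min_len=3):
    dates = dates.sort_values()
    # A new run starts whenever the gap to the previous absent day is not one day.
    run_id = (dates.diff() != pd.Timedelta(days=1)).cumsum()
    return dates.groupby(run_id).filter(lambda run: len(run) >= min_len)

flagged = absences.groupby('employee')['attendance_date'].apply(consecutive_runs)
print(flagged)  # only E1's 1-3 March run qualifies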
zeinabhakimi/code
[ "68a6e9195ee99a4ee3a2a1a3e47fa5f75c937791" ]
[ "data_helpers.py" ]
[ "import csv\nimport numpy as np\nimport re\nimport itertools\n\nfrom collections import Counter\nfrom collections import namedtuple\n\n\nDataPoint = namedtuple('DataPoint', ['PhraseId', 'SentenceId', 'Phrase', 'Sentiment'])\n\n\ndef load_datapoints(data_file):\n datapoints = []\n with open(data_file) as f:\n reader = csv.DictReader(f, delimiter='\\t')\n for row in reader:\n if 'Sentiment' not in row:\n row['Sentiment'] = None\n dp = DataPoint(**row)\n datapoints.append(dp)\n return datapoints\n\n\ndef clean_str(string):\n \"\"\"\n Tokenization/string cleaning for all datasets except for SST.\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n \"\"\"\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()\n\n\ndef extract_phrases_in_datapoints(datapoints):\n x_text = [dp.Phrase for dp in datapoints]\n return [clean_str(sent) for sent in x_text]\n\n\ndef extract_phraseids_in_datapoints(datapoints):\n return [dp.PhraseId for dp in datapoints]\n\n\ndef load_data_and_labels(data_file):\n \"\"\"\n Loads MR polarity data from files, splits the data into words and generates labels.\n Returns split sentences and labels.\n \"\"\"\n # Load data from files\n datapoints = load_datapoints(data_file)\n x_text = extract_phrases_in_datapoints(datapoints)\n\n y = [int(dp.Sentiment) for dp in datapoints]\n\n def one_hot(i):\n return [0] * i + [1] + [0] * (4-i)\n\n y_vector = []\n for sentiment in y:\n y_vector.append(one_hot(sentiment))\n\n return [x_text, np.array(y_vector)]\n\n\ndef batch_iter(data, batch_size, num_epochs, shuffle=True):\n \"\"\"\n Generates a batch iterator for a dataset.\n \"\"\"\n data = np.array(data)\n data_size = len(data)\n num_batches_per_epoch = int(len(data)/batch_size) + 1\n for epoch in range(num_epochs):\n # Shuffle the data at each epoch\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n else:\n shuffled_data = data\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield shuffled_data[start_index:end_index]\n" ]
[ [ "numpy.array", "numpy.arange" ] ]
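load_data_and_labels above encodes each sentiment value 0 to 4 as a length-five one-hot vector before returning the label matrix. A tiny standalone sketch of just that encoding step, with made-up sentiments:

import numpy as np

def one_hot(i):
    # Sentiment i in 0..4 becomes a length-5 one-hot row, as in the loader above.
    return [0] * i + [1] + [0] * (4 - i)

y = np.array([one_hot(s) for s in [0, 2, 4]])
print(y)
# [[1 0 0 0 0]
#  [0 0 1 0 0]
#  [0 0 0 0 1]]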
virkt25/adversarial-robustness-toolbox
[ "3cfa6de196cb32a3efafab2ff6bbf44247c9ddbd", "3cfa6de196cb32a3efafab2ff6bbf44247c9ddbd", "3cfa6de196cb32a3efafab2ff6bbf44247c9ddbd" ]
[ "tests/attacks/test_virtual_adversarial.py", "mlops/kubeflow/robustness_evaluation_fgsm_pytorch/src/robustness.py", "tests/wrappers/test_wrapper.py" ]
[ "# MIT License\n#\n# Copyright (C) IBM Corporation 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\nimport numpy as np\n\nfrom art.attacks import VirtualAdversarialMethod\nfrom art.classifiers import KerasClassifier\nfrom art.classifiers.classifier import ClassifierNeuralNetwork, ClassifierGradients\nfrom art.utils import get_labels_np_array\n\nfrom tests.utils import TestBase\nfrom tests.utils import get_image_classifier_tf, get_image_classifier_kr, get_image_classifier_pt\nfrom tests.utils import get_tabular_classifier_tf, get_tabular_classifier_kr, get_tabular_classifier_pt\nfrom tests.attacks.utils import backend_test_classifier_type_check_fail\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestVirtualAdversarial(TestBase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.n_train = 100\n cls.n_test = 10\n cls.x_train_mnist = cls.x_train_mnist[0 : cls.n_train]\n cls.y_train_mnist = cls.y_train_mnist[0 : cls.n_train]\n cls.x_test_mnist = cls.x_test_mnist[0 : cls.n_test]\n cls.y_test_mnist = cls.y_test_mnist[0 : cls.n_test]\n\n def test_keras_mnist(self):\n classifier = get_image_classifier_kr()\n\n scores = classifier._model.evaluate(self.x_train_mnist, self.y_train_mnist)\n logging.info(\"[Keras, MNIST] Accuracy on training set: %.2f%%\", (scores[1] * 100))\n scores = classifier._model.evaluate(self.x_test_mnist, self.y_test_mnist)\n logging.info(\"[Keras, MNIST] Accuracy on test set: %.2f%%\", (scores[1] * 100))\n\n self._test_backend_mnist(classifier, self.x_test_mnist, self.y_test_mnist)\n\n def test_tensorflow_mnist(self):\n classifier, sess = get_image_classifier_tf(from_logits=False)\n\n scores = get_labels_np_array(classifier.predict(self.x_train_mnist))\n acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_train_mnist, axis=1)) / self.y_train_mnist.shape[0]\n logger.info(\"[TF, MNIST] Accuracy on training set: %.2f%%\", (acc * 100))\n\n scores = get_labels_np_array(classifier.predict(self.x_test_mnist))\n acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_test_mnist, axis=1)) / self.y_test_mnist.shape[0]\n logger.info(\"[TF, MNIST] Accuracy on test set: %.2f%%\", (acc * 100))\n\n self._test_backend_mnist(classifier, self.x_test_mnist, self.y_test_mnist)\n\n def test_pytorch_mnist(self):\n x_train_mnist = np.swapaxes(self.x_train_mnist, 1, 3).astype(np.float32)\n x_test_mnist = np.swapaxes(self.x_test_mnist, 1, 
3).astype(np.float32)\n classifier = get_image_classifier_pt()\n\n scores = get_labels_np_array(classifier.predict(x_train_mnist))\n acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_train_mnist, axis=1)) / self.y_train_mnist.shape[0]\n logger.info(\"[PyTorch, MNIST] Accuracy on training set: %.2f%%\", (acc * 100))\n\n scores = get_labels_np_array(classifier.predict(x_test_mnist))\n acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_test_mnist, axis=1)) / self.y_test_mnist.shape[0]\n logger.info(\"[PyTorch, MNIST] Accuracy on test set: %.2f%%\", (acc * 100))\n\n self._test_backend_mnist(classifier, x_test_mnist, self.y_test_mnist)\n\n def _test_backend_mnist(self, classifier, x_test, y_test):\n x_test_original = x_test.copy()\n\n df = VirtualAdversarialMethod(classifier, batch_size=100, max_iter=2)\n\n x_test_adv = df.generate(x_test)\n\n self.assertFalse((x_test == x_test_adv).all())\n\n y_pred = get_labels_np_array(classifier.predict(x_test_adv))\n self.assertFalse((y_test == y_pred).all())\n\n acc = np.sum(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]\n logger.info(\"Accuracy on adversarial examples: %.2f%%\", (acc * 100))\n\n # Check that x_test has not been modified by attack and classifier\n self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)\n\n def test_classifier_type_check_fail(self):\n backend_test_classifier_type_check_fail(\n VirtualAdversarialMethod, [ClassifierNeuralNetwork, ClassifierGradients]\n )\n\n def test_keras_iris_clipped(self):\n classifier = get_tabular_classifier_kr()\n\n # Test untargeted attack\n attack = VirtualAdversarialMethod(classifier, eps=0.1)\n x_test_iris_adv = attack.generate(self.x_test_iris)\n self.assertFalse((self.x_test_iris == x_test_iris_adv).all())\n self.assertTrue((x_test_iris_adv <= 1).all())\n self.assertTrue((x_test_iris_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_iris_adv), axis=1)\n self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())\n acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"Accuracy on Iris with VAT adversarial examples: %.2f%%\", (acc * 100))\n\n def test_keras_iris_unbounded(self):\n classifier = get_tabular_classifier_kr()\n\n # Recreate a classifier without clip values\n classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)\n attack = VirtualAdversarialMethod(classifier, eps=1)\n x_test_iris_adv = attack.generate(self.x_test_iris)\n self.assertFalse((self.x_test_iris == x_test_iris_adv).all())\n self.assertTrue((x_test_iris_adv > 1).any())\n self.assertTrue((x_test_iris_adv < 0).any())\n\n preds_adv = np.argmax(classifier.predict(x_test_iris_adv), axis=1)\n self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())\n acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"Accuracy on Iris with VAT adversarial examples: %.2f%%\", (acc * 100))\n\n # def test_iris_tf(self):\n # classifier, _ = get_iris_classifier_tf()\n #\n # attack = VirtualAdversarialMethod(classifier, eps=.1)\n # x_test_adv = attack.generate(x_test)\n # #print(np.min(x_test_adv), np.max(x_test_adv), np.min(x_test), np.max(x_test))\n # self.assertFalse((x_test == x_test_adv).all())\n # self.assertTrue((x_test_adv <= 1).all())\n # self.assertTrue((x_test_adv >= 0).all())\n #\n # preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n # 
self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())\n # acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]\n # logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100))\n\n # def test_iris_pt(self):\n # (_, _), (x_test, y_test) = self.iris\n # classifier = get_iris_classifier_pt()\n #\n # attack = VirtualAdversarialMethod(classifier, eps=.1)\n # x_test_adv = attack.generate(x_test.astype(np.float32))\n # #print(np.min(x_test_adv), np.max(x_test_adv), np.min(x_test), np.max(x_test))\n # self.assertFalse((x_test == x_test_adv).all())\n # self.assertTrue((x_test_adv <= 1).all())\n # self.assertTrue((x_test_adv >= 0).all())\n #\n # preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n # self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())\n # acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]\n # logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100))\n\n def test_tensorflow_iris(self):\n classifier, _ = get_tabular_classifier_tf()\n\n attack = VirtualAdversarialMethod(classifier, eps=0.1)\n\n with self.assertRaises(TypeError) as context:\n x_test_iris_adv = attack.generate(self.x_test_iris)\n\n self.assertIn(\n \"This attack requires a classifier predicting probabilities in the range [0, 1] as output.\"\n \"Values smaller than 0.0 or larger than 1.0 have been detected.\",\n str(context.exception),\n )\n\n def test_pytorch_iris(self):\n classifier = get_tabular_classifier_pt()\n\n attack = VirtualAdversarialMethod(classifier, eps=0.1)\n\n with self.assertRaises(TypeError) as context:\n x_test_iris_adv = attack.generate(self.x_test_iris.astype(np.float32))\n\n self.assertIn(\n \"This attack requires a classifier predicting probabilities in the range [0, 1] as output.\"\n \"Values smaller than 0.0 or larger than 1.0 have been detected.\",\n str(context.exception),\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# MIT License\n#\n# Copyright (C) IBM Corporation 2019\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"Robustness evaluation module.\"\"\"\n\nimport numpy as np\nfrom minio import Minio\n\nimport torch\nimport torch.utils.data\n\nfrom art.classifiers.pytorch import PyTorchClassifier\nfrom art.attacks.evasion.fast_gradient import FastGradientMethod\n\nimport zipfile\nimport importlib\nimport re\n\nfrom robustness_util import get_metrics\n\n\ndef robustness_evaluation(object_storage_url, object_storage_username, object_storage_password,\n data_bucket_name, result_bucket_name, model_id,\n feature_testset_path='processed_data/X_test.npy',\n label_testset_path='processed_data/y_test.npy',\n clip_values=(0, 1),\n nb_classes=2,\n input_shape=(1, 3, 64, 64),\n model_class_file='model.py',\n model_class_name='model',\n LossFn='',\n Optimizer='',\n epsilon=0.2):\n\n url = re.compile(r\"https?://\")\n cos = Minio(url.sub('', object_storage_url),\n access_key=object_storage_username,\n secret_key=object_storage_password,\n secure=False)\n\n dataset_filenamex = \"X_test.npy\"\n dataset_filenamey = \"y_test.npy\"\n weights_filename = \"model.pt\"\n model_files = model_id + '/_submitted_code/model.zip'\n\n cos.fget_object(data_bucket_name, feature_testset_path, dataset_filenamex)\n cos.fget_object(data_bucket_name, label_testset_path, dataset_filenamey)\n cos.fget_object(result_bucket_name, model_id + '/' + weights_filename, weights_filename)\n cos.fget_object(result_bucket_name, model_files, 'model.zip')\n\n # Load PyTorch model definition from the source code.\n zip_ref = zipfile.ZipFile('model.zip', 'r')\n zip_ref.extractall('model_files')\n zip_ref.close()\n\n modulename = 'model_files.' 
+ model_class_file.split('.')[0].replace('-', '_')\n\n '''\n We required users to define where the model class is located or follow\n some naming convention we have provided.\n '''\n model_class = getattr(importlib.import_module(modulename), model_class_name)\n\n # load & compile model\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n model = model_class().to(device)\n model.load_state_dict(torch.load(weights_filename, map_location=device))\n\n # Define Loss and optimizer function for the PyTorch model\n if LossFn:\n loss_fn = eval(LossFn)\n else:\n loss_fn = torch.nn.CrossEntropyLoss()\n if Optimizer:\n optimizer = eval(Optimizer)\n else:\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n # create pytorch classifier\n classifier = PyTorchClassifier(clip_values, model, loss_fn, optimizer, input_shape, nb_classes)\n\n # load test dataset\n x = np.load(dataset_filenamex)\n y = np.load(dataset_filenamey)\n\n # craft adversarial samples using FGSM\n crafter = FastGradientMethod(classifier, eps=epsilon)\n x_samples = crafter.generate(x)\n\n # obtain all metrics (robustness score, perturbation metric, reduction in confidence)\n metrics, y_pred_orig, y_pred_adv = get_metrics(model, x, x_samples, y)\n\n print(\"metrics:\", metrics)\n return metrics\n", "# MIT License\n#\n# Copyright (C) IBM Corporation 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nimport keras.backend as k\nimport numpy as np\n\nfrom art.wrappers.wrapper import ClassifierWrapper\nfrom art.utils import load_mnist\n\nfrom tests.utils import master_seed, get_image_classifier_kr\n\nlogger = logging.getLogger(__name__)\n\nBATCH_SIZE = 10\nNB_TRAIN = 500\nNB_TEST = 100\n\n\nclass TestMixinWKerasClassifier(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n k.clear_session()\n k.set_learning_phase(1)\n\n # Get MNIST\n (x_train, y_train), (x_test, y_test), _, _ = load_mnist()\n x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST]\n cls.mnist = (x_train, y_train), (x_test, y_test)\n\n # Load small Keras model\n cls.model_mnist = get_image_classifier_kr()\n\n @classmethod\n def tearDownClass(cls):\n k.clear_session()\n\n def setUp(self):\n master_seed(seed=1234)\n\n def test_shapes(self):\n x_test, y_test = self.mnist[1]\n classifier = ClassifierWrapper(self.model_mnist)\n\n preds = classifier.predict(self.mnist[1][0])\n self.assertEqual(preds.shape, y_test.shape)\n\n self.assertEqual(classifier.nb_classes(), 10)\n\n class_grads = classifier.class_gradient(x_test[:11])\n self.assertEqual(class_grads.shape, tuple([11, 10] + list(x_test[1].shape)))\n\n loss_grads = classifier.loss_gradient(x_test[:11], y_test[:11])\n self.assertEqual(loss_grads.shape, x_test[:11].shape)\n\n def test_class_gradient(self):\n (_, _), (x_test, _) = self.mnist\n classifier = ClassifierWrapper(self.model_mnist)\n\n # Test all gradients label\n grads = classifier.class_gradient(x_test)\n\n self.assertTrue(np.array(grads.shape == (NB_TEST, 10, 28, 28, 1)).all())\n self.assertNotEqual(np.sum(grads), 0)\n\n # Test 1 gradient label = 5\n grads = classifier.class_gradient(x_test, label=5)\n\n self.assertTrue(np.array(grads.shape == (NB_TEST, 1, 28, 28, 1)).all())\n self.assertNotEqual(np.sum(grads), 0)\n\n # Test a set of gradients label = array\n label = np.random.randint(5, size=NB_TEST)\n grads = classifier.class_gradient(x_test, label=label)\n\n self.assertTrue(np.array(grads.shape == (NB_TEST, 1, 28, 28, 1)).all())\n self.assertNotEqual(np.sum(grads), 0)\n\n def test_loss_gradient(self):\n (_, _), (x_test, y_test) = self.mnist\n classifier = ClassifierWrapper(self.model_mnist)\n\n # Test gradient\n grads = classifier.loss_gradient(x_test, y_test)\n\n self.assertTrue(np.array(grads.shape == (NB_TEST, 28, 28, 1)).all())\n self.assertNotEqual(np.sum(grads), 0)\n\n def test_layers(self):\n (_, _), (x_test, _), _, _ = load_mnist()\n x_test = x_test[:NB_TEST]\n\n classifier = ClassifierWrapper(self.model_mnist)\n self.assertEqual(len(classifier.layer_names), 3)\n\n layer_names = classifier.layer_names\n for i, name in enumerate(layer_names):\n act_i = classifier.get_activations(x_test, i, batch_size=128)\n act_name = classifier.get_activations(x_test, name, batch_size=128)\n self.assertAlmostEqual(np.sum(act_name - act_i), 0)\n\n def test_save(self):\n import os\n\n path = \"tmp\"\n filename = \"model.h5\"\n classifier = ClassifierWrapper(self.model_mnist)\n classifier.save(filename, path=path)\n self.assertTrue(os.path.isfile(os.path.join(path, filename)))\n\n 
# Remove saved file\n os.remove(os.path.join(path, filename))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.swapaxes", "numpy.argmax", "numpy.abs" ], [ "torch.cuda.is_available", "numpy.load", "torch.nn.CrossEntropyLoss", "torch.load" ], [ "numpy.sum", "numpy.array", "numpy.random.randint" ] ]
kiritowu/Deep-Learning
[ "baaec55a3b32f9e02ca3d834f1408f6736bdc170" ]
[ "RL/Snake-DQN/model/dqn_engineered.py" ]
[ "import random\nfrom typing import List, Optional, Tuple\n\nimport numpy as np\nimport gym\nimport wandb\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential, Input, Model\nfrom tensorflow.keras.layers import Dense, Conv2D, Flatten, Concatenate\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.models import clone_model\n\nfrom utils import ReplayBuffer, Experience\n\nclass DQN:\n def __init__(\n self,\n env:gym.Env,\n combined_observation_space:Tuple[Tuple[int,int,int], int],\n lr:float,\n gamma:float,\n epsilon:float,\n epsilon_decay:float,\n target_update_interval: int = 100,\n log_wandb: bool=False,\n replay_buffer:Optional[ReplayBuffer]=None,\n fc_layers:Optional[List[int]]=None,\n conv_layers:Optional[List[int]]=None\n ):\n \"\"\"\n Construct a new 'Deep Q-Network' object.\n\n :param env: The environment of the game\n :param lr: The learning rate of the agent\n :param gamma: The amount of weight it gives to future rewards in the value function\n :param epsilon: The probability where we do not go with the “greedy” action with the highest Q-value but rather choose a random action\n :param epsilon_decay: The rate by which epsilon decreases after an episode\n :param target_update_interval: The interval between updates of the target network\n :param replay_buffer: Replay memory object to store and sample observations from for training.\n Defaults to double-end queue with maximum length of 500_000 steps.\n \"\"\"\n self.log_wandb = log_wandb\n self.env = env\n self.action_space = env.action_space\n self.combined_observation_space = combined_observation_space\n\n self.lr = lr\n self.gamma = gamma\n self.epsilon = epsilon\n self.epsilon_decay = epsilon_decay\n self.target_update_interval = target_update_interval\n self.rewards_list = []\n\n # store trajectories of experience when executing a policy in an environment\n self.buffer = replay_buffer if replay_buffer else ReplayBuffer(maxlen=2_500)\n self.batch_size = 64\n self.epsilon_min = 0.01\n # agents have either a dis- crete or a continuous action space\n self.num_action_space = 4\n\n\n self.fc_layers = [128,128,128] if not fc_layers else fc_layers\n assert len(self.fc_layers) >= 1, \"You need at least one hidden layer\"\n\n self.conv_layers = [32, 64, 128] if not conv_layers else conv_layers\n assert len(self.conv_layers) >= 1, \"You need at least one hidden layer\"\n\n self.model = self.initialize_model()\n self.model_target = clone_model(self.model)\n\n # Track the hyperparameters\n if self.log_wandb:\n wandb.config.update({\n \"lr\": self.lr,\n \"gamma\": self.gamma,\n \"epsilon\": self.epsilon,\n \"epsilon_decay\": self.epsilon_decay,\n \"target_update_interval\": self.target_update_interval,\n \"batch_size\": self.batch_size,\n \"fc_layers\": self.fc_layers\n })\n\n def initialize_model(self):\n conv_layers = self.conv_layers[:] # Make a copy\n first_conv_layer = conv_layers.pop(0)\n\n i1 = Input(shape=self.combined_observation_space[0])\n i2 = Input(shape=self.combined_observation_space[1])\n x = Conv2D(first_conv_layer,8,4, padding=\"same\", activation=\"relu\")(i1)\n for conv_layer in conv_layers:\n x = Conv2D(conv_layer,3,4,padding=\"same\", activation=\"relu\")(x)\n x = Flatten()(x)\n x = Concatenate(axis=1)([x,i2])\n \n layer = self.fc_layers[:] # Make a copy\n first_layer = layer.pop(0)\n \n x = Dense(first_layer, activation=\"relu\")(x)\n \n # Hidden fc_layers\n for layer in layer:\n x = Dense(layer, activation=\"relu\")(x)\n\n # the number of ending neurons 
is equal to the number of action space\n out = Dense(self.num_action_space, activation=\"linear\")(x)\n \n model = Model(inputs = [i1, i2], outputs = out)\n # Compile the model with MSE of TD-Error with Adam\n model.compile(loss=\"mean_squared_error\", optimizer=Adam(learning_rate=self.lr))\n return model\n\n def get_action(self, state):\n # a random action is chosen when a random chosen number is lower than the epsilon\n if np.random.rand() < self.epsilon:\n return random.randint(0,3)\n # if not, the model will predict the action with its current state\n predicted_actions = self.model.predict([tf.expand_dims(state[0], axis=0),tf.expand_dims(state[1], axis=0)])\n\n # returns the index of the actions with the highest score\n return np.argmax(predicted_actions[0])\n\n def update_weights(self):\n\n # buffer size check\n if len(self.buffer) < self.batch_size:\n return\n\n # randomly sample a replay memory with the size of the batch\n # getting the states, actions, rewards, next_state and done_list from the random sample\n states, actions, rewards, next_states, done_list = self.buffer.sample(self.batch_size, dqn=True)\n\n # calculate the loss to create a target vector for the model to fit with the states\n targets = rewards + self.gamma * (np.max(self.model_target.predict_on_batch([\n np.concatenate(next_states[0]).reshape(-1, *self.combined_observation_space[0]),\n np.concatenate(next_states[1]).reshape(-1, self.combined_observation_space[1])\n ]), axis=1)) * (1 - done_list)\n target_vec = self.model.predict_on_batch([\n np.concatenate(states[0]).reshape(-1, *self.combined_observation_space[0]),\n np.concatenate(states[1]).reshape(-1, self.combined_observation_space[1])\n ])\n indexes = np.array([i for i in range(self.batch_size)])\n target_vec[[indexes], [actions]] = targets\n\n # fit the model with the states and the target vector for one iteration\n self.model.fit([\n np.concatenate(states[0]).reshape(-1, *self.combined_observation_space[0]),\n np.concatenate(states[1]).reshape(-1, self.combined_observation_space[1])\n ], target_vec, epochs=1, verbose=0)\n\n def _update_target(self, target_weights, weights):\n for target_weight, weight in zip(target_weights, weights):\n target_weight.assign(weight)\n\n def train(self, num_episodes=1000, mean_stopping=True):\n\n # iterate over the number of episodes\n for episode in range(num_episodes):\n \n state = self.env.reset()\n reward_for_episode = 0\n max_num_steps = 1000\n \n for step in range(max_num_steps):\n # get the action for the current state\n action = self.get_action(state)\n if isinstance(action, tf.Tensor):\n action = action.numpy()\n # get the next_state, reward, done and info after running the action\n next_state, reward, done, info = self.env.step(int(action))\n \n # store the experience in replay memory\n self.buffer.append(Experience(state, action, reward, next_state, done))\n \n # add up rewards\n reward_for_episode += reward\n state = next_state\n \n # train dqn\n self.update_weights()\n\n # Every k steps, copy actual network weights to the target network weights\n if (step + 1) % self.target_update_interval == 0:\n self._update_target(self.model_target.variables, self.model.variables)\n\n if done: break\n\n self.rewards_list.append(reward_for_episode)\n\n # decay the epsilon after each episode\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n # check for terminal condition\n last_rewards_mean = np.mean(self.rewards_list[-100:])\n if last_rewards_mean > 250 and mean_stopping:\n print(\"DQN Training 
Complete...\")\n break\n \n print(\"[{:0>3}] Reward: {: >8.3f} | Avg Reward: {: >8.3f} | e: {:.3f} | Episode Length: {:}\"\n .format(episode, reward_for_episode, last_rewards_mean, self.epsilon, step))\n \n if self.log_wandb:\n wandb.log({\n \"Episode\": episode,\n \"Reward\": reward_for_episode,\n \"Avg-Reward-100e\": last_rewards_mean,\n \"Epsilon\": self.epsilon,\n \"Episode Length\": step\n })\n\n def save(self, path:str):\n self.model.save(path)" ]
[ [ "numpy.concatenate", "numpy.random.rand", "tensorflow.expand_dims", "tensorflow.keras.layers.Flatten", "numpy.mean", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.Model", "numpy.argmax", "tensorflow.keras.models.clone_model", "tensorflow.keras.Input", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Concatenate" ] ]
lisadunlap/ray
[ "882ae976683f58e8f77ccb59973e2a987623eb63" ]
[ "rllib/policy/dynamic_tf_policy.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import OrderedDict\nimport logging\nimport numpy as np\n\nfrom ray.rllib.policy.policy import Policy\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.policy.tf_policy import TFPolicy\nfrom ray.rllib.models.catalog import ModelCatalog\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils import try_import_tf\nfrom ray.rllib.utils.debug import log_once, summarize\nfrom ray.rllib.utils.tracking_dict import UsageTrackingDict\n\ntf = try_import_tf()\n\nlogger = logging.getLogger(__name__)\n\n\nclass DynamicTFPolicy(TFPolicy):\n \"\"\"A TFPolicy that auto-defines placeholders dynamically at runtime.\n\n Initialization of this class occurs in two phases.\n * Phase 1: the model is created and model variables are initialized.\n * Phase 2: a fake batch of data is created, sent to the trajectory\n postprocessor, and then used to create placeholders for the loss\n function. The loss and stats functions are initialized with these\n placeholders.\n\n Initialization defines the static graph.\n\n Attributes:\n observation_space (gym.Space): observation space of the policy.\n action_space (gym.Space): action space of the policy.\n config (dict): config of the policy\n model (TorchModel): TF model instance\n dist_class (type): TF action distribution class\n \"\"\"\n\n def __init__(self,\n obs_space,\n action_space,\n config,\n loss_fn,\n stats_fn=None,\n grad_stats_fn=None,\n before_loss_init=None,\n make_model=None,\n action_sampler_fn=None,\n existing_inputs=None,\n existing_model=None,\n get_batch_divisibility_req=None,\n obs_include_prev_action_reward=True):\n \"\"\"Initialize a dynamic TF policy.\n\n Arguments:\n observation_space (gym.Space): Observation space of the policy.\n action_space (gym.Space): Action space of the policy.\n config (dict): Policy-specific configuration data.\n loss_fn (func): function that returns a loss tensor the policy\n graph, and dict of experience tensor placeholders\n stats_fn (func): optional function that returns a dict of\n TF fetches given the policy and batch input tensors\n grad_stats_fn (func): optional function that returns a dict of\n TF fetches given the policy and loss gradient tensors\n before_loss_init (func): optional function to run prior to loss\n init that takes the same arguments as __init__\n make_model (func): optional function that returns a ModelV2 object\n given (policy, obs_space, action_space, config).\n All policy variables should be created in this function. 
If not\n specified, a default model will be created.\n action_sampler_fn (func): optional function that returns a\n tuple of action and action logp tensors given\n (policy, model, input_dict, obs_space, action_space, config).\n If not specified, a default action distribution will be used.\n existing_inputs (OrderedDict): when copying a policy, this\n specifies an existing dict of placeholders to use instead of\n defining new ones\n existing_model (ModelV2): when copying a policy, this specifies\n an existing model to clone and share weights with\n get_batch_divisibility_req (func): optional function that returns\n the divisibility requirement for sample batches\n obs_include_prev_action_reward (bool): whether to include the\n previous action and reward in the model input\n \"\"\"\n self.config = config\n self._loss_fn = loss_fn\n self._stats_fn = stats_fn\n self._grad_stats_fn = grad_stats_fn\n self._obs_include_prev_action_reward = obs_include_prev_action_reward\n\n # Setup standard placeholders\n prev_actions = None\n prev_rewards = None\n if existing_inputs is not None:\n obs = existing_inputs[SampleBatch.CUR_OBS]\n if self._obs_include_prev_action_reward:\n prev_actions = existing_inputs[SampleBatch.PREV_ACTIONS]\n prev_rewards = existing_inputs[SampleBatch.PREV_REWARDS]\n else:\n obs = tf.placeholder(\n tf.float32,\n shape=[None] + list(obs_space.shape),\n name=\"observation\")\n if self._obs_include_prev_action_reward:\n prev_actions = ModelCatalog.get_action_placeholder(\n action_space)\n prev_rewards = tf.placeholder(\n tf.float32, [None], name=\"prev_reward\")\n\n self._input_dict = {\n SampleBatch.CUR_OBS: obs,\n SampleBatch.PREV_ACTIONS: prev_actions,\n SampleBatch.PREV_REWARDS: prev_rewards,\n \"is_training\": self._get_is_training_placeholder(),\n }\n self._seq_lens = tf.placeholder(\n dtype=tf.int32, shape=[None], name=\"seq_lens\")\n\n # Setup model\n if action_sampler_fn:\n if not make_model:\n raise ValueError(\n \"make_model is required if action_sampler_fn is given\")\n self.dist_class = None\n else:\n self.dist_class, logit_dim = ModelCatalog.get_action_dist(\n action_space, self.config[\"model\"])\n\n if existing_model:\n self.model = existing_model\n elif make_model:\n self.model = make_model(self, obs_space, action_space, config)\n else:\n self.model = ModelCatalog.get_model_v2(\n obs_space,\n action_space,\n logit_dim,\n self.config[\"model\"],\n framework=\"tf\")\n\n if existing_inputs:\n self._state_in = [\n v for k, v in existing_inputs.items()\n if k.startswith(\"state_in_\")\n ]\n if self._state_in:\n self._seq_lens = existing_inputs[\"seq_lens\"]\n else:\n self._state_in = [\n tf.placeholder(shape=(None, ) + s.shape, dtype=s.dtype)\n for s in self.model.get_initial_state()\n ]\n\n model_out, self._state_out = self.model(self._input_dict,\n self._state_in, self._seq_lens)\n\n # Setup action sampler\n if action_sampler_fn:\n action_sampler, action_logp = action_sampler_fn(\n self, self.model, self._input_dict, obs_space, action_space,\n config)\n else:\n action_dist = self.dist_class(model_out, self.model)\n action_sampler = action_dist.sample()\n action_logp = action_dist.sampled_action_logp()\n\n # Phase 1 init\n sess = tf.get_default_session() or tf.Session()\n if get_batch_divisibility_req:\n batch_divisibility_req = get_batch_divisibility_req(self)\n else:\n batch_divisibility_req = 1\n TFPolicy.__init__(\n self,\n obs_space,\n action_space,\n sess,\n obs_input=obs,\n action_sampler=action_sampler,\n action_logp=action_logp,\n loss=None, # dynamically 
initialized on run\n loss_inputs=[],\n model=self.model,\n state_inputs=self._state_in,\n state_outputs=self._state_out,\n prev_action_input=prev_actions,\n prev_reward_input=prev_rewards,\n seq_lens=self._seq_lens,\n max_seq_len=config[\"model\"][\"max_seq_len\"],\n batch_divisibility_req=batch_divisibility_req)\n\n # Phase 2 init\n before_loss_init(self, obs_space, action_space, config)\n if not existing_inputs:\n self._initialize_loss()\n\n @override(TFPolicy)\n def copy(self, existing_inputs):\n \"\"\"Creates a copy of self using existing input placeholders.\"\"\"\n\n # Note that there might be RNN state inputs at the end of the list\n if self._state_inputs:\n num_state_inputs = len(self._state_inputs) + 1\n else:\n num_state_inputs = 0\n if len(self._loss_inputs) + num_state_inputs != len(existing_inputs):\n raise ValueError(\"Tensor list mismatch\", self._loss_inputs,\n self._state_inputs, existing_inputs)\n for i, (k, v) in enumerate(self._loss_inputs):\n if v.shape.as_list() != existing_inputs[i].shape.as_list():\n raise ValueError(\"Tensor shape mismatch\", i, k, v.shape,\n existing_inputs[i].shape)\n # By convention, the loss inputs are followed by state inputs and then\n # the seq len tensor\n rnn_inputs = []\n for i in range(len(self._state_inputs)):\n rnn_inputs.append((\"state_in_{}\".format(i),\n existing_inputs[len(self._loss_inputs) + i]))\n if rnn_inputs:\n rnn_inputs.append((\"seq_lens\", existing_inputs[-1]))\n input_dict = OrderedDict(\n [(k, existing_inputs[i])\n for i, (k, _) in enumerate(self._loss_inputs)] + rnn_inputs)\n instance = self.__class__(\n self.observation_space,\n self.action_space,\n self.config,\n existing_inputs=input_dict,\n existing_model=self.model)\n\n instance._loss_input_dict = input_dict\n loss = instance._do_loss_init(input_dict)\n loss_inputs = [(k, existing_inputs[i])\n for i, (k, _) in enumerate(self._loss_inputs)]\n\n TFPolicy._initialize_loss(instance, loss, loss_inputs)\n if instance._grad_stats_fn:\n instance._stats_fetches.update(\n instance._grad_stats_fn(instance, input_dict, instance._grads))\n return instance\n\n @override(Policy)\n def get_initial_state(self):\n if self.model:\n return self.model.get_initial_state()\n else:\n return []\n\n def is_recurrent(self):\n return len(self._state_in) > 0\n\n def num_state_tensors(self):\n return len(self._state_in)\n\n def _initialize_loss(self):\n def fake_array(tensor):\n shape = tensor.shape.as_list()\n shape = [s if s is not None else 1 for s in shape]\n return np.zeros(shape, dtype=tensor.dtype.as_numpy_dtype)\n\n dummy_batch = {\n SampleBatch.CUR_OBS: fake_array(self._obs_input),\n SampleBatch.NEXT_OBS: fake_array(self._obs_input),\n SampleBatch.DONES: np.array([False], dtype=np.bool),\n SampleBatch.ACTIONS: fake_array(\n ModelCatalog.get_action_placeholder(self.action_space)),\n SampleBatch.REWARDS: np.array([0], dtype=np.float32),\n }\n if self._obs_include_prev_action_reward:\n dummy_batch.update({\n SampleBatch.PREV_ACTIONS: fake_array(self._prev_action_input),\n SampleBatch.PREV_REWARDS: fake_array(self._prev_reward_input),\n })\n state_init = self.get_initial_state()\n state_batches = []\n for i, h in enumerate(state_init):\n dummy_batch[\"state_in_{}\".format(i)] = np.expand_dims(h, 0)\n dummy_batch[\"state_out_{}\".format(i)] = np.expand_dims(h, 0)\n state_batches.append(np.expand_dims(h, 0))\n if state_init:\n dummy_batch[\"seq_lens\"] = np.array([1], dtype=np.int32)\n for k, v in self.extra_compute_action_fetches().items():\n dummy_batch[k] = fake_array(v)\n\n # 
postprocessing might depend on variable init, so run it first here\n self._sess.run(tf.global_variables_initializer())\n\n postprocessed_batch = self.postprocess_trajectory(\n SampleBatch(dummy_batch))\n\n # model forward pass for the loss (needed after postprocess to\n # overwrite any tensor state from that call)\n self.model(self._input_dict, self._state_in, self._seq_lens)\n\n if self._obs_include_prev_action_reward:\n train_batch = UsageTrackingDict({\n SampleBatch.PREV_ACTIONS: self._prev_action_input,\n SampleBatch.PREV_REWARDS: self._prev_reward_input,\n SampleBatch.CUR_OBS: self._obs_input,\n })\n loss_inputs = [\n (SampleBatch.PREV_ACTIONS, self._prev_action_input),\n (SampleBatch.PREV_REWARDS, self._prev_reward_input),\n (SampleBatch.CUR_OBS, self._obs_input),\n ]\n else:\n train_batch = UsageTrackingDict({\n SampleBatch.CUR_OBS: self._obs_input,\n })\n loss_inputs = [\n (SampleBatch.CUR_OBS, self._obs_input),\n ]\n\n for k, v in postprocessed_batch.items():\n if k in train_batch:\n continue\n elif v.dtype == np.object:\n continue # can't handle arbitrary objects in TF\n elif k == \"seq_lens\" or k.startswith(\"state_in_\"):\n continue\n shape = (None, ) + v.shape[1:]\n dtype = np.float32 if v.dtype == np.float64 else v.dtype\n placeholder = tf.placeholder(dtype, shape=shape, name=k)\n train_batch[k] = placeholder\n\n for i, si in enumerate(self._state_in):\n train_batch[\"state_in_{}\".format(i)] = si\n train_batch[\"seq_lens\"] = self._seq_lens\n\n if log_once(\"loss_init\"):\n logger.debug(\n \"Initializing loss function with dummy input:\\n\\n{}\\n\".format(\n summarize(train_batch)))\n\n self._loss_input_dict = train_batch\n loss = self._do_loss_init(train_batch)\n for k in sorted(train_batch.accessed_keys):\n if k != \"seq_lens\" and not k.startswith(\"state_in_\"):\n loss_inputs.append((k, train_batch[k]))\n\n TFPolicy._initialize_loss(self, loss, loss_inputs)\n if self._grad_stats_fn:\n self._stats_fetches.update(\n self._grad_stats_fn(self, train_batch, self._grads))\n self._sess.run(tf.global_variables_initializer())\n\n def _do_loss_init(self, train_batch):\n loss = self._loss_fn(self, self.model, self.dist_class, train_batch)\n if self._stats_fn:\n self._stats_fetches.update(self._stats_fn(self, train_batch))\n # override the update ops to be those of the model\n self._update_ops = self.model.update_ops()\n return loss\n" ]
[ [ "numpy.expand_dims", "numpy.array", "numpy.zeros" ] ]
seedlit/ahn3-downloader
[ "c08d810abf03e5ecee3c9eb3827afc1665995c24" ]
[ "download_ahn3_elevation_data.py" ]
[ "# this script finds all the intersecting tiles for a given input AOI, and then downloads corresponding\n# 0.5 meter AHN3 DSM and DTM tiles\n\n\nfrom shapely.geometry import Polygon\nimport geopandas as gpd\nimport pandas as pd\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\nimport urllib.request\nimport zipfile\nimport os\nimport argparse\n\n\ndef get_intersecting_tile_names(bounds_csv_path, aoi_path):\n print(\"Finding all the intersecting tile names\")\n # all the tile bounds are in EPSG 28992\n # reproject the aoi bounds to EPSG 28992\n # define aoi bounds\n aoi_df = gpd.read_file(aoi_path)\n if aoi_df.crs != 28992:\n aoi_df = aoi_df.to_crs(epsg=28992)\n tile_names_list = []\n # read csv into dataframe\n tiles_bounds_df = pd.read_csv(bounds_csv_path)\n for i in tqdm(range(len(tiles_bounds_df))):\n tile_name = tiles_bounds_df[\"tile_name\"].iloc[i]\n tile_left = tiles_bounds_df[\"left\"].iloc[i]\n tile_right = tiles_bounds_df[\"right\"].iloc[i]\n tile_bottom = tiles_bounds_df[\"bottom\"].iloc[i]\n tile_top = tiles_bounds_df[\"top\"].iloc[i]\n # generate shapely geometry\n tile_poly = gpd.GeoSeries(\n [\n Polygon(\n [\n (tile_left, tile_bottom),\n (tile_right, tile_bottom),\n (tile_right, tile_top),\n (tile_left, tile_top),\n ]\n )\n ]\n )\n tile_df = gpd.GeoDataFrame(\n {\"geometry\": tile_poly, \"df1\": [1]}, crs=\"EPSG:28992\"\n )\n if aoi_df.intersects(tile_df).any():\n tile_names_list.append(tile_name)\n print(\"the intersecting tiles are \", tile_names_list)\n return tile_names_list\n\n\ndef download_data(download_url, out_path):\n urllib.request.urlretrieve(download_url, out_path)\n\n\ndef extract_zip(src_zip_file, out_dir):\n zip_name = src_zip_file.split(\"/\")[-1].replace(\".zip\", \"\")\n zip_data = zipfile.ZipFile(src_zip_file)\n zipinfos = zip_data.infolist()\n # iterate through each file \n os.chdir(out_dir)\n for zipinfo in zipinfos:\n # Rename the zip content\n zipinfo.filename = \"{}.tif\".format(zip_name)\n zip_data.extract(zipinfo)\n os.remove(os.path.join(os.path.join(os.getcwd(), \"{}.zip\".format(zip_name))))\n return os.path.join(out_dir, \"{}.tif\".format(zip_name))\n\n\ndef download_and_extract(tile_name, out_dir, download_url):\n try:\n out_path = os.path.join(out_dir, \"{}.zip\".format(tile_name))\n download_data(download_url, out_path)\n tif_path = extract_zip(out_path, out_dir)\n # return tif_path\n except Exception as e:\n print(\"some error in \", tile_name)\n print(\"error \", e)\n\n\ndef download_tiles_multiprocess(tile_names_list, out_dir, num_processes):\n download_task_list = []\n dsm_dir = os.path.join(out_dir, \"dsm\")\n os.makedirs(dsm_dir, exist_ok=True)\n dtm_dir = os.path.join(out_dir, \"dtm\")\n os.makedirs(dtm_dir, exist_ok=True)\n for tile_name in tile_names_list:\n dsm_url = \"https://download.pdok.nl/rws/ahn3/v1_0/05m_dsm/R_{}.ZIP\".format(\n tile_name\n )\n dtm_url = \"https://download.pdok.nl/rws/ahn3/v1_0/05m_dtm/M_{}.ZIP\".format(\n tile_name\n )\n download_task_list.append([tile_name, dsm_dir, dsm_url])\n download_task_list.append([tile_name, dtm_dir, dtm_url])\n print(\"Dowloding {} tiles\".format(len(download_task_list)))\n p = Pool(num_processes)\n p.starmap(download_and_extract, download_task_list)\n p.close()\n p.join()\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n description=\"Download AHN3 DSM and DTM tiles for input AOI\"\n )\n parser.add_argument(\"--aoi\", help=\"aoi geojson/shpefile path string\")\n parser.add_argument(\n \"--out_dir\",\n help=\"path to out directory where files will 
be downloaded\",\n type=str,\n default=\"downloaded_tiles\",\n )\n parser.add_argument(\n \"--num_processes\",\n help=\"Number of processes to run in parallel, to speed up downloading\",\n type=int,\n default=10,\n )\n\n args = parser.parse_args()\n aoi_path = args.aoi\n out_dir = args.out_dir\n num_processes = args.num_processes\n\n os.makedirs(out_dir, exist_ok=True)\n bounds_csv_path = \"resources/ahn3_tile_bounds.csv\"\n\n target_tile_names = get_intersecting_tile_names(bounds_csv_path, aoi_path)\n download_tiles_multiprocess(target_tile_names, out_dir, num_processes)\n print(\"Data downloaded at \", os.path.join(os.getcwd(), out_dir))\n" ]
[ [ "pandas.read_csv" ] ]
kozodoi/pet_pawpularity
[ "fc47dce315cf5ebf10e1d501595d917d0ff062b9" ]
[ "code/run_inference.py" ]
[ "from utilities import *\nfrom model import get_model\nfrom data import get_data, get_loaders\nfrom augmentations import get_augs\nfrom test_epoch import test_epoch\n\nimport gc\nimport neptune\nfrom accelerate import Accelerator, DistributedType\nimport pandas as pd\nimport numpy as np\n\n\n\ndef run_inference(df, \n df_old,\n df_test, \n CFG, \n run = None):\n \n '''\n Run inference loop\n '''\n \n # tests\n assert isinstance(CFG, dict), 'CFG has to be a dict with parameters'\n assert isinstance(df, pd.DataFrame), 'df has to be a pandas dataframe'\n \n # placeholders\n oof = None\n sub = None\n \n # inference\n for fold in range(CFG['num_folds']):\n \n # initialize accelerator\n accelerator = Accelerator(device_placement = True,\n fp16 = CFG['use_fp16'],\n split_batches = False)\n if CFG['device'] == 'GPU':\n accelerator.state.device = torch.device('cuda:{}'.format(CFG['device_index']))\n\n # feedback\n accelerator.print('-' * 55)\n accelerator.print('FOLD {:d}/{:d}'.format(fold + 1, CFG['num_folds'])) \n accelerator.print('-' * 55) \n \n # get data\n df_trn, df_val = get_data(df = df, \n df_old = df_old,\n fold = fold, \n CFG = CFG, \n accelerator = accelerator, \n silent = True) \n\n # get test loader\n _, val_loader = get_loaders(df_train = df_trn, \n df_valid = df_val, \n CFG = CFG, \n accelerator = accelerator, \n labeled = False, \n silent = True) \n _, test_loader = get_loaders(df_train = df_trn, \n df_valid = df_test, \n CFG = CFG, \n accelerator = accelerator, \n labeled = False, \n silent = True) \n \n # prepare model\n model = get_model(CFG = CFG, \n pretrained = CFG['out_path'] + 'weights_fold{}.pth'.format(int(fold)))\n \n # handle device placement\n model, val_loader, test_loader = accelerator.prepare(model, val_loader, test_loader)\n \n # inference for validation data\n if CFG['predict_oof']:\n \n # produce OOF preds\n val_preds = test_epoch(loader = val_loader, \n model = model,\n CFG = CFG,\n accelerator = accelerator,\n num_tta = CFG['num_tta'])\n \n # store OOF preds\n val_preds_df = pd.DataFrame(val_preds, columns = ['pred'])\n val_preds_df = pd.concat([df_val, val_preds_df], axis = 1)\n oof = pd.concat([oof, val_preds_df], axis = 0).reset_index(drop = True)\n \n # inference for test data\n if CFG['predict_test']:\n \n # produce test preds\n test_preds = test_epoch(loader = test_loader, \n model = model,\n CFG = CFG,\n accelerator = accelerator,\n num_tta = CFG['num_tta'])\n \n # store test preds\n test_preds_df = pd.DataFrame(test_preds, columns = ['pred_fold{}'.format(int(fold))])\n sub = pd.concat([sub, test_preds_df], axis = 1)\n \n # clear memory\n del model, val_loader, test_loader\n del accelerator\n gc.collect()\n \n # export OOF preds\n if CFG['predict_oof']:\n oof.to_csv(CFG['out_path'] + 'oof.csv', index = False)\n if CFG['tracking']:\n run['oof'].upload(CFG['out_path'] + 'oof.csv')\n \n # export test preds\n if CFG['predict_test']:\n sub = pd.concat([df_test['Id'], sub], axis = 1)\n sub.to_csv(CFG['out_path'] + 'submission.csv', index = False)\n if CFG['tracking']:\n run['submission'].upload(CFG['out_path'] + 'submission.csv')\n" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
Ikomia-dev/ikomia-oakd
[ "7a048dd82fefc56a9f28b889f7d988d931173c9a", "7a048dd82fefc56a9f28b889f7d988d931173c9a" ]
[ "_examples/pose_estimation.py", "V-generalization/examples/stereo_neural_inference/face_landmarks_detector_v2.py" ]
[ "from utils.compute import get_landmark_3d, get_vector_intersection\nfrom utils.visualize import HumanPoseVisualizer\nfrom utils.OakRunner import OakRunner\nfrom utils.pose import getKeypoints\nfrom utils.draw import displayFPS\nfrom pathlib import Path\nimport depthai as dai\nimport numpy as np\nimport cv2\n\n\n\nfps_limit = 3\nframe_width, frame_height = 456, 256\npairs = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13],\n [1, 0], [0, 14], [14, 16], [0, 15], [15, 17]]\ncolors = [[255, 100, 0], [255, 100, 0], [255, 255, 0], [255, 100, 0], [255, 255, 0], [255, 100, 0], [0, 255, 0],\n [100, 200, 255], [255, 0, 255], [0, 255, 0], [100, 200, 255], [255, 0, 255], [255, 0, 0], [0, 0, 255],\n [0, 200, 200], [0, 0, 255], [0, 200, 200], [0, 0, 0]]\nthreshold = 0.3\nnb_points = 18\n\n\n\ndef init(runner, device):\n calibration = device.readCalibration()\n left_intrinsics = np.array(calibration.getCameraIntrinsics(dai.CameraBoardSocket.LEFT, 1280, 720))\n right_intrinsics = np.array(calibration.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, 1280, 720))\n runner.custom_arguments[\"focal_length_left\"] = left_intrinsics[0,0]\n runner.custom_arguments[\"focal_length_right\"] = right_intrinsics[0,0]\n runner.custom_arguments[\"size_left\"] = left_intrinsics[0,2]\n runner.custom_arguments[\"size_right\"] = right_intrinsics[0,2]\n\n runner.custom_arguments[\"visualizer\"] = HumanPoseVisualizer(300, 300, [runner.left_camera_location, runner.right_camera_location], colors=colors, pairs=pairs)\n runner.custom_arguments[\"visualizer\"].start()\n\n\ndef process(runner):\n spatial_vectors = dict()\n for side in [\"left\", \"right\"]:\n frame = runner.output_queues[side+\"_cam\"].get().getCvFrame()\n nn_current_output = runner.output_queues[\"nn_\"+side].get()\n \n heatmaps = np.array(nn_current_output.getLayerFp16('Mconv7_stage2_L2')).reshape((1, 19, 32, 57)).astype('float32')\n pafs = np.array(nn_current_output.getLayerFp16('Mconv7_stage2_L1')).reshape((1, 38, 32, 57)).astype('float32')\n outputs = np.concatenate((heatmaps, pafs), axis=1)\n\n spatial_vectors[side] = []\n landmarks = []\n for i in range(nb_points):\n probMap = outputs[0, i, :, :]\n probMap = cv2.resize(probMap, (frame_width, frame_height))\n keypoints = getKeypoints(probMap, threshold)\n if(len(keypoints) > 0 and len(keypoints[0]) > 1):\n spatial_vectors[side].append(np.array(get_landmark_3d((keypoints[0][0]/frame_width, keypoints[0][1]/frame_height), focal_length=runner.custom_arguments[\"focal_length_\"+side], size=runner.custom_arguments[\"size_\"+side])))\n landmarks.append([keypoints[0][0], keypoints[0][1]])\n cv2.circle(frame, (keypoints[0][0], keypoints[0][1]), 5, (colors[i][2], colors[i][1], colors[i][0]), -1, cv2.LINE_AA) # draw keypoint\n else:\n spatial_vectors[side].append(keypoints) # insert empty array if the keypoint is not detected with enough confidence\n landmarks.append(keypoints)\n\n for pair in pairs:\n if(np.alltrue([len(landmarks[i])==2 for i in pair])):\n color = [0, 0, 0]\n for i in range(3):\n color[i] += colors[pair[0]][i]/2\n color[i] += colors[pair[1]][i]/2\n cv2.line(frame, (landmarks[pair[0]][0], landmarks[pair[0]][1]), (landmarks[pair[1]][0], landmarks[pair[1]][1]), (color[2], color[1], color[0]), 3, cv2.LINE_AA)\n\n displayFPS(frame, runner.getFPS())\n cv2.imshow(side, frame)\n\n # Determined depth to accuratly locate landmarks in space\n landmark_spatial_locations = []\n for i in range(nb_points):\n 
landmark_spatial_locations.append(np.array(get_vector_intersection(spatial_vectors[\"left\"][i], runner.left_camera_location, spatial_vectors[\"right\"][i], runner.right_camera_location)))\n runner.custom_arguments[\"visualizer\"].setLandmarks(landmark_spatial_locations)\n\n\n\nrunner = OakRunner() \n\nfor side in [\"left\", \"right\"]:\n if(side == \"left\"):\n runner.setLeftCamera(frame_width, frame_height)\n runner.getLeftCamera().setFps(fps_limit)\n manip = runner.getLeftCameraManip()\n else:\n runner.setRightCamera(frame_width, frame_height)\n runner.getRightCamera().setFps(fps_limit)\n manip = runner.getRightCameraManip()\n\n manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) # Switch to BGR (but still grayscaled)\n runner.addNeuralNetworkModel(stream_name=\"nn_\"+side, path=str(Path(__file__).parent) + \"/../_models/pose_estimation.blob\", handle_mono_depth=False)\n manip.out.link(runner.neural_networks[\"nn_\"+side].input) # link transformed video stream to neural network entry\n\nrunner.run(process=process, init=init)", "from pathlib import Path\nimport depthai as dai\nimport numpy as np\nimport cv2\nimport sys\n\n# Importing from parent folder\nsys.path.insert(0, str(Path(__file__).parent.parent.parent)) # move to parent path\nfrom utils.draw import drawROI, displayFPS\nfrom utils.OakRunner import OakRunner\nfrom utils.compute import to_planar\n\n\n\nframe_width, frame_height = 300, 300\n\n\n\n# Function called before entering inside the process loop, useful to set few arguments\ndef init(runner, device):\n runner.custom_arguments[\"required_confidence\"] = 0.2\n\n\n# Function called inside the process loop, useful to apply any treatment\ndef process(runner):\n for side in [\"left\", \"right\"]:\n frame = runner.output_queues[side+\"_cam\"].get().getCvFrame()\n faces_data = runner.output_queues[\"nn_\"+side+\"_faces\"].get().getFirstLayerFp16()\n\n if(faces_data[2] > runner.custom_arguments[\"required_confidence\"]):\n # Get pixels instead of percentages\n xmin = int(faces_data[3]*frame_width) if faces_data[3]>0 else 0\n ymin = int(faces_data[4]*frame_height) if faces_data[4]>0 else 0\n xmax = int(faces_data[5]*frame_width) if faces_data[5]<1 else frame_width\n ymax = int(faces_data[6]*frame_height) if faces_data[6]<1 else frame_height\n\n # Compute the face to get landmarks\n land_data = dai.NNData()\n planar_cropped_face = to_planar(frame[ymin:ymax, xmin:xmax], (48, 48))\n land_data.setLayer(\"0\", planar_cropped_face)\n runner.input_queues[\"nn_\"+side+\"_landmarks\"].send(land_data)\n output = runner.output_queues[\"nn_\"+side+\"_landmarks\"].get().getFirstLayerFp16()\n landmarks = np.array(output).reshape(5,2)\n\n # Draw detections\n drawROI(frame, (xmin,ymin), (xmax,ymax), color=(0,200,230))\n for x,y in landmarks:\n cv2.circle(frame, (int(x*(xmax-xmin))+xmin,int(y*(ymax-ymin))+ymin), 2, (0,0,255))\n\n displayFPS(frame, runner.getFPS())\n cv2.imshow(side, frame)\n\n\n\nrunner = OakRunner()\n\nfor side in [\"left\", \"right\"]:\n if(side == \"left\"):\n runner.setLeftCamera(frame_width, frame_height)\n face_manip = runner.getLeftCameraManip()\n else:\n runner.setRightCamera(frame_width, frame_height)\n face_manip = runner.getRightCameraManip()\n\n face_manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) # Switch to BGR (but still grayscaled)\n runner.addNeuralNetworkModel(stream_name=\"nn_\"+side+\"_faces\", path=str(Path(__file__).parent) + \"/../../../_models/face_detection.blob\", handle_mono_depth=False)\n 
face_manip.out.link(runner.neural_networks[\"nn_\"+side+\"_faces\"].input) # link transformed video stream to neural network entry\n\n runner.addNeuralNetworkModel(stream_name=\"nn_\"+side+\"_landmarks\", path=str(Path(__file__).parent) + \"/../../../_models/tiny_face_landmarks.blob\", handle_mono_depth=False)\n\nrunner.run(process=process, init=init)" ]
[ [ "numpy.concatenate" ], [ "numpy.array" ] ]
micqu/hotel-challenge
[ "9373d5bd69a48e22b043b1410a57ec051f63dd45" ]
[ "main.py" ]
[ "import numpy as np\nfrom torch import nn\nimport torch.optim as optim\nimport torch\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport data_loader as dl\nimport time\nimport copy\nimport utility\nimport yaml\nimport trainer\nfrom PIL import Image\nfrom os import path\nImage.MAX_IMAGE_PIXELS = None\nfrom scipy.io import savemat\nfrom sklearn.model_selection import train_test_split\nfrom torchvision import transforms\nimport os.path\nfrom os import path\n\nBATCH_SIZE = 32\nEPOCHS = 100\nLR = 0.001\nANNEAL_STRAT = \"cos\"\nFEATURE_EXTRACT = False\nAPPLY_ZCA_TRANS = True\nDATA_DIR = 'data/train_images'\nNETS = ['resnext'] # train on resnext\nIMAGE_SIZES = [64, 128, 224] # train for 4 resolutions\n\ndef main():\n # Load the meta data file\n df = pd.read_csv('./data/train.csv')\n df, label_encoder = utility.encode_labels(df)\n num_classes = len(df['label'].value_counts())\n np.save('./data/label_encoder_classes.npy', label_encoder.classes_)\n \n # Generate the ZCA matrix if enabled\n for image_size in IMAGE_SIZES: # train for every res\n if APPLY_ZCA_TRANS:\n print(\"Making ZCA matrix ...\")\n data_loader = dl.get_full_data_loader(df, data_dir=DATA_DIR,\n batch_size=BATCH_SIZE,\n image_size=image_size)\n train_dataset_arr = next(iter(data_loader))[0].numpy()\n zca = utility.ZCA()\n zca.fit(train_dataset_arr)\n zca_dic = {\"zca_matrix\": zca.ZCA_mat, \"zca_mean\": zca.mean}\n savemat(\"./data/zca_data.mat\", zca_dic)\n print(\"Completed making ZCA matrix\")\n\n # Define normalization\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n )\n\n # Define specific transforms\n train_transform = transforms.Compose([\n utility.AddPadding(),\n transforms.Resize((image_size,image_size)),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomRotation(degrees=(-90, 90)),\n transforms.RandomVerticalFlip(p=0.5),\n transforms.ColorJitter(.4,.4,.4),\n transforms.ToTensor(),\n normalize\n ])\n valid_transform = transforms.Compose([\n utility.AddPadding(),\n transforms.Resize((image_size,image_size)),\n transforms.ToTensor(),\n normalize\n ])\n \n # Create a train and valid dataset\n train_dataset = dl.HotelImagesDataset(df, root_dir=DATA_DIR,\n transform=train_transform)\n valid_dataset = dl.HotelImagesDataset(df, root_dir=DATA_DIR,\n transform=valid_transform)\n \n # Get a train and valid data loader\n train_loader, valid_loader = dl.get_train_valid_loader(train_dataset,\n valid_dataset,\n batch_size=BATCH_SIZE,\n random_seed=0)\n for net_type in NETS: # train for every net\n model = utility.initialize_net(num_classes, net_type,\n feature_extract=FEATURE_EXTRACT)\n \n # If old model exists, take state from it\n if path.exists(f\"./models/model_{net_type}.pt\"):\n print(\"Resuming training on trained model ...\")\n model = utility.load_latest_model(model, f'./models/model_{net_type}.pt')\n \n # Gather the parameters to be optimized/updated in this run.\n params_to_update = utility.get_model_params_to_train(model, FEATURE_EXTRACT)\n \n # Send model to GPU\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") \n model = model.to(device)\n\n # Make criterion\n criterion = nn.CrossEntropyLoss()\n \n # Make optimizer + scheduler\n optimizer = torch.optim.SGD(params_to_update, lr=0.01, momentum=0.9)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n mode='min',\n factor=0.01,\n patience=3)\n\n trained_model = trainer.train_model(device=device,\n model=model,\n optimizer=optimizer,\n criterion=criterion,\n 
train_loader=train_loader,\n valid_loader=valid_loader,\n scheduler=scheduler,\n net_type=net_type,\n epochs=EPOCHS,\n apply_zca_trans=APPLY_ZCA_TRANS)\n \n utility.save_current_model(trained_model,\n f\"./models/model_{net_type}.pt\")\n \nif __name__ == \"__main__\":\n main()" ]
[ [ "torch.optim.SGD", "numpy.save", "scipy.io.savemat", "torch.cuda.is_available", "torch.optim.lr_scheduler.ReduceLROnPlateau", "pandas.read_csv", "torch.nn.CrossEntropyLoss" ] ]
schokoro/flair
[ "92007949c818ccd9d7a85ad48309763439bd3455" ]
[ "flair/nn/model.py" ]
[ "import itertools\nimport logging\nimport warnings\nfrom abc import abstractmethod\nfrom collections import Counter\nfrom pathlib import Path\nfrom typing import Union, List, Tuple, Dict, Optional\n\nimport torch.nn\nfrom torch.utils.data.dataset import Dataset\nfrom tqdm import tqdm\n\nimport flair\nfrom flair import file_utils\nfrom flair.data import DataPoint, Sentence, Dictionary\nfrom flair.datasets import DataLoader, SentenceDataset\nfrom flair.training_utils import Result, store_embeddings\n\nlog = logging.getLogger(\"flair\")\n\n\nclass Model(torch.nn.Module):\n \"\"\"Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.\n Every new type of model must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def label_type(self):\n \"\"\"Each model predicts labels of a certain type. TODO: can we find a better name for this?\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def forward_loss(self, data_points: Union[List[DataPoint], DataPoint]) -> torch.tensor:\n \"\"\"Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def evaluate(\n self,\n sentences: Union[List[Sentence], Dataset],\n gold_label_type: str,\n out_path: Union[str, Path] = None,\n embedding_storage_mode: str = \"none\",\n mini_batch_size: int = 32,\n num_workers: int = 8,\n main_evaluation_metric: Tuple[str, str] = (\"micro avg\", \"f1-score\"),\n exclude_labels: List[str] = [],\n gold_label_dictionary: Optional[Dictionary] = None,\n ) -> Result:\n \"\"\"Evaluates the model. Returns a Result object containing evaluation\n results and a loss value. Implement this to enable evaluation.\n :param data_loader: DataLoader that iterates over dataset to be evaluated\n :param out_path: Optional output path to store predictions\n :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and\n freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU\n :return: Returns a Tuple consisting of a Result object and a loss float value\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def _get_state_dict(self):\n \"\"\"Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()\n functionality.\"\"\"\n raise NotImplementedError\n\n @staticmethod\n @abstractmethod\n def _init_model_with_state_dict(state):\n \"\"\"Initialize the model from a state dictionary. 
Implementing this enables the load() and load_checkpoint()\n functionality.\"\"\"\n raise NotImplementedError\n\n @staticmethod\n def _fetch_model(model_name) -> str:\n return model_name\n\n def save(self, model_file: Union[str, Path], checkpoint: bool = False):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = self._get_state_dict()\n\n # in Flair <0.9.1, optimizer and scheduler used to train model are not saved\n optimizer = scheduler = None\n\n # write out a \"model card\" if one is set\n if hasattr(self, 'model_card'):\n\n # special handling for optimizer: remember optimizer class and state dictionary\n if 'training_parameters' in self.model_card:\n training_parameters = self.model_card['training_parameters']\n\n if 'optimizer' in training_parameters:\n optimizer = training_parameters['optimizer']\n if checkpoint:\n training_parameters['optimizer_state_dict'] = optimizer.state_dict()\n training_parameters['optimizer'] = optimizer.__class__\n\n if 'scheduler' in training_parameters:\n scheduler = training_parameters['scheduler']\n if checkpoint:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n training_parameters['scheduler_state_dict'] = scheduler.state_dict()\n training_parameters['scheduler'] = scheduler.__class__\n\n model_state['model_card'] = self.model_card\n\n # save model\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n # restore optimizer and scheduler to model card if set\n if optimizer:\n self.model_card['training_parameters']['optimizer'] = optimizer\n if scheduler:\n self.model_card['training_parameters']['scheduler'] = scheduler\n\n @classmethod\n def load(cls, model: Union[str, Path]):\n \"\"\"\n Loads the model from the given file.\n :param model: the model file\n :return: the loaded text classifier model\n \"\"\"\n model_file = cls._fetch_model(str(model))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = file_utils.load_big_file(str(model_file))\n state = torch.load(f, map_location='cpu')\n\n model = cls._init_model_with_state_dict(state)\n\n if 'model_card' in state:\n model.model_card = state['model_card']\n\n model.eval()\n model.to(flair.device)\n\n return model\n\n def print_model_card(self):\n if hasattr(self, 'model_card'):\n param_out = \"\\n------------------------------------\\n\"\n param_out += \"--------- Flair Model Card ---------\\n\"\n param_out += \"------------------------------------\\n\"\n param_out += \"- this Flair model was trained with:\\n\"\n param_out += f\"-- Flair version {self.model_card['flair_version']}\\n\"\n param_out += f\"-- PyTorch version {self.model_card['pytorch_version']}\\n\"\n if 'transformers_version' in self.model_card:\n param_out += f\"-- Transformers version {self.model_card['transformers_version']}\\n\"\n param_out += \"------------------------------------\\n\"\n\n param_out += \"------- Training Parameters: -------\\n\"\n param_out += \"------------------------------------\\n\"\n training_params = '\\n'.join(f'-- {param} = {self.model_card[\"training_parameters\"][param]}'\n for param in self.model_card['training_parameters'])\n param_out += training_params + \"\\n\"\n param_out += \"------------------------------------\\n\"\n\n log.info(param_out)\n else:\n log.info(\n \"This model has no model card (likely because 
it is not yet trained or was trained with Flair version < 0.9.1)\")\n\n\nclass Classifier(Model):\n \"\"\"Abstract base class for all Flair models that do classification, both single- and multi-label.\n It inherits from flair.nn.Model and adds a unified evaluate() function so that all classification models\n use the same evaluation routines and compute the same numbers.\n Currently, the SequenceTagger implements this class directly, while all other classifiers in Flair\n implement the DefaultClassifier base class which implements Classifier.\"\"\"\n\n def evaluate(\n self,\n data_points: Union[List[DataPoint], Dataset],\n gold_label_type: str,\n out_path: Union[str, Path] = None,\n embedding_storage_mode: str = \"none\",\n mini_batch_size: int = 32,\n num_workers: int = 8,\n main_evaluation_metric: Tuple[str, str] = (\"micro avg\", \"f1-score\"),\n exclude_labels: List[str] = [],\n gold_label_dictionary: Optional[Dictionary] = None,\n ) -> Result:\n import numpy as np\n import sklearn\n\n # read Dataset into data loader (if list of sentences passed, make Dataset first)\n if not isinstance(data_points, Dataset):\n data_points = SentenceDataset(data_points)\n data_loader = DataLoader(data_points, batch_size=mini_batch_size, num_workers=num_workers)\n\n with torch.no_grad():\n\n # loss calculation\n eval_loss = 0\n average_over = 0\n\n # variables for printing\n lines: List[str] = []\n\n # variables for computing scores\n all_spans: List[str] = []\n all_true_values = {}\n all_predicted_values = {}\n\n sentence_id = 0\n for batch in data_loader:\n\n # remove any previously predicted labels\n for datapoint in batch:\n datapoint.remove_labels('predicted')\n\n # predict for batch\n loss_and_count = self.predict(batch,\n embedding_storage_mode=embedding_storage_mode,\n mini_batch_size=mini_batch_size,\n label_name='predicted',\n return_loss=True)\n\n if isinstance(loss_and_count, Tuple):\n average_over += loss_and_count[1]\n eval_loss += loss_and_count[0]\n else:\n eval_loss += loss_and_count\n\n # get the gold labels\n for datapoint in batch:\n\n for gold_label in datapoint.get_labels(gold_label_type):\n representation = str(sentence_id) + ': ' + gold_label.identifier\n\n value = gold_label.value\n if gold_label_dictionary and gold_label_dictionary.get_idx_for_item(value) == 0:\n value = '<unk>'\n\n if representation not in all_true_values:\n all_true_values[representation] = [value]\n else:\n all_true_values[representation].append(value)\n\n if representation not in all_spans:\n all_spans.append(representation)\n\n for predicted_span in datapoint.get_labels(\"predicted\"):\n representation = str(sentence_id) + ': ' + predicted_span.identifier\n\n # add to all_predicted_values\n if representation not in all_predicted_values:\n all_predicted_values[representation] = [predicted_span.value]\n else:\n all_predicted_values[representation].append(predicted_span.value)\n\n if representation not in all_spans:\n all_spans.append(representation)\n\n sentence_id += 1\n\n store_embeddings(batch, embedding_storage_mode)\n\n # make printout lines\n if out_path:\n lines.extend(self._print_predictions(batch, gold_label_type))\n\n # write all_predicted_values to out_file if set\n if out_path:\n with open(Path(out_path), \"w\", encoding=\"utf-8\") as outfile:\n outfile.write(\"\".join(lines))\n\n # make the evaluation dictionary\n evaluation_label_dictionary = Dictionary(add_unk=False)\n evaluation_label_dictionary.add_item(\"O\")\n for true_values in all_true_values.values():\n for label in true_values:\n 
evaluation_label_dictionary.add_item(label)\n for predicted_values in all_predicted_values.values():\n for label in predicted_values:\n evaluation_label_dictionary.add_item(label)\n\n # finally, compute numbers\n y_true = []\n y_pred = []\n\n for span in all_spans:\n\n true_values = all_true_values[span] if span in all_true_values else ['O']\n predicted_values = all_predicted_values[span] if span in all_predicted_values else ['O']\n\n y_true_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)\n for true_value in true_values:\n y_true_instance[evaluation_label_dictionary.get_idx_for_item(true_value)] = 1\n y_true.append(y_true_instance.tolist())\n\n y_pred_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)\n for predicted_value in predicted_values:\n y_pred_instance[evaluation_label_dictionary.get_idx_for_item(predicted_value)] = 1\n y_pred.append(y_pred_instance.tolist())\n\n # now, calculate evaluation numbers\n target_names = []\n labels = []\n\n counter = Counter()\n counter.update(list(itertools.chain.from_iterable(all_true_values.values())))\n counter.update(list(itertools.chain.from_iterable(all_predicted_values.values())))\n\n for label_name, count in counter.most_common():\n if label_name == 'O': continue\n if label_name in exclude_labels: continue\n target_names.append(label_name)\n labels.append(evaluation_label_dictionary.get_idx_for_item(label_name))\n\n # there is at least one gold label or one prediction (default)\n if len(all_true_values) + len(all_predicted_values) > 1:\n classification_report = sklearn.metrics.classification_report(\n y_true, y_pred, digits=4, target_names=target_names, zero_division=0, labels=labels,\n )\n\n classification_report_dict = sklearn.metrics.classification_report(\n y_true, y_pred, target_names=target_names, zero_division=0, output_dict=True, labels=labels,\n )\n\n accuracy_score = round(sklearn.metrics.accuracy_score(y_true, y_pred), 4)\n\n precision_score = round(classification_report_dict[\"micro avg\"][\"precision\"], 4)\n recall_score = round(classification_report_dict[\"micro avg\"][\"recall\"], 4)\n micro_f_score = round(classification_report_dict[\"micro avg\"][\"f1-score\"], 4)\n macro_f_score = round(classification_report_dict[\"macro avg\"][\"f1-score\"], 4)\n\n main_score = classification_report_dict[main_evaluation_metric[0]][main_evaluation_metric[1]]\n\n else:\n # issue error and default all evaluation numbers to 0.\n log.error(\n \"ACHTUNG! No gold labels and no all_predicted_values found! 
Could be an error in your corpus or how you \"\n \"initialize the trainer!\")\n accuracy_score = precision_score = recall_score = micro_f_score = macro_f_score = main_score = 0.\n classification_report = \"\"\n classification_report_dict = {}\n\n detailed_result = (\n \"\\nResults:\"\n f\"\\n- F-score (micro) {micro_f_score}\"\n f\"\\n- F-score (macro) {macro_f_score}\"\n f\"\\n- Accuracy {accuracy_score}\"\n \"\\n\\nBy class:\\n\" + classification_report\n )\n\n # line for log file\n log_header = \"PRECISION\\tRECALL\\tF1\\tACCURACY\"\n log_line = f\"{precision_score}\\t\" f\"{recall_score}\\t\" f\"{micro_f_score}\\t\" f\"{accuracy_score}\"\n\n if average_over > 0:\n eval_loss /= average_over\n\n result = Result(\n main_score=main_score,\n log_line=log_line,\n log_header=log_header,\n detailed_results=detailed_result,\n classification_report=classification_report_dict,\n loss=eval_loss\n )\n\n return result\n\n def _print_predictions(self, batch, gold_label_type):\n lines = []\n for datapoint in batch:\n # check if there is a label mismatch\n g = [label.identifier + label.value for label in datapoint.get_labels(gold_label_type)]\n p = [label.identifier + label.value for label in datapoint.get_labels('predicted')]\n g.sort()\n p.sort()\n correct_string = \" -> MISMATCH!\\n\" if g != p else \"\"\n # print info\n eval_line = f\"{datapoint.to_original_text()}\\n\" \\\n f\" - Gold: {datapoint.get_labels(gold_label_type)}\\n\" \\\n f\" - Pred: {datapoint.get_labels('predicted')}\\n{correct_string}\\n\"\n lines.append(eval_line)\n return lines\n\n\nclass DefaultClassifier(Classifier):\n \"\"\"Default base class for all Flair models that do classification, both single- and multi-label.\n It inherits from flair.nn.Classifier and thus from flair.nn.Model. All features shared by all classifiers\n are implemented here, including the loss calculation and the predict() method.\n Currently, the TextClassifier, RelationExtractor, TextPairClassifier and SimpleSequenceTagger implement\n this class. 
You only need to implement the forward_pass() method to implement this base class.\n \"\"\"\n\n def forward_pass(self,\n sentences: Union[List[DataPoint], DataPoint],\n return_label_candidates: bool = False,\n ):\n \"\"\"This method does a forward pass through the model given a list of data points as input.\n Returns the tuple (scores, labels) if return_label_candidates = False, where scores are a tensor of logits\n produced by the decoder and labels are the string labels for each data point.\n Returns the tuple (scores, labels, data_points, candidate_labels) if return_label_candidates = True,\n where data_points are the data points to which labels are added (commonly either Sentence or Token objects)\n and candidate_labels are empty Label objects for each prediction (depending on the task Label,\n SpanLabel or RelationLabel).\"\"\"\n raise NotImplementedError\n\n def __init__(self,\n label_dictionary: Dictionary,\n multi_label: bool = False,\n multi_label_threshold: float = 0.5,\n loss_weights: Dict[str, float] = None,\n ):\n\n super().__init__()\n\n # initialize the label dictionary\n self.label_dictionary: Dictionary = label_dictionary\n\n # set up multi-label logic\n self.multi_label = multi_label\n self.multi_label_threshold = multi_label_threshold\n\n # loss weights and loss function\n self.weight_dict = loss_weights\n # Initialize the weight tensor\n if loss_weights is not None:\n n_classes = len(self.label_dictionary)\n weight_list = [1.0 for i in range(n_classes)]\n for i, tag in enumerate(self.label_dictionary.get_items()):\n if tag in loss_weights.keys():\n weight_list[i] = loss_weights[tag]\n self.loss_weights = torch.FloatTensor(weight_list).to(flair.device)\n else:\n self.loss_weights = None\n\n if self.multi_label:\n self.loss_function = torch.nn.BCEWithLogitsLoss(weight=self.loss_weights)\n else:\n self.loss_function = torch.nn.CrossEntropyLoss(weight=self.loss_weights)\n\n @property\n def multi_label_threshold(self):\n return self._multi_label_threshold\n\n @multi_label_threshold.setter\n def multi_label_threshold(self, x): # setter method\n if type(x) is dict:\n if 'default' in x:\n self._multi_label_threshold = x\n else:\n raise Exception('multi_label_threshold dict should have a \"default\" key')\n else:\n self._multi_label_threshold = {'default': x}\n\n def forward_loss(self, sentences: Union[List[DataPoint], DataPoint]) -> torch.tensor:\n scores, labels = self.forward_pass(sentences)\n return self._calculate_loss(scores, labels)\n\n def _calculate_loss(self, scores, labels):\n\n if not any(labels): return torch.tensor(0., requires_grad=True, device=flair.device), 1\n\n if self.multi_label:\n labels = torch.tensor([[1 if l in all_labels_for_point else 0 for l in self.label_dictionary.get_items()]\n for all_labels_for_point in labels], dtype=torch.float, device=flair.device)\n\n else:\n labels = torch.tensor([self.label_dictionary.get_idx_for_item(label[0]) if len(label) > 0\n else self.label_dictionary.get_idx_for_item('O')\n for label in labels], dtype=torch.long, device=flair.device)\n\n return self.loss_function(scores, labels), len(labels)\n\n def predict(\n self,\n sentences: Union[List[Sentence], Sentence],\n mini_batch_size: int = 32,\n return_probabilities_for_all_classes: bool = False,\n verbose: bool = False,\n label_name: Optional[str] = None,\n return_loss=False,\n embedding_storage_mode=\"none\",\n ):\n \"\"\"\n Predicts the class labels for the given sentences. 
The labels are directly added to the sentences.\n :param sentences: list of sentences\n :param mini_batch_size: mini batch size to use\n :param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted\n :param verbose: set to True to display a progress bar\n :param return_loss: set to True to return loss\n :param label_name: set this to change the name of the label type that is predicted\n :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if\n you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.\n 'gpu' to store embeddings in GPU memory.\n \"\"\"\n if label_name is None:\n label_name = self.label_type if self.label_type is not None else \"label\"\n\n with torch.no_grad():\n if not sentences:\n return sentences\n\n if isinstance(sentences, DataPoint):\n sentences = [sentences]\n\n # filter empty sentences\n if isinstance(sentences[0], DataPoint):\n sentences = [sentence for sentence in sentences if len(sentence) > 0]\n if len(sentences) == 0:\n return sentences\n\n # reverse sort all sequences by their length\n rev_order_len_index = sorted(range(len(sentences)), key=lambda k: len(sentences[k]), reverse=True)\n\n reordered_sentences: List[Union[DataPoint, str]] = [sentences[index] for index in rev_order_len_index]\n\n dataloader = DataLoader(dataset=SentenceDataset(reordered_sentences), batch_size=mini_batch_size)\n # progress bar for verbosity\n if verbose:\n dataloader = tqdm(dataloader)\n\n overall_loss = 0\n batch_no = 0\n label_count = 0\n for batch in dataloader:\n\n batch_no += 1\n\n if verbose:\n dataloader.set_description(f\"Inferencing on batch {batch_no}\")\n\n # stop if all sentences are empty\n if not batch:\n continue\n\n scores, gold_labels, data_points, label_candidates = self.forward_pass(batch,\n return_label_candidates=True)\n # remove previously predicted labels of this type\n for sentence in data_points:\n sentence.remove_labels(label_name)\n\n if return_loss:\n overall_loss += self._calculate_loss(scores, gold_labels)[0]\n label_count += len(label_candidates)\n\n # if anything could possibly be predicted\n if len(label_candidates) > 0:\n if self.multi_label:\n sigmoided = torch.sigmoid(scores) # size: (n_sentences, n_classes)\n n_labels = sigmoided.size(1)\n for s_idx, (data_point, label_candidate) in enumerate(zip(data_points, label_candidates)):\n for l_idx in range(n_labels):\n label_value = self.label_dictionary.get_item_for_index(l_idx)\n if label_value == 'O': continue\n label_threshold = self._get_label_threshold(label_value)\n label_score = sigmoided[s_idx, l_idx].item()\n if label_score > label_threshold or return_probabilities_for_all_classes:\n label = label_candidate.spawn(value=label_value, score=label_score)\n data_point.add_complex_label(label_name, label)\n else:\n softmax = torch.nn.functional.softmax(scores, dim=-1)\n\n if return_probabilities_for_all_classes:\n n_labels = softmax.size(1)\n for s_idx, (data_point, label_candidate) in enumerate(zip(data_points, label_candidates)):\n for l_idx in range(n_labels):\n label_value = self.label_dictionary.get_item_for_index(l_idx)\n if label_value == 'O': continue\n label_score = softmax[s_idx, l_idx].item()\n label = label_candidate.spawn(value=label_value, score=label_score)\n data_point.add_complex_label(label_name, label)\n else:\n conf, idx = torch.max(softmax, dim=-1)\n for data_point, label_candidate, c, i in zip(data_points, label_candidates, conf, 
idx):\n label_value = self.label_dictionary.get_item_for_index(i.item())\n if label_value == 'O': continue\n label = label_candidate.spawn(value=label_value, score=c.item())\n data_point.add_complex_label(label_name, label)\n\n store_embeddings(batch, storage_mode=embedding_storage_mode)\n\n if return_loss:\n return overall_loss, label_count\n\n def _get_label_threshold(self, label_value):\n label_threshold = self.multi_label_threshold['default']\n if label_value in self.multi_label_threshold:\n label_threshold = self.multi_label_threshold[label_value]\n\n return label_threshold\n\n def __str__(self):\n return super(flair.nn.Model, self).__str__().rstrip(')') + \\\n f' (weights): {self.weight_dict}\\n' + \\\n f' (weight_tensor) {self.loss_weights}\\n)'\n" ]
[ [ "sklearn.metrics.classification_report", "sklearn.metrics.accuracy_score" ] ]
www516717402/edgeai-mmdetection
[ "c5563434728da227678ba3588621b4b426cda43d" ]
[ "xmmdet/core/anchor/anchor_generator.py" ]
[ "import warnings\nimport mmcv\nimport numpy as np\nimport torch\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.core.anchor.builder import ANCHOR_GENERATORS\nfrom mmdet.core.anchor import AnchorGenerator\n\n@ANCHOR_GENERATORS.register_module(force=True)\nclass SSDAnchorGenerator(AnchorGenerator):\n \"\"\"Anchor generator for SSD\n\n Args:\n strides (list[int] | list[tuple[int, int]]): Strides of anchors\n in multiple feature levels.\n ratios (list[float]): The list of ratios between the height and width\n of anchors in a single level.\n basesize_ratio_range (tuple(float)): Ratio range of anchors.\n input_size (int): Size of feature map, 300 for SSD300,\n 512 for SSD512.\n scale_major (bool): Whether to multiply scales first when generating\n base anchors. If true, the anchors in the same row will have the\n same scales. It is always set to be False in SSD.\n \"\"\"\n\n def __init__(self,\n strides,\n ratios,\n basesize_ratio_range,\n input_size=300,\n scale_major=True):\n assert len(strides) == len(ratios)\n assert mmcv.is_tuple_of(basesize_ratio_range, float)\n\n self.strides = [_pair(stride) for stride in strides]\n self.input_size = max(input_size) if isinstance(input_size, (list,tuple)) else input_size\n self.centers = [(stride[0] / 2., stride[1] / 2.)\n for stride in self.strides]\n self.basesize_ratio_range = basesize_ratio_range\n\n # calculate anchor ratios and sizes\n min_ratio, max_ratio = basesize_ratio_range\n min_ratio = int(min_ratio * 100)\n max_ratio = int(max_ratio * 100)\n step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))\n min_sizes = []\n max_sizes = []\n for ratio in range(int(min_ratio), int(max_ratio) + 1, step):\n min_sizes.append(int(self.input_size * ratio / 100))\n max_sizes.append(int(self.input_size * (ratio + step) / 100))\n if self.input_size == 300:\n if basesize_ratio_range[0] == 0.15: # SSD300 COCO\n min_sizes.insert(0, int(self.input_size * 7 / 100))\n max_sizes.insert(0, int(self.input_size * 15 / 100))\n elif basesize_ratio_range[0] == 0.2: # SSD300 VOC\n min_sizes.insert(0, int(self.input_size * 10 / 100))\n max_sizes.insert(0, int(self.input_size * 20 / 100))\n else:\n min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))\n max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))\n warnings.warn(\n 'according to original SSD, basesize_ratio_range[0] should be either 0.15'\n 'or 0.2 when input_size is 300, got '\n f'{basesize_ratio_range[0]}.')\n elif self.input_size == 512:\n if basesize_ratio_range[0] == 0.1: # SSD512 COCO\n min_sizes.insert(0, int(self.input_size * 4 / 100))\n max_sizes.insert(0, int(self.input_size * 10 / 100))\n elif basesize_ratio_range[0] == 0.15: # SSD512 VOC\n min_sizes.insert(0, int(self.input_size * 7 / 100))\n max_sizes.insert(0, int(self.input_size * 15 / 100))\n else:\n min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))\n max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))\n warnings.warn('according to original SSD, basesize_ratio_range[0] should be either 0.1'\n 'or 0.15 when input_size is 512, got'\n f' {basesize_ratio_range[0]}.')\n else:\n if basesize_ratio_range[0] == 0.1: # SSD512 COCO\n min_sizes.insert(0, int(self.input_size * 4 / 100))\n max_sizes.insert(0, int(self.input_size * 10 / 100))\n else:\n min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))\n max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))\n\n anchor_ratios = []\n anchor_scales = []\n for k in 
range(len(self.strides)):\n scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]\n anchor_ratio = [1.]\n for r in ratios[k]:\n anchor_ratio += [1 / r, r] # 4 or 6 ratio\n anchor_ratios.append(torch.Tensor(anchor_ratio))\n anchor_scales.append(torch.Tensor(scales))\n\n self.base_sizes = min_sizes\n self.scales = anchor_scales\n self.ratios = anchor_ratios\n self.scale_major = scale_major\n self.center_offset = 0\n self.base_anchors = self.gen_base_anchors()\n # added for proto export\n self.min_sizes = min_sizes\n self.max_sizes = max_sizes\n\n def gen_base_anchors(self):\n \"\"\"Generate base anchors.\n\n Returns:\n list(torch.Tensor): Base anchors of a feature grid in multiple \\\n feature levels.\n \"\"\"\n multi_level_base_anchors = []\n for i, base_size in enumerate(self.base_sizes):\n base_anchors = self.gen_single_level_base_anchors(\n base_size,\n scales=self.scales[i],\n ratios=self.ratios[i],\n center=self.centers[i])\n indices = list(range(len(self.ratios[i])))\n indices.insert(1, len(indices))\n base_anchors = torch.index_select(base_anchors, 0,\n torch.LongTensor(indices))\n multi_level_base_anchors.append(base_anchors)\n return multi_level_base_anchors\n\n def __repr__(self):\n \"\"\"str: a string that describes the module\"\"\"\n indent_str = ' '\n repr_str = self.__class__.__name__ + '(\\n'\n repr_str += f'{indent_str}strides={self.strides},\\n'\n repr_str += f'{indent_str}scales={self.scales},\\n'\n repr_str += f'{indent_str}scale_major={self.scale_major},\\n'\n repr_str += f'{indent_str}input_size={self.input_size},\\n'\n repr_str += f'{indent_str}scales={self.scales},\\n'\n repr_str += f'{indent_str}ratios={self.ratios},\\n'\n repr_str += f'{indent_str}num_levels={self.num_levels},\\n'\n repr_str += f'{indent_str}base_sizes={self.base_sizes},\\n'\n repr_str += f'{indent_str}basesize_ratio_range='\n repr_str += f'{self.basesize_ratio_range})'\n return repr_str\n\n" ]
[ [ "torch.nn.modules.utils._pair", "torch.LongTensor", "numpy.sqrt", "torch.Tensor", "numpy.floor" ] ]
colemanliyah/pyrfume-data
[ "1d1ccd0232f024c4509068ce682e7545580d246b", "1d1ccd0232f024c4509068ce682e7545580d246b" ]
[ "snitz_2013/main.py", "ifra_2019/main.py" ]
[ "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.10.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nimport pandas as pd\nfrom pyrfume.odorants import from_cids\n\ndf1 = pd.read_csv('experiment1_comparisons.csv',\n header=0,index_col=0,names=['A','B','Similarity'])\ndf1_cids = pd.read_csv('experiment1_cids.csv', index_col=0)\ndf1_cids = df1_cids.applymap(lambda x:x.replace('[','').replace(']','').strip().replace(' ',','))\ndf1_cids\ndf1.loc[:, ['A','B']] = df1.loc[:, ['A','B']].applymap(lambda x:df1_cids.loc[x]['Mixture Cids'])\ndf1.head()\n\ndf2 = pd.read_csv('experiment2_comparisons.csv',\n header=0,index_col=0,names=['A','B','Similarity'])\ndf2_cids = pd.read_csv('experiment2_cids.csv', index_col=0)\ndf2_cids = df2_cids.applymap(lambda x:x.replace('[','').replace(']','').strip().replace(' ',','))\ndf2_cids\ndf2.loc[:, ['A','B']] = df2.loc[:, ['A','B']].applymap(lambda x:df2_cids.loc[x]['Mixture Cids'])\ndf2.head()\n\ndf3 = pd.read_csv('experiment3_comparisons.csv',\n header=0,index_col=0,names=['A','B','Similarity'])\ndf3.head()\n\ndf = pd.concat([df1, df2, df3])\ndf.to_csv('behavior-main.csv')\n\ncids1 = df1_cids['Mixture Cids'].apply(str.split, args=(',')).sum()\ncids2 = df2_cids['Mixture Cids'].apply(str.split, args=(',')).sum()\ncids3 = list(df3[['A', 'B']].values.ravel())\n\ncids = cids1 + cids2 + cids3\ncids = list(set(map(int, cids)))\n\nmolecules_info = from_cids(cids)\n\npd.DataFrame(molecules_info).set_index('CID').to_csv('molecules-info.csv')\n", "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.10.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nimport pandas as pd\nfrom pyrfume.odorants import get_cids, from_cids\n\nraw = pd.read_csv('ifra-fragrance-ingredient-glossary---oct-2019.csv')\ncas = raw['CAS number']\n\n# + jupyter={\"outputs_hidden\": true} tags=[]\ncids = get_cids(cas)\n# -\n\nraw['CID'] = raw['CAS number'].apply(cids.get)\nraw = raw[raw['CID'].notnull() & raw['CID']>0]\n\nmolecules = pd.DataFrame(from_cids(raw['CID'])).set_index('CID')\nmolecules.to_csv('molecules.csv')\n\ncolumns = ['CID', 'Primary descriptor', 'Descriptor 2', 'Descriptor 3']\nbehavior = raw[columns].set_index('CID')\nbehavior = behavior.rename(columns={'Primary descriptor': 'Descriptor 1'})\nbehavior.to_csv('behavior.csv')\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.concat" ], [ "pandas.read_csv" ] ]
trautmane/fst
[ "d450ff38bad981a2e04d79967393a2815966f243" ]
[ "src/fibsem_tools/attrs/attrs.py" ]
[ "from pathlib import Path\nfrom typing import List, Optional, Dict, Union, Tuple, Literal, Sequence, Any\nimport fsspec\nimport numpy as np\nfrom xarray import DataArray\nfrom dataclasses import asdict, dataclass\nimport json\nfrom ..io.mrc import mrc_to_dask\nfrom ..io import read\nimport dask.array as da\nimport dacite\nfrom xarray_multiscale.metadata.util import SpatialTransform\n\nCONTAINER_TYPES ={'mrc', 'n5', 'precomputed'}\nDTYPE_FORMATS = {\"uint16\": \"n5\", \"uint8\": \"precomputed\", \"uint64\": \"n5\"}\nCONTENT_TYPES = {\"em\", \"lm\", \"prediction\", \"segmentation\", \"analysis\"}\nContainerTypes = Literal['n5', 'precomputed', 'mrc']\n\n\n@dataclass\nclass VolumeStorageSpec:\n kvStore: str\n containerType: ContainerTypes\n containerPath: str\n dataPath: str\n\n def toURI(self):\n return f'{self.kvStore}://{Path(self.containerPath).with_suffix(\".\" + self.containerType).joinpath(self.dataPath)}'\n\n def __post_init__(self):\n if self.containerType not in CONTAINER_TYPES:\n raise ValueError(\n f\"containerType must be one of {CONTAINER_TYPES}\"\n )\n\n@dataclass\nclass ContrastLimits:\n min: float\n max: float\n\n def __post_init__(self):\n if not self.min <= self.max:\n raise ValueError('min must be less than or equal to max.')\n\n\n@dataclass\nclass DisplaySettings:\n contrastLimits: ContrastLimits\n color: str = 'white'\n invertColormap: bool = False\n\n @classmethod\n def fromDict(cls, d: Dict[str, Any]):\n return dacite.from_dict(cls, d)\n\n\n@dataclass \nclass DatasetView:\n datasetName: str\n name: str\n description: str\n position: Optional[Sequence[float]]\n scale: Optional[float]\n volumeKeys: Sequence[str]\n \n @classmethod\n def fromDict(cls, d: Dict[str, Any]):\n return dacite.from_dict(cls, d)\n\n\n@dataclass\nclass MultiscaleSpec:\n reduction: str\n depth: int\n factors: Union[int, Sequence[int]]\n\n\n@dataclass\nclass MeshSource:\n path: str\n name: str\n datasetName: str\n format: str\n\n\n@dataclass\nclass VolumeSource:\n path: str\n name: str\n datasetName: str\n dataType: str\n dimensions: Sequence[float]\n transform: SpatialTransform\n contentType: str\n containerType: Optional[ContainerTypes]\n displaySettings: DisplaySettings\n description: str = ''\n version: str=\"0\"\n tags: Optional[Sequence[str]] = None\n\n def __post_init__(self):\n assert self.contentType in CONTENT_TYPES\n assert len(self.version) > 0\n\n def toDataArray(self):\n if Path(self.path).suffix == \".mrc\":\n array = mrc_to_dask(self.path, chunks=(1, -1, -1))\n else:\n r = read(self.path)\n array = da.from_array(r, chunks=r.chunks)\n coords = [\n DataArray(\n self.transform.translate[idx] + np.arange(array.shape[idx]) * self.transform.scale[idx],\n dims=ax,\n attrs= {'units': self.transform.units[idx]}\n )\n for idx, ax in enumerate(self.transform.axes)\n ]\n return DataArray(array, coords=coords, name=self.name)\n\n\n @classmethod\n def fromDict(cls, d: Dict[str, Any]):\n return dacite.from_dict(cls, d)\n\n\n\n@dataclass\nclass DatasetIndex:\n name: str\n volumes: Sequence[VolumeSource]\n meshes: Sequence[MeshSource]\n views: Sequence[DatasetView]\n \n @classmethod\n def from_json(cls, fname: Union[str, Path], open_kwargs: dict = {}):\n with fsspec.open(str(fname), mode='rt', **open_kwargs) as fh:\n jblob = json.loads(fh.read())\n return cls(**jblob)\n \n def to_json(self, fname: Union[str, Path], open_kwargs: dict = {}) -> int:\n jblob = json.dumps(asdict(self))\n with fsspec.open(str(fname), mode='wt', **open_kwargs) as fh:\n result = fh.write(jblob)\n return 
result\n\n\n@dataclass\nclass VolumeIngest:\n source: VolumeSource\n multiscaleSpec: MultiscaleSpec\n storageSpec: VolumeStorageSpec\n mutation: Optional[str] = None\n\n\n@dataclass\nclass COSEMArrayAttrs:\n name: str\n transform: SpatialTransform\n\n @classmethod\n def fromDataArray(cls, data: DataArray) -> \"COSEMArrayAttrs\":\n name = data.name\n if name is not None:\n return cls(str(name), SpatialTransform.fromDataArray((data)))\n else: \n raise ValueError('DataArray argument must have a valid name')\n\n\n@dataclass\nclass OMEScaleAttrs:\n path: str \n transform: SpatialTransform\n\n\n@dataclass\nclass OMEMultiscaleAttrs:\n datasets: Sequence[OMEScaleAttrs]\n\n\n@dataclass\nclass COSEMGroupAttrs:\n name: str\n multiscales: Sequence[OMEMultiscaleAttrs]\n\n\n@dataclass\nclass N5PixelResolution:\n dimensions: Sequence[float]\n unit: str\n\n\n@dataclass\nclass NeuroglancerGroupAttrs:\n # see https://github.com/google/neuroglancer/issues/176#issuecomment-553027775\n axes: Sequence[str]\n units: Sequence[str]\n scales: Sequence[Sequence[int]]\n pixelResolution: N5PixelResolution\n\n\n@dataclass\nclass MultiscaleGroupAttrs:\n name: str\n multiscales: Sequence[OMEMultiscaleAttrs]\n axes: Sequence[str]\n units: Sequence[str]\n scales: Sequence[Sequence[int]]\n pixelResolution: N5PixelResolution\n\n\ndef makeN5ArrayAttrs(dimensions: Sequence[float], unit: str) -> Dict[str, N5PixelResolution]:\n return {'pixelResolution': N5PixelResolution(dimensions, unit)}\n\n\ndef makeMultiscaleGroupAttrs(name: str,\n arrays: Sequence[DataArray], \n array_paths: Sequence[str], \n axis_order: str=\"F\") -> MultiscaleGroupAttrs:\n \n assert len(arrays) == len(array_paths)\n cosemArrayAttrs = tuple(COSEMArrayAttrs.fromDataArray(a) for a in arrays)\n \n axis_indexer = slice(None)\n # neuroglancer wants the axes reported in fortran order\n if axis_order == \"F\":\n axis_indexer = slice(-1, None, -1)\n \n axes: Tuple[str] = arrays[0].dims[axis_indexer]\n scales = tuple(tuple(s.scale_factors)[axis_indexer] for s in arrays)\n coords_reordered = tuple(arrays[0].coords[k] for k in axes)\n units = tuple(d.units for d in coords_reordered)\n\n # we need this for neuroglancer\n pixelResolution = N5PixelResolution(dimensions=cosemArrayAttrs[0].transform.scale[axis_indexer], unit=units[0])\n multiscales = OMEMultiscaleAttrs(datasets=[OMEScaleAttrs(path=ap, transform=attr.transform) for ap, attr in zip(array_paths, cosemArrayAttrs)])\n\n result = MultiscaleGroupAttrs(name=name,\n multiscales=[multiscales], \n axes=axes,\n units=units,\n scales=scales,\n pixelResolution=pixelResolution)\n return result\n\n\n@dataclass\nclass CompositeArrayAttrs:\n name: str\n transform: SpatialTransform\n pixelResolution: N5PixelResolution\n\n @classmethod\n def fromDataArray(cls, data: DataArray):\n cosemAttrs = COSEMArrayAttrs.fromDataArray(data)\n pixelResolution = N5PixelResolution(cosemAttrs.transform.scale[::-1], unit=cosemAttrs.transform.units[0])\n return cls(cosemAttrs.name, cosemAttrs.transform, pixelResolution)" ]
[ [ "numpy.arange" ] ]
Let-Me-Code/Hepatitis-B-Mortality-Prediction
[ "3ac192ac814cb90fe38db5e0f7c8ba97b5491597" ]
[ "app.py" ]
[ "# Core Pkgs\nimport streamlit as st\n\n# EDA Pkgs\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\n\n# Utils\nimport os\nimport joblib\nimport hashlib\n# passlib,bcrypt\n\n# Data Viz Pkgs\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('Agg')\n\n# DB\nfrom managed_db import *\n\n# Password\ndef generate_hashes(password):\n\treturn hashlib.sha256(str.encode(password)).hexdigest()\n\n\ndef verify_hashes(password,hashed_text):\n\tif generate_hashes(password) == hashed_text:\n\t\treturn hashed_text\n\treturn False\n\n\nfeature_names_best = ['age', 'sex', 'steroid', 'antivirals', 'fatigue', 'spiders', 'ascites','varices', 'bilirubin', 'alk_phosphate', 'sgot', 'albumin', 'protime','histology']\n\ngender_dict = {\"male\":1,\"female\":2}\nfeature_dict = {\"No\":1,\"Yes\":2}\n\n\ndef get_value(val,my_dict):\n\tfor key,value in my_dict.items():\n\t\tif val == key:\n\t\t\treturn value\n\ndef get_key(val,my_dict):\n\tfor key,value in my_dict.items():\n\t\tif val == key:\n\t\t\treturn key\n\ndef get_fvalue(val):\n\tfeature_dict = {\"No\":1,\"Yes\":2}\n\tfor key,value in feature_dict.items():\n\t\tif val == key:\n\t\t\treturn value\n\n# Load ML Models\ndef load_model(model_file):\n\tloaded_model = joblib.load(open(os.path.join(model_file),\"rb\"))\n\treturn loaded_model\n\n\n# ML Interpretation\nimport lime\nimport lime.lime_tabular\n\n\nhtml_temp = \"\"\"\n\t\t<div style=\"background-color:{};padding:10px;border-radius:10px\">\n\t\t<h1 style=\"color:white;text-align:center;\">Disease Mortality Prediction </h1>\n\t\t<h5 style=\"color:white;text-align:center;\">Hepatitis B </h5>\n\t\t</div>\n\t\t\"\"\"\n\n# Avatar Image using a url\navatar1 =\"https://www.w3schools.com/howto/img_avatar1.png\"\navatar2 =\"https://www.w3schools.com/howto/img_avatar2.png\"\n\nresult_temp =\"\"\"\n\t<div style=\"background-color:#464e5f;padding:10px;border-radius:10px;margin:10px;\">\n\t<h4 style=\"color:white;text-align:center;\">Algorithm:: {}</h4>\n\t<img src=\"https://www.w3schools.com/howto/img_avatar.png\" alt=\"Avatar\" style=\"vertical-align: middle;float:left;width: 50px;height: 50px;border-radius: 50%;\" >\n\t<br/>\n\t<br/>\n\t<p style=\"text-align:justify;color:white\">{} % probalibilty that Patient {}s</p>\n\t</div>\n\t\"\"\"\n\nresult_temp2 =\"\"\"\n\t<div style=\"background-color:#464e5f;padding:10px;border-radius:10px;margin:10px;\">\n\t<h4 style=\"color:white;text-align:center;\">Algorithm:: {}</h4>\n\t<img src=\"https://www.w3schools.com/howto/{}\" alt=\"Avatar\" style=\"vertical-align: middle;float:left;width: 50px;height: 50px;border-radius: 50%;\" >\n\t<br/>\n\t<br/>\n\t<p style=\"text-align:justify;color:white\">{} % probalibilty that Patient {}s</p>\n\t</div>\n\t\"\"\"\n\nprescriptive_message_temp =\"\"\"\n\t<div style=\"background-color:silver;overflow-x: auto; padding:10px;border-radius:5px;margin:10px;\">\n\t\t<h3 style=\"text-align:justify;color:black;padding:10px\">Recommended Life style modification</h3>\n\t\t<ul>\n\t\t<li style=\"text-align:justify;color:black;padding:10px\">Exercise Daily</li>\n\t\t<li style=\"text-align:justify;color:black;padding:10px\">Get Plenty of Rest</li>\n\t\t<li style=\"text-align:justify;color:black;padding:10px\">Exercise Daily</li>\n\t\t<li style=\"text-align:justify;color:black;padding:10px\">Avoid Alchol</li>\n\t\t<li style=\"text-align:justify;color:black;padding:10px\">Proper diet</li>\n\t\t<ul>\n\t\t<h3 style=\"text-align:justify;color:black;padding:10px\">Medical Mgmt</h3>\n\t\t<ul>\n\t\t<li 
style=\"text-align:justify;color:black;padding:10px\">Consult your doctor</li>\n\t\t<li style=\"text-align:justify;color:black;padding:10px\">Take your interferons</li>\n\t\t<li style=\"text-align:justify;color:black;padding:10px\">Go for checkups</li>\n\t\t<ul>\n\t</div>\n\t\"\"\"\n\n\ndescriptive_message_temp =\"\"\"\n\t<div style=\"background-color:silver;overflow-x: auto; padding:10px;border-radius:5px;margin:10px;\">\n\t\t<h3 style=\"text-align:justify;color:black;padding:10px\">Definition</h3>\n\t\t<p>Hepatitis B is a viral infection that attacks the liver and can cause both acute and chronic disease.</p>\n\t</div>\n\t\"\"\"\n\n@st.cache\ndef load_image(img):\n\tim =Image.open(os.path.join(img))\n\treturn im\n\n\ndef change_avatar(sex):\n\tif sex == \"male\":\n\t\tavatar_img = 'img_avatar.png'\n\telse:\n\t\tavatar_img = 'img_avatar2.png'\n\treturn avatar_img\n\n\ndef main():\n\t\"\"\"Hep Mortality Prediction App\"\"\"\n\t# st.title(\"Hepatitis Mortality Prediction App\")\n\tst.markdown(html_temp.format('royalblue'),unsafe_allow_html=True)\n\n\tmenu = [\"Home\",\"Login\",\"SignUp\"]\n\tsub_menu = [\"Plot\",\"Prediction\"] #,\"Metrics\"]\n\n\tchoice = st.sidebar.selectbox(\"Menu\",menu)\n\tif choice == \"Home\":\n\t\tst.subheader(\"Home\")\n\t\t# st.text(\"What is Hepatitis?\")\n\t\tst.markdown(descriptive_message_temp,unsafe_allow_html=True)\n\t\tst.image(load_image('hepimage.jpg'))\n\n\n\telif choice == \"Login\":\n\t\tusername = st.sidebar.text_input(\"Username\")\n\t\tpassword = st.sidebar.text_input(\"Password\",type='password')\n\t\tif st.sidebar.checkbox(\"Login\"):\n\t\t\tcreate_usertable()\n\t\t\thashed_pswd = generate_hashes(password)\n\t\t\tresult = login_user(username,verify_hashes(password,hashed_pswd))\n\t\t\t# if password == \"12345\":\n\t\t\tif result:\n\t\t\t\tst.success(\"Welcome {}\".format(username))\n\n\t\t\t\tactivity = st.selectbox(\"Activity\",sub_menu)\n\t\t\t\tif activity == \"Plot\":\n\t\t\t\t\tst.subheader(\"Data Vis Plot\")\n\t\t\t\t\tdf = pd.read_csv(\"clean_hepatitis_dataset.csv\")\n\t\t\t\t\tst.dataframe(df)\n\n\t\t\t\t\tdf['class'].value_counts().plot(kind='bar')\n\t\t\t\t\tst.pyplot()\n\n\t\t\t\t\t# Freq Dist Plot\n\t\t\t\t\tfreq_df = pd.read_csv(\"freq_df_hepatitis_dataset.csv\")\n\t\t\t\t\tst.bar_chart(freq_df['count'])\n\n\n\t\t\t\t\tif st.checkbox(\"Area Chart\"):\n\t\t\t\t\t\tall_columns = df.columns.to_list()\n\t\t\t\t\t\tfeat_choices = st.multiselect(\"Choose a Feature\",all_columns)\n\t\t\t\t\t\tnew_df = df[feat_choices]\n\t\t\t\t\t\tst.area_chart(new_df)\n\n\n\n\t\t\t\telif activity == \"Prediction\":\n\t\t\t\t\tst.subheader(\"Predictive Analytics\")\n\n\t\t\t\t\tage = st.number_input(\"Age\",7,80)\n\t\t\t\t\tsex = st.radio(\"Sex\",tuple(gender_dict.keys()))\n\t\t\t\t\tsteroid = st.radio(\"Do You Take Steroids?\",tuple(feature_dict.keys()))\n\t\t\t\t\tantivirals = st.radio(\"Do You Take Antivirals?\",tuple(feature_dict.keys()))\n\t\t\t\t\tfatigue = st.radio(\"Do You Have Fatigue\",tuple(feature_dict.keys()))\n\t\t\t\t\tspiders = st.radio(\"Presence of Spider Naeve\",tuple(feature_dict.keys()))\n\t\t\t\t\tascites = st.selectbox(\"Ascities\",tuple(feature_dict.keys()))\n\t\t\t\t\tvarices = st.selectbox(\"Presence of Varices\",tuple(feature_dict.keys()))\n\t\t\t\t\tbilirubin = st.number_input(\"bilirubin Content\",0.0,8.0)\n\t\t\t\t\talk_phosphate = st.number_input(\"Alkaline Phosphate Content\",0.0,296.0)\n\t\t\t\t\tsgot = st.number_input(\"Sgot\",0.0,648.0)\n\t\t\t\t\talbumin = st.number_input(\"Albumin\",0.0,6.4)\n\t\t\t\t\tprotime = 
st.number_input(\"Prothrombin Time\",0.0,100.0)\n\t\t\t\t\thistology = st.selectbox(\"Histology\",tuple(feature_dict.keys()))\n\t\t\t\t\tfeature_list = [age,get_value(sex,gender_dict),get_fvalue(steroid),get_fvalue(antivirals),get_fvalue(fatigue),get_fvalue(spiders),get_fvalue(ascites),get_fvalue(varices),bilirubin,alk_phosphate,sgot,albumin,int(protime),get_fvalue(histology)]\n\t\t\t\t\tst.write(len(feature_list))\n\t\t\t\t\tst.write(feature_list)\n\t\t\t\t\tpretty_result = {\"age\":age,\"sex\":sex,\"steroid\":steroid,\"antivirals\":antivirals,\"fatigue\":fatigue,\"spiders\":spiders,\"ascites\":ascites,\"varices\":varices,\"bilirubin\":bilirubin,\"alk_phosphate\":alk_phosphate,\"sgot\":sgot,\"albumin\":albumin,\"protime\":protime,\"histolog\":histology}\n\t\t\t\t\tst.json(pretty_result)\n\t\t\t\t\tsingle_sample = np.array(feature_list).reshape(1,-1)\n\n\t\t\t\t\t# ML\n\t\t\t\t\tmodel_choice = st.selectbox(\"Select Model\",[\"LR\",\"KNN\",\"DecisionTree\"])\n\t\t\t\t\tif st.button(\"Predict\"):\n\t\t\t\t\t\tif model_choice == \"KNN\":\n\t\t\t\t\t\t\tloaded_model = load_model(\"knn_hepB_model.pkl\")\n\t\t\t\t\t\t\tprediction = loaded_model.predict(single_sample)\n\t\t\t\t\t\t\tpred_prob = loaded_model.predict_proba(single_sample)\n\t\t\t\t\t\telif model_choice == \"DecisionTree\":\n\t\t\t\t\t\t\tloaded_model = load_model(\"decision_tree_clf_hepB_model.pkl\")\n\t\t\t\t\t\t\tprediction = loaded_model.predict(single_sample)\n\t\t\t\t\t\t\tpred_prob = loaded_model.predict_proba(single_sample)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tloaded_model = load_model(\"logistic_regression_hepB_model.pkl\")\n\t\t\t\t\t\t\tprediction = loaded_model.predict(single_sample)\n\t\t\t\t\t\t\tpred_prob = loaded_model.predict_proba(single_sample)\n\n\t\t\t\t\t\t# st.write(prediction)\n\t\t\t\t\t\t# prediction_label = {\"Die\":1,\"Live\":2}\n\t\t\t\t\t\t# final_result = get_key(prediction,prediction_label)\n\t\t\t\t\t\tif prediction == 1:\n\t\t\t\t\t\t\tst.warning(\"Patient Dies\")\n\t\t\t\t\t\t\tpred_probability_score = {\"Die\":pred_prob[0][0]*100,\"Live\":pred_prob[0][1]*100}\n\t\t\t\t\t\t\tst.subheader(\"Prediction Probability Score using {}\".format(model_choice))\n\t\t\t\t\t\t\tst.json(pred_probability_score)\n\t\t\t\t\t\t\tst.subheader(\"Prescriptive Analytics\")\n\t\t\t\t\t\t\tst.markdown(prescriptive_message_temp,unsafe_allow_html=True)\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tst.success(\"Patient Lives\")\n\t\t\t\t\t\t\tpred_probability_score = {\"Die\":pred_prob[0][0]*100,\"Live\":pred_prob[0][1]*100}\n\t\t\t\t\t\t\tst.subheader(\"Prediction Probability Score using {}\".format(model_choice))\n\t\t\t\t\t\t\tst.json(pred_probability_score)\n\n\t\t\t\t\tif st.checkbox(\"Interpret\"):\n\t\t\t\t\t\tif model_choice == \"KNN\":\n\t\t\t\t\t\t\tloaded_model = load_model(\"knn_hepB_model.pkl\")\n\n\t\t\t\t\t\telif model_choice == \"DecisionTree\":\n\t\t\t\t\t\t\tloaded_model = load_model(\"decision_tree_clf_hepB_model.pkl\")\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tloaded_model = load_model(\"logistic_regression_hepB_model.pkl\")\n\n\n\t\t\t\t\t\t\t# loaded_model = load_model(\"models/logistic_regression_model.pkl\")\n\t\t\t\t\t\t\t# 1 Die and 2 Live\n\t\t\t\t\t\t\tdf = pd.read_csv(\"clean_hepatitis_dataset.csv\")\n\t\t\t\t\t\t\tx = df[['age', 'sex', 'steroid', 'antivirals','fatigue','spiders', 'ascites','varices', 'bilirubin', 'alk_phosphate', 'sgot', 'albumin', 'protime','histology']]\n\t\t\t\t\t\t\tfeature_names = ['age', 'sex', 'steroid', 'antivirals','fatigue','spiders', 'ascites','varices', 'bilirubin', 'alk_phosphate', 
'sgot', 'albumin', 'protime','histology']\n\t\t\t\t\t\t\tclass_names = ['Die(1)','Live(2)']\n\t\t\t\t\t\t\texplainer = lime.lime_tabular.LimeTabularExplainer(x.values,feature_names=feature_names, class_names=class_names,discretize_continuous=True)\n\t\t\t\t\t\t\t# The Explainer Instance\n\t\t\t\t\t\t\texp = explainer.explain_instance(np.array(feature_list), loaded_model.predict_proba,num_features=13, top_labels=1)\n\t\t\t\t\t\t\texp.show_in_notebook(show_table=True, show_all=False)\n\t\t\t\t\t\t\t# exp.save_to_file('lime_oi.html')\n\t\t\t\t\t\t\tst.write(exp.as_list())\n\t\t\t\t\t\t\tnew_exp = exp.as_list()\n\t\t\t\t\t\t\tlabel_limits = [i[0] for i in new_exp]\n\t\t\t\t\t\t\t# st.write(label_limits)\n\t\t\t\t\t\t\tlabel_scores = [i[1] for i in new_exp]\n\t\t\t\t\t\t\tplt.barh(label_limits,label_scores)\n\t\t\t\t\t\t\tst.pyplot()\n\t\t\t\t\t\t\tplt.figure(figsize=(20,10))\n\t\t\t\t\t\t\tfig = exp.as_pyplot_figure()\n\t\t\t\t\t\t\tst.pyplot()\n\n\n\n\n\n\n\t\t\telse:\n\t\t\t\tst.warning(\"Incorrect Username/Password\")\n\n\n\telif choice == \"SignUp\":\n\t\tnew_username = st.text_input(\"User name\")\n\t\tnew_password = st.text_input(\"Password\", type='password')\n\n\t\tconfirm_password = st.text_input(\"Confirm Password\",type='password')\n\t\tif new_password == confirm_password:\n\t\t\tst.success(\"Password Confirmed\")\n\t\telse:\n\t\t\tst.warning(\"Passwords not the same\")\n\n\t\tif st.button(\"Submit\"):\n\t\t\tcreate_usertable()\n\t\t\thashed_new_password = generate_hashes(new_password)\n\t\t\tadd_userdata(new_username,hashed_new_password)\n\t\t\tst.success(\"You have successfully created a new account\")\n\t\t\tst.info(\"Login to Get Started\")\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\tmain()\n" ]
[ [ "matplotlib.use", "numpy.array", "matplotlib.pyplot.figure", "matplotlib.pyplot.barh", "pandas.read_csv" ] ]
evgeniya-egupova/nncf
[ "39a3c5b2e5cc7d33723154d2e622d4d7882a99a4", "39a3c5b2e5cc7d33723154d2e622d4d7882a99a4", "39a3c5b2e5cc7d33723154d2e622d4d7882a99a4" ]
[ "tests/torch/test_backward_compat.py", "tests/torch/test_models/ssd_mobilenet.py", "nncf/tensorflow/tensor.py" ]
[ "\"\"\"\n Copyright (c) 2019-2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport json\nimport os\n\nimport pytest\nimport torch\n\nfrom examples.torch.common.distributed import configure_distributed\nfrom examples.torch.common.execution import ExecutionMode\nfrom examples.torch.common.execution import get_device\nfrom examples.torch.common.execution import prepare_model_for_execution\nfrom examples.torch.common.model_loader import load_model\nfrom examples.torch.common.sample_config import SampleConfig\nfrom nncf.api.compression import CompressionStage\nfrom nncf.torch import register_default_init_args\nfrom nncf.torch.checkpoint_loading import load_state\nfrom nncf.common.graph.definitions import MODEL_INPUT_OP_NAME\nfrom nncf.config import NNCFConfig\nfrom nncf.torch.nncf_network import LEGACY_ACT_STORAGE_NAME\nfrom nncf.torch.nncf_network import MODEL_WRAPPED_BY_NNCF_ATTR_NAME\nfrom tests.common.helpers import TEST_ROOT\nfrom tests.torch.helpers import create_ones_mock_dataloader\nfrom tests.torch.helpers import register_bn_adaptation_init_args\nfrom tests.torch.quantization.test_range_init import SingleConv2dIdentityModel\nfrom tests.torch.test_compressed_graph import get_basic_quantization_config\nfrom tests.torch.helpers import create_compressed_model_and_algo_for_test\nfrom tests.torch.helpers import Command\nfrom tests.common.helpers import get_cli_dict_args\nfrom tests.torch.test_sanity_sample import create_command_line\n\nGLOBAL_CONFIG = {\n TEST_ROOT.joinpath(\"torch\", \"data\", \"configs\", \"squeezenet1_1_cifar10_rb_sparsity_int8.json\"): [\n {\n 'checkpoint_name': 'squeezenet1_1_custom_cifar10_rb_sparsity_int8_dp.pth',\n 'dataset': \"cifar10\",\n 'execution_mode': ExecutionMode.GPU_DATAPARALLEL,\n },\n {\n 'checkpoint_name': 'squeezenet1_1_custom_cifar10_rb_sparsity_int8_ddp.pth',\n 'dataset': \"cifar10\",\n 'execution_mode': ExecutionMode.MULTIPROCESSING_DISTRIBUTED,\n },\n ],\n}\n\nCONFIG_PARAMS = []\nfor config_path_, cases_list_ in GLOBAL_CONFIG.items():\n for case_params_ in cases_list_:\n CONFIG_PARAMS.append((config_path_, case_params_,))\n\n\n@pytest.fixture(scope='module', params=CONFIG_PARAMS,\n ids=['-'.join([str(p[0]), p[1]['execution_mode']]) for p in CONFIG_PARAMS])\ndef _params(request, backward_compat_models_path):\n if backward_compat_models_path is None:\n pytest.skip('Path to models weights for backward compatibility testing is not set,'\n ' use --backward-compat-models option.')\n config_path, case_params = request.param\n checkpoint_path = str(os.path.join(backward_compat_models_path, case_params['checkpoint_name']))\n return {\n 'sample_config_path': config_path,\n 'checkpoint_path': checkpoint_path,\n 'execution_mode': case_params['execution_mode'],\n 'dataset': case_params['dataset']\n }\n\n\ndef test_model_can_be_loaded_with_resume(_params):\n p = _params\n sample_config_path = p['sample_config_path']\n checkpoint_path = p['checkpoint_path']\n\n config = SampleConfig.from_json(str(sample_config_path))\n nncf_config = 
NNCFConfig.from_json(str(sample_config_path))\n\n config.execution_mode = p['execution_mode']\n\n config.current_gpu = 0\n config.device = get_device(config)\n config.distributed = config.execution_mode in (ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)\n if config.distributed:\n config.dist_url = \"tcp://127.0.0.1:9898\"\n config.dist_backend = \"nccl\"\n config.rank = 0\n config.world_size = 1\n configure_distributed(config)\n\n model_name = config['model']\n model = load_model(model_name,\n pretrained=False,\n num_classes=config.get('num_classes', 1000),\n model_params=config.get('model_params'))\n nncf_config = register_default_init_args(nncf_config, train_loader=create_ones_mock_dataloader(nncf_config))\n\n model.to(config.device)\n model, compression_ctrl = create_compressed_model_and_algo_for_test(model, nncf_config)\n model, _ = prepare_model_for_execution(model, config)\n\n if config.distributed:\n compression_ctrl.distributed()\n\n checkpoint = torch.load(checkpoint_path, map_location='cpu')\n load_state(model, checkpoint['state_dict'], is_resume=True)\n\n\ndef test_loaded_model_evals_according_to_saved_acc(_params, tmp_path, dataset_dir):\n p = _params\n config_path = p['sample_config_path']\n checkpoint_path = p['checkpoint_path']\n\n metrics_path = str(tmp_path.joinpath('metrics.json'))\n tmp_path = str(tmp_path)\n args = {}\n if not dataset_dir:\n dataset_dir = tmp_path\n args['data'] = dataset_dir\n args['dataset'] = p['dataset']\n args['config'] = str(config_path)\n args['mode'] = 'test'\n args['log-dir'] = tmp_path\n args['workers'] = 0 # Workaroundr the PyTorch MultiProcessingDataLoader issue\n args['seed'] = 1\n args['resume'] = checkpoint_path\n args['metrics-dump'] = metrics_path\n\n if p['execution_mode'] == ExecutionMode.MULTIPROCESSING_DISTRIBUTED:\n args['multiprocessing-distributed'] = ''\n else:\n pytest.skip(\"DataParallel eval takes too long for this test to be run during pre-commit\")\n\n runner = Command(create_command_line(get_cli_dict_args(args), \"classification\"))\n runner.run()\n\n with open(metrics_path, encoding='utf8') as metric_file:\n metrics = json.load(metric_file)\n # accuracy is rounded to hundredths\n assert torch.load(checkpoint_path)['best_acc1'] == pytest.approx(metrics['Accuracy'], abs=1e-2)\n\n\nold_style_sd = {\n f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.weight': torch.ones([3, 3, 1, 1]),\n f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.bias': torch.ones([3]),\n f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.pre_ops.0.op._num_bits': 8 * torch.ones([1], dtype=torch.int32),\n f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.pre_ops.0.op.signed_tensor': torch.ones([1], dtype=torch.int32),\n f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.pre_ops.0.op.enabled': torch.ones([1], dtype=torch.int32),\n f'{MODEL_WRAPPED_BY_NNCF_ATTR_NAME}.conv2d.pre_ops.0.op.scale': torch.ones([3, 1, 1, 1]),\n f'{LEGACY_ACT_STORAGE_NAME}./{MODEL_INPUT_OP_NAME}_0|OUTPUT._num_bits': 8 * torch.ones([1], dtype=torch.int32),\n f'{LEGACY_ACT_STORAGE_NAME}./{MODEL_INPUT_OP_NAME}_0|OUTPUT.signed_tensor': torch.zeros([1], dtype=torch.int32),\n f'{LEGACY_ACT_STORAGE_NAME}./{MODEL_INPUT_OP_NAME}_0|OUTPUT.enabled': torch.ones([1], dtype=torch.int32),\n f'{LEGACY_ACT_STORAGE_NAME}./{MODEL_INPUT_OP_NAME}_0|OUTPUT.scale': torch.ones([1]),\n}\n\n\ndef test_renamed_activation_quantizer_storage_in_state_dict():\n model = SingleConv2dIdentityModel()\n config = get_basic_quantization_config(input_info={\n \"sample_size\": [1, 3, 100, 100]\n })\n 
register_bn_adaptation_init_args(config)\n compressed_model, _ = create_compressed_model_and_algo_for_test(model, config)\n\n with pytest.deprecated_call():\n _ = load_state(compressed_model, old_style_sd, is_resume=True)\n\n\ndef test_can_compress_with_config_and_resume_of_old_checkpoint():\n model = SingleConv2dIdentityModel()\n config = get_basic_quantization_config(input_info={\n \"sample_size\": [1, 3, 100, 100]\n })\n register_bn_adaptation_init_args(config)\n create_compressed_model_and_algo_for_test(model, config, compression_state=old_style_sd)\n\n\n# BN Wrapping backward compatibility test\n\n\nclass ConvBNLayer(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 9, (3, 3))\n self.bn = torch.nn.BatchNorm2d(9)\n self.conv1 = torch.nn.Conv2d(9, 3, (3, 3))\n self.bn1 = torch.nn.BatchNorm2d(3)\n\n def forward(self, x):\n x = self.bn(self.conv(x))\n return self.bn1(self.conv1(x))\n\n\nsd_without_nncf_bn_wrapping = {\n 'nncf_module.conv.weight': torch.empty([9, 3, 3, 3]),\n 'nncf_module.conv.bias': torch.empty([9]),\n 'nncf_module.conv.nncf_padding_value': torch.empty([1]),\n 'nncf_module.conv.pre_ops.0.op._num_bits': torch.empty([1]),\n 'nncf_module.conv.pre_ops.0.op.signed_tensor': torch.empty([1]),\n 'nncf_module.conv.pre_ops.0.op.enabled': torch.empty([1]),\n 'nncf_module.conv.pre_ops.0.op.scale': torch.empty([9, 1, 1, 1]),\n 'nncf_module.bn.weight': torch.empty([9]),\n 'nncf_module.bn.bias': torch.empty([9]),\n 'nncf_module.bn.running_mean': torch.empty([9]),\n 'nncf_module.bn.running_var': torch.empty([9]),\n 'nncf_module.bn.num_batches_tracked': torch.empty([]),\n 'nncf_module.conv1.weight': torch.empty([3, 9, 3, 3]),\n 'nncf_module.conv1.bias': torch.empty([3]),\n 'nncf_module.conv1.nncf_padding_value': torch.empty([1]),\n 'nncf_module.conv1.pre_ops.0.op._num_bits': torch.empty([1]),\n 'nncf_module.conv1.pre_ops.0.op.signed_tensor': torch.empty([1]),\n 'nncf_module.conv1.pre_ops.0.op.enabled': torch.empty([1]),\n 'nncf_module.conv1.pre_ops.0.op.scale': torch.empty([3, 1, 1, 1]),\n 'nncf_module.bn1.weight': torch.empty([3]),\n 'nncf_module.bn1.bias': torch.empty([3]),\n 'nncf_module.bn1.running_mean': torch.empty([3]),\n 'nncf_module.bn1.running_var': torch.empty([3]),\n 'nncf_module.bn1.num_batches_tracked': torch.empty([]),\n 'external_quantizers./nncf_model_input_0|OUTPUT._num_bits': torch.empty([1]),\n 'external_quantizers./nncf_model_input_0|OUTPUT.signed_tensor': torch.empty([1]),\n 'external_quantizers./nncf_model_input_0|OUTPUT.enabled': torch.empty([1]),\n 'external_quantizers./nncf_model_input_0|OUTPUT.scale': torch.empty([1]),\n # Old bn layer names: |||||||||||\n 'external_quantizers.ConvBNLayer/BatchNorm2d[bn]/batch_norm_0|OUTPUT._num_bits': torch.empty([1]),\n 'external_quantizers.ConvBNLayer/BatchNorm2d[bn]/batch_norm_0|OUTPUT.signed_tensor': torch.empty([1]),\n 'external_quantizers.ConvBNLayer/BatchNorm2d[bn]/batch_norm_0|OUTPUT.enabled': torch.empty([1]),\n 'external_quantizers.ConvBNLayer/BatchNorm2d[bn]/batch_norm_0|OUTPUT.scale': torch.empty([1])\n}\n\ncompression_state_without_bn_wrapping = {\n 'builder_state':\n {'quantization':\n {'quantizer_setup':\n {'quantization_points':\n {1: {'qip': {'target_node_name': '/nncf_model_input_0', 'input_port_id': None},\n 'qip_class': 'ActivationQuantizationInsertionPoint',\n 'qconfig':\n {'num_bits': 8, 'mode': 'symmetric', 'signedness_to_force': None, 'per_channel': False},\n 'directly_quantized_operator_node_names': ['ConvBNLayer/NNCFConv2d[conv]/conv2d_0']},\n # Old bn layer 
name: |||||||||||\n 2: {'qip': {'target_node_name': 'ConvBNLayer/BatchNorm2d[bn]/batch_norm_0', 'input_port_id': None},\n 'qip_class': 'ActivationQuantizationInsertionPoint',\n 'qconfig':\n {'num_bits': 8, 'mode': 'symmetric', 'signedness_to_force': None, 'per_channel': False},\n 'directly_quantized_operator_node_names': ['ConvBNLayer/NNCFConv2d[conv1]/conv2d_0']},\n 4: {'qip': {'target_node_name': 'ConvBNLayer/NNCFConv2d[conv]/conv2d_0'},\n 'qip_class': 'WeightQuantizationInsertionPoint',\n 'qconfig':\n {'num_bits': 8, 'mode': 'symmetric', 'signedness_to_force': True, 'per_channel': True},\n 'directly_quantized_operator_node_names': ['ConvBNLayer/NNCFConv2d[conv]/conv2d_0']},\n 5: {'qip': {'target_node_name': 'ConvBNLayer/NNCFConv2d[conv1]/conv2d_0'},\n 'qip_class': 'WeightQuantizationInsertionPoint',\n 'qconfig':\n {'num_bits': 8, 'mode': 'symmetric', 'signedness_to_force': True, 'per_channel': True},\n 'directly_quantized_operator_node_names': ['ConvBNLayer/NNCFConv2d[conv1]/conv2d_0']}},\n 'unified_scale_groups': {}, 'shared_input_operation_set_groups': {0: [1, 4], 1: [2, 5]}},\n 'build_time_metric_infos': {'aq_potential_num': 3, 'wq_potential_num': 4}}},\n 'ctrl_state': {'quantization': {'loss_state': None, 'scheduler_state': {'current_step': -1, 'current_epoch': -1},\n 'compression_stage': CompressionStage.FULLY_COMPRESSED}}}\n\n\ndef test_quantization_ckpt_without_wrapped_bn_loading():\n model = ConvBNLayer()\n config = get_basic_quantization_config(input_info={\n \"sample_size\": [1, 3, 100, 100]\n })\n register_bn_adaptation_init_args(config)\n with pytest.deprecated_call():\n compressed_model, _ = \\\n create_compressed_model_and_algo_for_test(model, config,\n compression_state=compression_state_without_bn_wrapping)\n with pytest.deprecated_call():\n _ = load_state(compressed_model, sd_without_nncf_bn_wrapping, is_resume=True)\n", "\"\"\"\n Copyright (c) 2019 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport torch\nfrom torch import nn\nfrom examples.torch.common.sample_config import SampleConfig\n\nfrom examples.torch.object_detection.layers.modules.ssd_head import MultiOutputSequential, SSDDetectionOutput\nfrom nncf.torch.checkpoint_loading import load_state\n\n\ndef conv_bn(inp, oup, kernel, stride, padding):\n return nn.Sequential(\n nn.Conv2d(inp, oup, kernel, stride, padding, bias=False),\n nn.BatchNorm2d(oup),\n nn.ReLU(inplace=True)\n )\n\n\ndef conv_dw(inp, oup, stride):\n return nn.Sequential(\n nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),\n nn.BatchNorm2d(inp),\n nn.ReLU(inplace=True),\n\n nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n nn.ReLU(inplace=True),\n )\n\n\ndef mobilenet(start_input_channels=3):\n model = MultiOutputSequential(\n [11, 13],\n [\n conv_bn(start_input_channels, 32, 3, 2, 1),\n conv_dw(32, 64, 1),\n conv_dw(64, 128, 2),\n conv_dw(128, 128, 1),\n conv_dw(128, 256, 2),\n conv_dw(256, 256, 1),\n conv_dw(256, 512, 2),\n conv_dw(512, 512, 1),\n conv_dw(512, 512, 1),\n conv_dw(512, 512, 1),\n 
conv_dw(512, 512, 1),\n conv_dw(512, 512, 1),\n conv_dw(512, 1024, 2),\n conv_dw(1024, 1024, 1)\n ]\n )\n return model\n\n\ndef extra_layers(start_input_channels):\n return MultiOutputSequential(\n [1, 3, 5, 7],\n [\n conv_bn(start_input_channels, 256, 1, 1, 0),\n conv_bn(256, 512, 3, 2, 1),\n conv_bn(512, 128, 1, 1, 0),\n conv_bn(128, 256, 3, 2, 1),\n conv_bn(256, 128, 1, 1, 0),\n conv_bn(128, 256, 3, 2, 1),\n conv_bn(256, 64, 1, 1, 0),\n conv_bn(64, 128, 3, 2, 1)\n ]\n )\n\n\nclass MobileNetSSD(nn.Module):\n def __init__(self, num_classes, cfg):\n super().__init__()\n self.cfg = cfg\n self.num_classes = num_classes\n\n self.basenet = mobilenet()\n self.extras = extra_layers(1024)\n\n NUM_INPUT_FEATURES = [512, 1024, 512, 256, 256, 128]\n self.detection_head = SSDDetectionOutput(NUM_INPUT_FEATURES, num_classes, cfg)\n\n def forward(self, x):\n img_tensor = x[0].clone().unsqueeze(0)\n\n sources, x = self.basenet(x)\n extra_sources, x = self.extras(x)\n\n return self.detection_head(sources + extra_sources, img_tensor)\n\n\ndef build_ssd_mobilenet(cfg, size, num_classes, config):\n if size != 300:\n raise ValueError(\"Only Mobilenet-SSD with input size 300 is supported\")\n mobilenet_ssd = MobileNetSSD(num_classes, cfg)\n\n if config.basenet and (config.resuming_checkpoint_path is None) and (config.weights is None):\n print('Loading base network...')\n basenet_weights = torch.load(config.basenet)['state_dict']\n new_weights = {}\n for wn, wv in basenet_weights.items():\n wn = wn.replace('model.', '')\n new_weights[wn] = wv\n\n load_state(mobilenet_ssd.basenet, new_weights, is_resume=False)\n return mobilenet_ssd\n\n\ndef ssd_mobilenet():\n ssd_params = SampleConfig({\n \"variance\": [0.1, 0.1, 0.2, 0.2],\n \"max_sizes\": [60, 111, 162, 213, 264, 315],\n \"min_sizes\": [30, 60, 111, 162, 213, 264],\n \"steps\": [16, 32, 64, 100, 150, 300],\n \"aspect_ratios\": [[2], [2, 3], [2, 3], [2, 3], [2], [2]],\n \"clip\": False,\n \"flip\": True,\n \"top_k\": 200\n })\n\n return MobileNetSSD(21, ssd_params)\n", "\"\"\"\n Copyright (c) 2021 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport tensorflow as tf\n\nfrom typing import List, Union\n\nfrom nncf.common.tensor import NNCFTensor\nfrom nncf.common.tensor import NNCFBaseTensorProcessor\n\n\nclass TFNNCFTensorProcessor(NNCFBaseTensorProcessor):\n \"\"\"\n A realization of the processing methods set for TFNNCFTensors.\n \"\"\"\n\n @classmethod\n def concatenate(cls, tensors: List[NNCFTensor], axis: int) -> NNCFTensor:\n # pylint: disable=E1120,E1123\n ret_tensor = tf.concat([t.tensor for t in tensors], axis=axis)\n return TFNNCFTensor(ret_tensor)\n\n @classmethod\n def ones(cls, shape: Union[int, List[int]], device: tf.device) -> NNCFTensor:\n with tf.device(device):\n return TFNNCFTensor(tf.ones(shape))\n\n @classmethod\n def assert_allclose(cls, tensors: List[NNCFTensor]) -> None:\n for input_mask in tensors[1:]:\n tf.debugging.assert_near(tensors[0].tensor, input_mask.tensor)\n\n @classmethod\n def repeat(cls, tensor: NNCFTensor, 
repeats: int) -> NNCFTensor:\n ret_tensor = tf.repeat(tensor, repeats=repeats)\n return TFNNCFTensor(ret_tensor)\n\n @classmethod\n def elementwise_mask_propagation(cls, input_masks: List[NNCFTensor]) -> NNCFTensor:\n cls.assert_allclose(input_masks)\n return input_masks[0]\n\n\nclass TFNNCFTensor(NNCFTensor):\n \"\"\"\n A realisation of tensorflow tensors wrapper for common NNCF algorithms.\n \"\"\"\n\n def __init__(self, tensor: tf.Variable):\n # In case somebody attempts to wrap\n # tensor twice\n if isinstance(tensor, self.__class__):\n tensor = tensor.tensor\n\n super().__init__(tensor, TFNNCFTensorProcessor)\n\n @property\n def device(self) -> tf.device:\n return self._tensor.device\n" ]
[ [ "torch.zeros", "torch.nn.BatchNorm2d", "torch.ones", "torch.nn.Conv2d", "torch.load", "torch.empty" ], [ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.load" ], [ "tensorflow.concat", "tensorflow.ones", "tensorflow.device", "tensorflow.repeat", "tensorflow.debugging.assert_near" ] ]
ase2sprkkr/ase2sprkkr
[ "5e04f54365e4ab65d97bd11d573b078674548a59" ]
[ "src/ase2sprkkr/sprkkr/sprkkr_atoms.py" ]
[ "\"\"\" This file contains SPRKKRAtoms - an enhanced version of Atoms to be used\nwith SPRKKR \"\"\"\n\n\nfrom ase import Atoms\nfrom ..common.unique_values import UniqueValuesMapping\nimport spglib\nfrom ase.spacegroup import Spacegroup\nimport numpy as np\nfrom ..sprkkr.sites import Site\nfrom ..common.misc import numpy_index\n\nclass SPRKKRAtoms(Atoms):\n \"\"\" ASE Atoms object extended by the data necessary for SPR-KKR calculations \"\"\"\n\n @staticmethod\n def promote_ase_atoms(obj, symmetry=None):\n \"\"\" Convert ASE Atoms object to the one usable by SPRKKR.\n For the case of the usability it is a bit ugly hack: The __class__ attribute\n is replaced so the extra methods and properties of the objects will\n be available.\n\n Parameters\n ----------\n obj: ase.Atoms\n The atoms object to be promoted to be used for SPRKKR calculations\n\n symmetry: boolean or None\n The sites property of the resulting object will consider the symmetry of the structure.\n I.e., the by-symmetry-equal atomic sites will share the same sites object.\n Default None is the same as True, however it does not change the symmetry\n of the already promoted obj passed into the routine.\n \"\"\"\n if obj and not isinstance(obj, SPRKKRAtoms):\n if obj.__class__ is Atoms:\n obj.__class__ = SPRKKRAtoms\n else:\n if not isinstance(obj, Atoms):\n raise(f'Can not promote class {obj} of class {obj.__class__} to {SPRKKRAtoms}')\n\n class SprKKrAtomsEx(obj.__class__, SPRKKRAtoms):\n pass\n obj.__class__ = SprKKrAtomsEx\n\n obj._init(True if symmetry is None else symmetry)\n else:\n if symmetry is not None:\n obj.symmetry = symmetry\n return obj\n\n def __init__(self, *args, symmetry=True, potential=None, **kwargs):\n \"\"\"\n Creates SPRKKRAtoms atoms\n\n Parameters\n ----------\n *args: list\n The positionals arguments of ase.Atoms.__init__\n symmetry: boolean\n The symmetry will be computed when the sites property will be initialized.\n I.e., the by-symmetry-equal atomic sites will share the same sites object.\n **kwargs: dict\n The named arguments of ase.Atoms.__init__\n \"\"\"\n self._init(symmetry, potential)\n super().__init__(*args, **kwargs)\n\n def _init(self, symmetry=True, potential=None):\n \"\"\" The initialization of the additional (not-in-ASE) properties. 
To be used\n by constructor and by promote_ase_atoms\"\"\"\n self._unique_sites = None\n self._potential = potential\n self._symmetry = symmetry\n\n @property\n def symmetry(self):\n \"\"\"\n Whether the sites property is/will be generated using symmetry, i.e.\n whether the Sites objects in the sites property will be shared among\n symmetric atomic sites.\n \"\"\"\n return self._symmetry\n\n @symmetry.setter\n def symmetry(self, value):\n \"\"\"\n Recomputes the sites with enabled/disabled symmetry if the value of the property\n has changed.\n \"\"\"\n if self._symmetry == value:\n return\n self._symmetry = value\n if self._unique_sites is not None:\n if value:\n self._compute_sites_symmetry()\n else:\n self._cancel_sites_symmetry()\n\n\n def compute_spacegroup_for_atomic_numbers(self, atomic_numbers=None, symprec=1e-5):\n \"\"\" Return spacegroup that suits to the atoms' cell structure and to the given\n atomic_numbers (not necessary the real ones, they can be just ''labels'').\n \"\"\"\n\n atomic_numbers = atomic_numbers if atomic_numbers is not None else self.get_atomic_numbers()\n sg = spglib.get_spacegroup((self.get_cell(),\n self.get_scaled_positions(),\n atomic_numbers),\n symprec=symprec)\n if sg is None:\n return None\n sg_no = int(sg[sg.find('(') + 1:sg.find(')')])\n spacegroup = Spacegroup(sg_no)\n return spacegroup\n\n def compute_sites_symmetry(self, spacegroup=None, atomic_numbers=None, consider_old=False, symprec=1e-5):\n \"\"\" SPRKKR has some properties shared by all by-symmetry-equal sites.\n This method initializes _sites property, that holds these properties:\n makes identical all the atoms on the \"symmetry identical positions\" with\n the same atomic number.\n\n The method is called automatically when the sites property is firstly accessed.\n The effect of the method is the nearly same as setting the symmetry property.\n However, setting the symmetry property on an 'already symmetrized' object has\n no effect, while this methods always recompute the sites property.\n\n Parameters\n ----------\n spacegroup: Spacegroup\n If not None, the given spacegroup is used for determining the symmetry,\n instead of the one determined by cell geometry.\n\n atomic_numbers: [ int ]\n Atomic numbers used to determine the spacegroup (if it is not given) to compute\n the symmetry. The atomic numbers can be ''virtual'', just to denote the equivalence\n of the sites.\n The array should have the same length as the number of atoms in the unit cell.\n If None, self.symbols are used.\n\n consider_old: bool\n If True, and _unique_sites is not None, the non-symmetry-equivalent sites won't\n be equivalent in the newly computed symmetry.\n\n symprec: float\n A threshold for spatial error for the symmetry computing. 
See spglib.get_spacegroup\n\n \"\"\"\n self._symmetry = True\n SPRKKRAtoms._compute_sites_symmetry(**locals())\n\n def _compute_sites_symmetry(self, spacegroup=None, atomic_numbers=None, consider_old=False, symprec=1e-5):\n \"\"\" See compute_sites_symmetry - this metod does just the same, but it does not set the symmetry property.\"\"\"\n\n occupation = self.info.get('occupancy', {})\n if not spacegroup and self._symmetry:\n if atomic_numbers:\n mapping = UniqueValuesMapping(atomic_numbers)\n else:\n mapping = UniqueValuesMapping(self.get_atomic_numbers())\n if consider_old and self._unique_sites:\n mapping = mapping.merge(self._unique_sites)\n if occupation:\n def gen_occ():\n for i in range(len(mapping)):\n val = occupation.get(i, None)\n if val is None:\n yield val\n else:\n yield tuple((k, val[k]) for k in val)\n mapping = mapping.merge(gen_occ())\n\n spacegroup = self.compute_spacegroup_for_atomic_numbers(mapping.mapping, symprec=symprec)\n\n self.info['spacegroup'] = spacegroup\n\n if not spacegroup:\n return self.cancel_sites_symmetry()\n\n tags = spacegroup.tag_sites(self.get_scaled_positions())\n mapping = mapping.merge( tags )\n tags = mapping.mapping\n\n sites = np.empty(len(tags), dtype=object)\n\n uniq, umap = np.unique(tags, return_inverse = True)\n used = set()\n for i in range(len(uniq)):\n index = umap == i\n if self._unique_sites is not None:\n #first non-none of the given index\n possible = (i for i in self._unique_sites[index])\n site = next(filter(None, possible), None)\n if site in used:\n site = site.copy()\n else:\n used.add(site)\n else:\n site = None\n if not site:\n symbol = self.symbols[ numpy_index(umap,i)]\n for ai in np.where(index)[0]:\n if ai in occupation and occupation[ai]:\n symbol = occupation[ai]\n site = Site(self, symbol)\n sites[index] = site\n self.sites = sites\n\n def cancel_sites_symmetry(self):\n \"\"\" Cancel the use of symmetry in the structure, i.e., makes the Site object\n uniqe (not shared) for each atomic site.\n\n Calling this method is nearly equivalent to the setting the symmetry property\n to False, however, this method always recompute the sites object, while\n setting symmetry=False recomputes the sites property only if it was previously\n set to False.\n \"\"\"\n self._symmetry = False\n self._cancel_sites_symmetry()\n\n def _cancel_sites_symmetry(self):\n \"\"\" See cancel_sites_symmetry - this metod does just the same, but it does not set the symmetry property.\"\"\"\n sites = np.empty(len(self), dtype=object)\n used = set()\n occupation = self.info.get('occupancy', {})\n for i in range(len(self)):\n if self._unique_sites is not None:\n site=self._unique_sites[i]\n if site in used:\n site = site.copy()\n else:\n used.add(site)\n else:\n symbol = occupation[i] if i in occupation and occupation[i] else \\\n self.symbols[i]\n site = Site(self, symbol)\n sites[i] = site\n self.sites = sites\n\n @property\n def sites(self):\n \"\"\" The sites property holds all the information for the SPR-KKR package:\n atomic types (including number of semicore and valence electrons),\n occupancy, symmetries, meshes...\n Some of the properties are stored in the ASE atoms properties\n (e.g. 
occupancy, atomic symbol), however, ASE is not able to hold them\n all and/or to describe fully the SPR-KKR options; thus, these properties\n are hold in this array.\n\n The changes made on this array are reflected (as is possible)\n to the ASE properties, but the opposite does not hold - to reflect the changes\n in these properties please create a new Atoms object with given properties.\n \"\"\"\n if self._unique_sites is None:\n self._compute_sites_symmetry()\n return self._unique_sites\n\n @sites.setter\n def sites(self, v):\n \"\"\" Set the sites property and update all other dependent\n properties (symbols, occupancy) according to the sites \"\"\"\n an = np.zeros(len(v), dtype= int)\n occ = {}\n for i,j in enumerate(v):\n occ[i] = j.occupation.as_dict\n an[i] = j.occupation.primary_atomic_number\n self.set_atomic_numbers(an)\n self.info['occupancy'] = occ\n self._unique_sites = v\n\n @property\n def potential(self):\n if self._potential is None:\n self._potential = potentials.Potential.from_atoms(self)\n return self._potential\n\n @potential.setter\n def potential(self, potential):\n self._potential = potential\n\n def reset_sprkkr_potential(self):\n for i in self.sites:\n i.reset()\n if self._potential:\n self._potential.reset(update_atoms = False)\n self._potential.set_from_atoms()\n\n\n\n#at the last - to avoid circular imports\nfrom ..potentials import potentials\n" ]
[ [ "numpy.where", "numpy.unique" ] ]
shobhitagarwal1612/Emotion-Analysis
[ "069086fd528bd06e47521de450c398f00774087f" ]
[ "analyzer/build.py" ]
[ "import pickle\nfrom time import time\n\nfrom sklearn.cross_validation import train_test_split as tts\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.metrics import classification_report as clsr\nfrom sklearn.neural_network._base import identity\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom analyzer.Transformer import NLTKPreprocessor\n\n\n# @timeit\ndef build_and_evaluate(X, y, classifier=SGDClassifier, outpath=None, verbose=True):\n # @timeit\n def build(classifier, X, y=None):\n \"\"\"\n Inner build function that builds a single model.\n \"\"\"\n if isinstance(classifier, type):\n classifier = classifier()\n\n model = Pipeline([\n ('preprocessor', NLTKPreprocessor()),\n ('vectorizer', TfidfVectorizer(\n tokenizer=identity, preprocessor=None, lowercase=False)),\n ('classifier', classifier),\n ])\n\n model.fit(X, y)\n return model\n\n # Label encode the targets\n labels = LabelEncoder()\n y = labels.fit_transform(y)\n\n secs = time()\n\n # Begin evaluation\n if verbose: print(\"Building for evaluation\")\n X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2)\n model = build(classifier, X_train, y_train)\n\n if verbose:\n print(\"Evaluation model fit in {:0.3f} seconds\".format(time() - secs))\n print(\"Classification Report:\\n\")\n\n y_pred = model.predict(X_test)\n print(clsr(y_test, y_pred, target_names=labels.classes_))\n\n secs = time()\n if verbose:\n print(\"Building complete model and saving ...\")\n model = build(classifier, X, y)\n model.labels_ = labels\n\n if verbose:\n print(\"Complete model fit in {:0.3f} seconds\".format(time() - secs))\n\n if outpath:\n with open(outpath, 'wb') as f:\n pickle.dump(model, f)\n\n print(\"Model written out to {}\".format(outpath))\n\n return model\n" ]
[ [ "sklearn.metrics.classification_report", "sklearn.preprocessing.LabelEncoder", "sklearn.cross_validation.train_test_split", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
Myeonghan-Jeong/deep-learning-from-scratch
[ "fef3e327c49593b5df74728a1cba1144948a2999" ]
[ "chapter05/5.7.4_backpropagation_learning.py" ]
[ "from commons.neural_network import TwoLayerNet\nfrom datasets.mnist import load_mnist\nimport numpy as np\n\n(x_train, t_train), (x_test, t_test) = load_mnist(\n normalize=True, one_hot_label=True)\n\nnetwork = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)\n\niters_num = 10000\ntrain_size = x_train.shape[0]\nbatch_size = 100\nlearning_rate = 0.1\n\ntrain_loss_list = []\ntrain_acc_list = []\ntest_acc_list = []\n\niter_per_epoch = max(train_size / batch_size, 1)\n\nfor i in range(iters_num):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n\n # calculate gradients with backpropagation\n grad = network.gradient(x_batch, t_batch)\n\n for key in ('W1', 'b1', 'W2', 'b2'): # renewal\n network.params[key] -= learning_rate * grad[key]\n\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n\n if i % iter_per_epoch == 0:\n train_acc = network.accuracy(x_train, t_train)\n test_acc = network.accuracy(x_test, t_test)\n train_acc_list.append(train_acc)\n test_acc_list.append(test_acc)\n print(train_acc, test_acc)\n" ]
[ [ "numpy.random.choice" ] ]
alexxromero/WAKY-private
[ "aa15d9d138ba22e04628590f2c9583a86f2e54f2" ]
[ "kwakpriv/plotting/plottingtools.py" ]
[ "from __future__ import absolute_import\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\n\nfrom ..w_transform import HaarTransform, InvHaarTransform\n\ndef _zeros_like(obj):\n zeros = [np.zeros_like(lev, dtype=float) for lev in obj]\n return zeros\n\n\n__all__ = ['_findmin', '_findmax', '_BinData', '_NewColorMap',\n '_NSigmaFilter']\n\ndelta = 0.0 # Small number\n\ndef _findmin(array):\n minn = delta\n for i in array:\n if np.min(i) < minn:\n minn = np.min(i)\n return minn\n\ndef _findmax(array):\n maxx = delta\n for i in array:\n if np.max(i) > maxx:\n maxx = np.max(i)\n return maxx\n\ndef _BinData(data, bins):\n hist, edges = np.histogram(a=range(bins), bins=bins, weights=data)\n center = (edges[:-1]+edges[1:])/2.0\n width = edges[1:]-edges[:-1]\n return hist, edges, center, width\n\ndef _NewColorMap():\n R=float(0+172+242)\n G=(41.+181.+104.)\n B=(242.+81.+59.)\n #colors = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] #RGB\n #colors = [(0.172, 0.521, 0.729), (0.870, 0.325, 0.129)]\n colors = [(0.152, 0.552, 0.607),\n (0.666, 0.882, 0.035),\n (0.945, 0.337, 0.074)]\n nbins=2**15\n cmap_name='New'\n cm = LinearSegmentedColormap.from_list(cmap_name, colors, N=nbins)\n return cm\n\n\ndef _NSigmaFilter(data, hypothesis, nsigma,\n nsigma_min=None, nsigma_percent=None):\n\n WaveDec_data = HaarTransform(data)\n DataCoeffs = WaveDec_data[:-1]\n DataFirstTrend = WaveDec_data[-1]\n \n WaveDec_hypo = HaarTransform(hypothesis)\n HypoCoeffs = WaveDec_hypo[:-1]\n HypoFirstTrend = WaveDec_hypo[-1]\n\n Level = len(DataCoeffs)\n\n flatNsigma = []\n flatAbsNsigma = []\n flatDataCoeffs = []\n flatHypoCoeffs = []\n flatLoc = []\n\n count = 0\n for l in range(Level):\n J = 2**(Level-l-1)\n for j in range(J):\n flatNsigma.append(nsigma[l][j])\n flatAbsNsigma.append(abs(nsigma[l][j]))\n flatDataCoeffs.append(DataCoeffs[l][j])\n flatHypoCoeffs.append(HypoCoeffs[l][j])\n flatLoc.append([l, j])\n count += 1\n\n ixsort = np.argsort(flatAbsNsigma)[::-1]\n sortNsigma = [flatNsigma[ix] for ix in ixsort]\n sortDataCoeffs = [flatDataCoeffs[ix] for ix in ixsort]\n sortHypoCoeffs = [flatHypoCoeffs[ix] for ix in ixsort]\n sortLoc = [flatLoc[ix] for ix in ixsort]\n\n keepNsigma = []\n keepDeltaCoeff = []\n keepLoc = []\n if nsigma_min is not None:\n for i in range(len(sortNsigma)):\n if abs(sortNsigma[i]) > nsigma_min:\n keepNsigma.append(sortNsigma[i])\n keepDeltaCoeff.append(sortDataCoeffs[i]-sortHypoCoeffs[i])\n keepLoc.append(sortLoc[i])\n \n elif nsigma_percent is not None:\n net = len(sortNsigma)\n netkeep = int(np.ceil(net*nsigma_percent))\n keepNsigma = sortNsigma[:netkeep]\n keepDeltaCoeff = np.subtract(sortDataCoeffs[:netkeep],\n sortHypoCoeffs[:netkeep])\n keepLoc = sortLoc[:netkeep]\n\n else:\n keepNsigma = sortNsigma\n keepDeltaCoeff = np.subtract(sortDataCoeffs,\n sortHypoCoeffs)\n keepLoc = sortLoc\n\n keep = _zeros_like(WaveDec_data)\n for i in range(len(keepDeltaCoeff)):\n l = keepLoc[i][0]\n j = keepLoc[i][1]\n keep[l][j] = keepDeltaCoeff[i]\n keep[-1][0] = DataFirstTrend-HypoFirstTrend\n\n return keep\n\n\n\n" ]
[ [ "numpy.max", "numpy.zeros_like", "numpy.ceil", "numpy.min", "numpy.subtract", "numpy.argsort", "matplotlib.colors.LinearSegmentedColormap.from_list" ] ]
lemairecarl/pytorch-lightning
[ "85304d4672a9ed24a16f7f5b2abaa34148ab86f4" ]
[ "tests/utilities/test_deepspeed_collate_checkpoint.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nimport torch\n\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.plugins import DeepSpeedStrategy\nfrom pytorch_lightning.utilities.deepspeed import convert_zero_checkpoint_to_fp32_state_dict\nfrom tests.helpers.boring_model import BoringModel\nfrom tests.helpers.runif import RunIf\n\n\n@RunIf(min_gpus=2, deepspeed=True, standalone=True)\ndef test_deepspeed_collate_checkpoint(tmpdir):\n \"\"\"Test to ensure that with DeepSpeed Stage 3 we can collate the sharded checkpoints into a single file.\"\"\"\n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16\n )\n trainer.fit(model)\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n checkpoint_path = trainer.strategy.broadcast(checkpoint_path)\n trainer.save_checkpoint(checkpoint_path)\n trainer.strategy.barrier()\n if trainer.is_global_zero:\n # ensure function call works\n output_path = os.path.join(tmpdir, \"single_model.pt\")\n convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, output_path)\n _assert_checkpoint_equal(model, output_path)\n\n\ndef _assert_checkpoint_equal(model, output_path):\n assert os.path.exists(output_path)\n single_output = torch.load(output_path)\n state_dict = model.state_dict()\n for orig_param, saved_model_param in zip(state_dict.values(), single_output[\"state_dict\"].values()):\n if model.dtype == torch.half:\n # moved model to float32 for comparison with single fp32 saved weights\n saved_model_param = saved_model_param.half()\n assert torch.equal(orig_param.cpu(), saved_model_param)\n" ]
[ [ "torch.load" ] ]
DemonDamon/mask-detection-based-on-tf2odapi
[ "192ae544169c1230c21141c033800aa1bd94e9b6" ]
[ "object_detection/utils/target_assigner_utils_test.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for utils.target_assigner_utils.\"\"\"\r\n\r\nfrom absl.testing import parameterized\r\nimport numpy as np\r\nimport tensorflow.compat.v1 as tf\r\n\r\nfrom object_detection.utils import target_assigner_utils as ta_utils\r\nfrom object_detection.utils import test_case\r\n\r\n\r\nclass TargetUtilTest(parameterized.TestCase, test_case.TestCase):\r\n\r\n def test_image_shape_to_grids(self):\r\n def graph_fn():\r\n (y_grid, x_grid) = ta_utils.image_shape_to_grids(height=2, width=3)\r\n return y_grid, x_grid\r\n\r\n expected_y_grid = np.array([[0, 0, 0], [1, 1, 1]])\r\n expected_x_grid = np.array([[0, 1, 2], [0, 1, 2]])\r\n\r\n y_grid, x_grid = self.execute(graph_fn, [])\r\n\r\n np.testing.assert_array_equal(y_grid, expected_y_grid)\r\n np.testing.assert_array_equal(x_grid, expected_x_grid)\r\n\r\n @parameterized.parameters((False,), (True,))\r\n def test_coordinates_to_heatmap(self, sparse):\r\n if not hasattr(tf, 'tensor_scatter_nd_max'):\r\n self.skipTest('Cannot test function due to old TF version.')\r\n\r\n def graph_fn():\r\n (y_grid, x_grid) = ta_utils.image_shape_to_grids(height=3, width=5)\r\n y_coordinates = tf.constant([1.5, 0.5], dtype=tf.float32)\r\n x_coordinates = tf.constant([2.5, 4.5], dtype=tf.float32)\r\n sigma = tf.constant([0.1, 0.5], dtype=tf.float32)\r\n channel_onehot = tf.constant([[1, 0, 0], [0, 1, 0]], dtype=tf.float32)\r\n channel_weights = tf.constant([1, 1], dtype=tf.float32)\r\n heatmap = ta_utils.coordinates_to_heatmap(y_grid, x_grid, y_coordinates,\r\n x_coordinates, sigma,\r\n channel_onehot,\r\n channel_weights, sparse=sparse)\r\n return heatmap\r\n\r\n heatmap = self.execute(graph_fn, [])\r\n # Peak at (1, 2) for the first class.\r\n self.assertAlmostEqual(1.0, heatmap[1, 2, 0])\r\n # Peak at (0, 4) for the second class.\r\n self.assertAlmostEqual(1.0, heatmap[0, 4, 1])\r\n\r\n def test_compute_floor_offsets_with_indices_onlysource(self):\r\n\r\n def graph_fn():\r\n y_source = tf.constant([1.5, 0.3], dtype=tf.float32)\r\n x_source = tf.constant([2.5, 4.2], dtype=tf.float32)\r\n (offsets, indices) = ta_utils.compute_floor_offsets_with_indices(\r\n y_source, x_source)\r\n\r\n return offsets, indices\r\n\r\n offsets, indices = self.execute(graph_fn, [])\r\n\r\n np.testing.assert_array_almost_equal(offsets,\r\n np.array([[0.5, 0.5], [0.3, 0.2]]))\r\n np.testing.assert_array_almost_equal(indices,\r\n np.array([[1, 2], [0, 4]]))\r\n\r\n def test_compute_floor_offsets_with_indices_and_targets(self):\r\n\r\n def graph_fn():\r\n y_source = tf.constant([1.5, 0.3], dtype=tf.float32)\r\n x_source = tf.constant([2.5, 4.2], dtype=tf.float32)\r\n y_target = tf.constant([2.1, 0.1], dtype=tf.float32)\r\n x_target = tf.constant([1.2, 4.5], dtype=tf.float32)\r\n (offsets, indices) = ta_utils.compute_floor_offsets_with_indices(\r\n y_source, 
x_source, y_target, x_target)\r\n return offsets, indices\r\n\r\n offsets, indices = self.execute(graph_fn, [])\r\n\r\n np.testing.assert_array_almost_equal(offsets,\r\n np.array([[1.1, -0.8], [0.1, 0.5]]))\r\n np.testing.assert_array_almost_equal(indices, np.array([[1, 2], [0, 4]]))\r\n\r\n def test_compute_floor_offsets_with_indices_multisources(self):\r\n\r\n def graph_fn():\r\n y_source = tf.constant([[1.0, 0.0], [2.0, 3.0]], dtype=tf.float32)\r\n x_source = tf.constant([[2.0, 4.0], [3.0, 3.0]], dtype=tf.float32)\r\n y_target = tf.constant([2.1, 0.1], dtype=tf.float32)\r\n x_target = tf.constant([1.2, 4.5], dtype=tf.float32)\r\n (offsets, indices) = ta_utils.compute_floor_offsets_with_indices(\r\n y_source, x_source, y_target, x_target)\r\n return offsets, indices\r\n\r\n offsets, indices = self.execute(graph_fn, [])\r\n # Offset from the first source to target.\r\n np.testing.assert_array_almost_equal(offsets[:, 0, :],\r\n np.array([[1.1, -0.8], [-1.9, 1.5]]))\r\n # Offset from the second source to target.\r\n np.testing.assert_array_almost_equal(offsets[:, 1, :],\r\n np.array([[2.1, -2.8], [-2.9, 1.5]]))\r\n # Indices from the first source to target.\r\n np.testing.assert_array_almost_equal(indices[:, 0, :],\r\n np.array([[1, 2], [2, 3]]))\r\n # Indices from the second source to target.\r\n np.testing.assert_array_almost_equal(indices[:, 1, :],\r\n np.array([[0, 4], [3, 3]]))\r\n\r\n def test_get_valid_keypoints_mask(self):\r\n\r\n def graph_fn():\r\n class_onehot = tf.constant(\r\n [[0, 0, 1, 0, 0],\r\n [0, 1, 0, 0, 0],\r\n [0, 0, 1, 0, 1]], dtype=tf.float32)\r\n keypoints = tf.constant(\r\n [[0.1, float('nan'), 0.2, 0.0],\r\n [0.0, 0.0, 0.1, 0.9],\r\n [3.2, 4.3, float('nan'), 0.2]],\r\n dtype=tf.float32)\r\n keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)\r\n mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(\r\n keypoint_coordinates=keypoint_coordinates,\r\n class_id=2,\r\n class_onehot=class_onehot,\r\n keypoint_indices=[1, 2])\r\n\r\n return mask, keypoints_nan_to_zeros\r\n\r\n keypoints = np.array([[0.0, 0.2],\r\n [0.0, 0.1],\r\n [4.3, 0.0]])\r\n expected_mask = np.array([[0, 1], [0, 0], [1, 0]])\r\n expected_keypoints = np.stack([keypoints, keypoints], axis=2)\r\n mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])\r\n\r\n np.testing.assert_array_equal(mask, expected_mask)\r\n np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,\r\n expected_keypoints)\r\n\r\n def test_get_valid_keypoints_with_mask(self):\r\n def graph_fn():\r\n class_onehot = tf.constant(\r\n [[0, 0, 1, 0, 0],\r\n [0, 1, 0, 0, 0],\r\n [0, 0, 1, 0, 1]], dtype=tf.float32)\r\n keypoints = tf.constant(\r\n [[0.1, float('nan'), 0.2, 0.0],\r\n [0.0, 0.0, 0.1, 0.9],\r\n [3.2, 4.3, float('nan'), 0.2]],\r\n dtype=tf.float32)\r\n keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)\r\n weights = tf.constant([0.0, 0.0, 1.0])\r\n mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(\r\n keypoint_coordinates=keypoint_coordinates,\r\n class_id=2,\r\n class_onehot=class_onehot,\r\n class_weights=weights,\r\n keypoint_indices=[1, 2])\r\n return mask, keypoints_nan_to_zeros\r\n\r\n expected_mask = np.array([[0, 0], [0, 0], [1, 0]])\r\n keypoints = np.array([[0.0, 0.2],\r\n [0.0, 0.1],\r\n [4.3, 0.0]])\r\n expected_keypoints = np.stack([keypoints, keypoints], axis=2)\r\n mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])\r\n\r\n np.testing.assert_array_equal(mask, expected_mask)\r\n 
np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,\r\n expected_keypoints)\r\n\r\n def test_blackout_pixel_weights_by_box_regions(self):\r\n def graph_fn():\r\n boxes = tf.constant(\r\n [[0.0, 0.0, 5, 5], [0.0, 0.0, 10.0, 20.0], [6.0, 12.0, 8.0, 18.0]],\r\n dtype=tf.float32)\r\n blackout = tf.constant([True, False, True], dtype=tf.bool)\r\n blackout_pixel_weights_by_box_regions = tf.function(\r\n ta_utils.blackout_pixel_weights_by_box_regions)\r\n output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)\r\n return output\r\n\r\n output = self.execute(graph_fn, [])\r\n # All zeros in region [0:6, 0:6].\r\n self.assertAlmostEqual(np.sum(output[0:6, 0:6]), 0.0)\r\n # All zeros in region [12:19, 6:9].\r\n self.assertAlmostEqual(np.sum(output[6:9, 12:19]), 0.0)\r\n # All other pixel weights should be 1.0.\r\n # 20 * 10 - 6 * 6 - 3 * 7 = 143.0\r\n self.assertAlmostEqual(np.sum(output), 143.0)\r\n\r\n def test_blackout_pixel_weights_by_box_regions_zero_instance(self):\r\n def graph_fn():\r\n boxes = tf.zeros([0, 4], dtype=tf.float32)\r\n blackout = tf.zeros([0], dtype=tf.bool)\r\n blackout_pixel_weights_by_box_regions = tf.function(\r\n ta_utils.blackout_pixel_weights_by_box_regions)\r\n output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)\r\n return output\r\n\r\n output = self.execute(graph_fn, [])\r\n # The output should be all 1s since there's no annotation provided.\r\n np.testing.assert_array_equal(output, np.ones([10, 20], dtype=np.float32))\r\n\r\n def test_get_surrounding_grids(self):\r\n\r\n def graph_fn():\r\n y_coordinates = tf.constant([0.5], dtype=tf.float32)\r\n x_coordinates = tf.constant([4.5], dtype=tf.float32)\r\n output = ta_utils.get_surrounding_grids(\r\n height=3,\r\n width=5,\r\n y_coordinates=y_coordinates,\r\n x_coordinates=x_coordinates,\r\n radius=1)\r\n return output\r\n\r\n y_indices, x_indices, valid = self.execute(graph_fn, [])\r\n\r\n # Five neighboring indices: [-1, 4] (out of bound), [0, 3], [0, 4],\r\n # [0, 5] (out of bound), [1, 4].\r\n np.testing.assert_array_almost_equal(\r\n y_indices,\r\n np.array([[0.0, 0.0, 0.0, 0.0, 1.0]]))\r\n np.testing.assert_array_almost_equal(\r\n x_indices,\r\n np.array([[0.0, 3.0, 4.0, 0.0, 4.0]]))\r\n self.assertAllEqual(valid, [[False, True, True, False, True]])\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n" ]
[ [ "numpy.array", "tensorflow.compat.v1.zeros", "numpy.sum", "numpy.testing.assert_array_equal", "numpy.ones", "tensorflow.compat.v1.stack", "numpy.testing.assert_array_almost_equal", "numpy.stack", "tensorflow.compat.v1.test.main", "tensorflow.compat.v1.function", "tensorflow.compat.v1.constant" ] ]
junhyeokahn/ASE389
[ "a57d668f968da1db56f0dfe8dadad548ad631f33" ]
[ "pnc/manipulator_pnc/manipulator_interface.py" ]
[ "import os\nimport sys\ncwd = os.getcwd()\nsys.path.append(cwd)\nimport time, math\n\nimport numpy as np\n\nfrom pnc.interface import Interface\nfrom config.manipulator_config import ManipulatorConfig\nfrom pnc.robot_system.pinocchio_robot_system import PinocchioRobotSystem\n\n\nclass ManipulatorInterface(Interface):\n def __init__(self):\n super(ManipulatorInterface, self).__init__()\n\n self._robot = PinocchioRobotSystem(\n cwd + \"/robot_model/manipulator/three_link_manipulator.urdf\",\n cwd + \"/robot_model/manipulator\", True,\n ManipulatorConfig.PRINT_ROBOT_INFO)\n\n def get_command(self, sensor_data):\n # Update Robot\n self._robot.update_system(\n sensor_data[\"base_com_pos\"], sensor_data[\"base_com_quat\"],\n sensor_data[\"base_com_lin_vel\"], sensor_data[\"base_com_ang_vel\"],\n sensor_data[\"base_joint_pos\"], sensor_data[\"base_joint_quat\"],\n sensor_data[\"base_joint_lin_vel\"],\n sensor_data[\"base_joint_ang_vel\"], sensor_data[\"joint_pos\"],\n sensor_data[\"joint_vel\"])\n\n # Operational Space Control\n jtrq_cmd = self._compute_osc_command()\n jpos_cmd = np.zeros_like(jtrq_cmd)\n jvel_cmd = np.zeros_like(jtrq_cmd)\n\n # Compute Cmd\n command = self._robot.create_cmd_ordered_dict(jpos_cmd, jvel_cmd,\n jtrq_cmd)\n\n # Increase time variables\n self._count += 1\n self._running_time += ManipulatorConfig.DT\n\n return command\n\n def _compute_osc_command(self):\n ## TODO : Implement Operational Space Control\n jtrq = np.zeros(self._robot.n_a)\n\n return jtrq\n" ]
[ [ "numpy.zeros_like", "numpy.zeros" ] ]
jairad26/tobler
[ "a95ef53b1e366f4cec7c1b84063a2fe810dccb8e" ]
[ "tobler/area_weighted/area_interpolate.py" ]
[ "\"\"\"\nArea Weighted Interpolation\n\n\"\"\"\n\nimport numpy as np\nimport geopandas as gpd\nfrom ._vectorized_raster_interpolation import _fast_append_profile_in_gdf\nimport warnings\nfrom scipy.sparse import dok_matrix, diags, coo_matrix\nimport pandas as pd\nimport os\n\nfrom tobler.util.util import _check_crs, _nan_check, _inf_check, _check_presence_of_crs\n\n\ndef _chunk_dfs(geoms_to_chunk, geoms_full, n_jobs):\n chunk_size = geoms_to_chunk.shape[0] // n_jobs + 1\n for i in range(n_jobs):\n start = i * chunk_size\n yield geoms_to_chunk.iloc[start : start + chunk_size], geoms_full\n\n\ndef _index_n_query(geoms1, geoms2):\n # Pick largest for STRTree, query_bulk the smallest\n if geoms1.shape[0] > geoms2.shape[0]:\n large = geoms1\n small = geoms2\n else:\n large = geoms2\n small = geoms1\n # Build tree + query\n qry_polyIDs, tree_polyIDs = large.sindex.query_bulk(small, predicate=\"intersects\")\n # Remap IDs to global\n large_global_ids = large.iloc[tree_polyIDs].index.values\n small_global_ids = small.iloc[qry_polyIDs].index.values\n # Return always global IDs for geoms1, geoms2\n if geoms1.shape[0] > geoms2.shape[0]:\n return np.array([large_global_ids, small_global_ids]).T\n else:\n return np.array([small_global_ids, large_global_ids]).T\n\n\ndef _chunk_polys(id_pairs, geoms_left, geoms_right, n_jobs):\n chunk_size = id_pairs.shape[0] // n_jobs + 1\n for i in range(n_jobs):\n start = i * chunk_size\n chunk1 = geoms_left.values.data[id_pairs[start : start + chunk_size, 0]]\n chunk2 = geoms_right.values.data[id_pairs[start : start + chunk_size, 1]]\n yield chunk1, chunk2\n\n\ndef _intersect_area_on_chunk(geoms1, geoms2):\n import pygeos\n\n areas = pygeos.area(pygeos.intersection(geoms1, geoms2))\n return areas\n\n\ndef _area_tables_binning_parallel(source_df, target_df, n_jobs=-1):\n \"\"\"Construct area allocation and source-target correspondence tables using\n a parallel spatial indexing approach\n ...\n\n NOTE: currently, the largest df is chunked and the other one is shipped in\n full to each core; within each process, the spatial index is built for the\n largest set of geometries, and the other one used for `query_bulk`\n\n Parameters\n ----------\n source_df : geopandas.GeoDataFrame\n GeoDataFrame containing input data and polygons\n target_df : geopandas.GeoDataFramee\n GeoDataFrame defining the output geometries\n n_jobs : int\n [Optional. Default=-1] Number of processes to run in parallel. 
If -1,\n this is set to the number of CPUs available\n\n Returns\n -------\n tables : scipy.sparse.dok_matrix\n\n \"\"\"\n from joblib import Parallel, delayed, parallel_backend\n\n if _check_crs(source_df, target_df):\n pass\n else:\n return None\n if n_jobs == -1:\n n_jobs = os.cpu_count()\n\n df1 = source_df.copy()\n df2 = target_df.copy()\n\n # Chunk the largest, ship the smallest in full\n if df1.shape[0] > df2.shape[1]:\n to_chunk = df1\n df_full = df2\n else:\n to_chunk = df2\n df_full = df1\n\n # Spatial index query\n ## Reindex on positional IDs\n to_workers = _chunk_dfs(\n gpd.GeoSeries(to_chunk.geometry.values, crs=to_chunk.crs),\n gpd.GeoSeries(df_full.geometry.values, crs=df_full.crs),\n n_jobs,\n )\n\n with parallel_backend(\"loky\", inner_max_num_threads=1):\n worker_out = Parallel(n_jobs=n_jobs)(\n delayed(_index_n_query)(*chunk_pair) for chunk_pair in to_workers\n )\n\n ids_src, ids_tgt = np.concatenate(worker_out).T\n\n # Intersection + area calculation\n chunks_to_intersection = _chunk_polys(\n np.vstack([ids_src, ids_tgt]).T, df1.geometry, df2.geometry, n_jobs\n )\n with parallel_backend(\"loky\", inner_max_num_threads=1):\n worker_out = Parallel(n_jobs=n_jobs)(\n delayed(_intersect_area_on_chunk)(*chunk_pair)\n for chunk_pair in chunks_to_intersection\n )\n areas = np.concatenate(worker_out)\n\n # Build DOK table\n table = coo_matrix(\n (areas, (ids_src, ids_tgt),),\n shape=(df1.shape[0], df2.shape[0]),\n dtype=np.float32,\n )\n table = table.todok()\n return table\n\n\ndef _area_tables_binning(source_df, target_df, spatial_index):\n \"\"\"Construct area allocation and source-target correspondence tables using a spatial indexing approach\n ...\n\n NOTE: this currently relies on Geopandas' spatial index machinery\n\n Parameters\n ----------\n source_df : geopandas.GeoDataFrame\n GeoDataFrame containing input data and polygons\n target_df : geopandas.GeoDataFramee\n GeoDataFrame defining the output geometries\n spatial_index : str\n Spatial index to use to build the allocation of area from source to\n target tables. It currently support the following values:\n - \"source\": build the spatial index on `source_df`\n - \"target\": build the spatial index on `target_df`\n - \"auto\": attempts to guess the most efficient alternative.\n Currently, this option uses the largest table to build the\n index, and performs a `bulk_query` on the shorter table.\n\n Returns\n -------\n tables : scipy.sparse.dok_matrix\n\n \"\"\"\n if _check_crs(source_df, target_df):\n pass\n else:\n return None\n\n df1 = source_df.copy()\n df2 = target_df.copy()\n\n # it is generally more performant to use the longer df as spatial index\n if spatial_index == \"auto\":\n if df1.shape[0] > df2.shape[0]:\n spatial_index = \"source\"\n else:\n spatial_index = \"target\"\n\n if spatial_index == \"source\":\n ids_tgt, ids_src = df1.sindex.query_bulk(df2.geometry, predicate=\"intersects\")\n elif spatial_index == \"target\":\n ids_src, ids_tgt = df2.sindex.query_bulk(df1.geometry, predicate=\"intersects\")\n else:\n raise ValueError(\n f\"'{spatial_index}' is not a valid option. 
Use 'auto', 'source' or 'target'.\"\n )\n\n areas = df1.geometry.values[ids_src].intersection(df2.geometry.values[ids_tgt]).area\n\n table = coo_matrix(\n (areas, (ids_src, ids_tgt),),\n shape=(df1.shape[0], df2.shape[0]),\n dtype=np.float32,\n )\n\n table = table.todok()\n\n return table\n\n\ndef _area_tables(source_df, target_df):\n \"\"\"\n Construct area allocation and source-target correspondence tables.\n\n Parameters\n ----------\n source_df : geopandas.GeoDataFrame\n target_df : geopandas.GeoDataFrame\n\n Returns\n -------\n tables : tuple (optional)\n two 2-D numpy arrays\n SU: area of intersection of source geometry i with union geometry j\n UT: binary mapping of union geometry j to target geometry t\n\n\n\n Notes\n -----\n The assumption is both dataframes have the same coordinate reference system.\n\n Union geometry is a geometry formed by the intersection of a source geometry and a target geometry\n\n SU Maps source geometry to union geometry, UT maps union geometry to target geometry\n\n \"\"\"\n if _check_crs(source_df, target_df):\n pass\n else:\n return None\n source_df = source_df.copy()\n source_df = source_df.copy()\n\n n_s = source_df.shape[0]\n n_t = target_df.shape[0]\n _left = np.arange(n_s)\n _right = np.arange(n_t)\n source_df.loc[:, \"_left\"] = _left # create temporary index for union\n target_df.loc[:, \"_right\"] = _right # create temporary index for union\n res_union = gpd.overlay(source_df, target_df, how=\"union\")\n n_u, _ = res_union.shape\n SU = np.zeros(\n (n_s, n_u)\n ) # holds area of intersection of source geom with union geom\n UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom\n for index, row in res_union.iterrows():\n # only union polygons that intersect both a source and a target geometry matter\n if not np.isnan(row[\"_left\"]) and not np.isnan(row[\"_right\"]):\n s_id = int(row[\"_left\"])\n t_id = int(row[\"_right\"])\n SU[s_id, index] = row[row.geometry.name].area\n UT[index, t_id] = 1\n source_df.drop([\"_left\"], axis=1, inplace=True)\n target_df.drop([\"_right\"], axis=1, inplace=True)\n return SU, UT\n\n\ndef _area_interpolate_binning(\n source_df,\n target_df,\n extensive_variables=None,\n intensive_variables=None,\n table=None,\n allocate_total=True,\n spatial_index=\"auto\",\n n_jobs=1,\n categorical_variables=None,\n):\n \"\"\"\n Area interpolation for extensive, intensive and categorical variables.\n\n Parameters\n ----------\n source_df : geopandas.GeoDataFrame\n target_df : geopandas.GeoDataFrame\n extensive_variables : list\n [Optional. Default=None] Columns in dataframes for extensive variables\n intensive_variables : list\n [Optional. Default=None] Columns in dataframes for intensive variables\n table : scipy.sparse.dok_matrix\n [Optional. Default=None] Area allocation source-target correspondence\n table. If not provided, it will be built from `source_df` and\n `target_df` using `tobler.area_interpolate._area_tables_binning`\n allocate_total : boolean\n [Optional. Default=True] True if total value of source area should be\n allocated. False if denominator is area of i. Note that the two cases\n would be identical when the area of the source polygon is exhausted by\n intersections. See Notes for more details.\n spatial_index : str\n [Optional. Default=\"auto\"] Spatial index to use to build the\n allocation of area from source to target tables. 
It currently support\n the following values:\n - \"source\": build the spatial index on `source_df`\n - \"target\": build the spatial index on `target_df`\n - \"auto\": attempts to guess the most efficient alternative.\n Currently, this option uses the largest table to build the\n index, and performs a `bulk_query` on the shorter table.\n This argument is ignored if n_jobs>1 (or n_jobs=-1).\n n_jobs : int\n [Optional. Default=1] Number of processes to run in parallel to\n generate the area allocation. If -1, this is set to the number of CPUs\n available. If `table` is passed, this is ignored.\n NOTE: as of Jan'21 multi-core functionality requires master versions\n of `pygeos` and `geopandas`.\n categorical_variables : list\n [Optional. Default=None] Columns in dataframes for categorical variables\n\n Returns\n -------\n estimates : geopandas.GeoDataFrame\n new geodaraframe with interpolated variables as columns and target_df geometry\n as output geometry\n\n Notes\n -----\n The assumption is both dataframes have the same coordinate reference system.\n For an extensive variable, the estimate at target polygon j (default case) is:\n\n .. math::\n v_j = \\\\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / \\\\sum_k a_{i,k}\n\n If the area of the source polygon is not exhausted by intersections with\n target polygons and there is reason to not allocate the complete value of\n an extensive attribute, then setting allocate_total=False will use the\n following weights:\n\n .. math::\n v_j = \\\\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / a_i\n\n where a_i is the total area of source polygon i.\n For an intensive variable, the estimate at target polygon j is:\n\n .. math::\n v_j = \\\\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / \\\\sum_k a_{k,j}\n\n For categorical variables, the estimate returns ratio of presence of each\n unique category.\n \"\"\"\n source_df = source_df.copy()\n target_df = target_df.copy()\n\n if _check_crs(source_df, target_df):\n pass\n else:\n return None\n\n if table is None:\n if n_jobs == 1:\n table = _area_tables_binning(source_df, target_df, spatial_index)\n else:\n table = _area_tables_binning_parallel(source_df, target_df, n_jobs=n_jobs)\n\n den = source_df[source_df.geometry.name].area.values\n if allocate_total:\n den = np.asarray(table.sum(axis=1))\n den = den + (den == 0)\n den = 1.0 / den\n n = den.shape[0]\n den = den.reshape((n,))\n den = diags([den], [0])\n weights = den.dot(table) # row standardize table\n\n dfs = []\n extensive = []\n if extensive_variables:\n for variable in extensive_variables:\n vals = _nan_check(source_df, variable)\n vals = _inf_check(source_df, variable)\n estimates = diags([vals], [0]).dot(weights)\n estimates = estimates.sum(axis=0)\n extensive.append(estimates.tolist()[0])\n\n extensive = np.asarray(extensive)\n extensive = np.array(extensive)\n extensive = pd.DataFrame(extensive.T, columns=extensive_variables)\n\n area = np.asarray(table.sum(axis=0))\n den = 1.0 / (area + (area == 0))\n n, k = den.shape\n den = den.reshape((k,))\n den = diags([den], [0])\n weights = table.dot(den)\n\n intensive = []\n if intensive_variables:\n for variable in intensive_variables:\n vals = _nan_check(source_df, variable)\n vals = _inf_check(source_df, variable)\n n = vals.shape[0]\n vals = vals.reshape((n,))\n estimates = diags([vals], [0])\n estimates = estimates.dot(weights).sum(axis=0)\n intensive.append(estimates.tolist()[0])\n\n intensive = np.asarray(intensive)\n intensive = pd.DataFrame(intensive.T, columns=intensive_variables)\n\n if 
categorical_variables:\n categorical = {}\n for variable in categorical_variables:\n unique = source_df[variable].unique()\n for value in unique:\n mask = source_df[variable] == value\n categorical[f\"{variable}_{value}\"] = np.asarray(\n table[mask].sum(axis=0)\n )[0]\n\n categorical = pd.DataFrame(categorical)\n categorical = categorical.div(target_df.area, axis=\"rows\")\n\n if extensive_variables:\n dfs.append(extensive)\n if intensive_variables:\n dfs.append(intensive)\n if categorical_variables:\n dfs.append(categorical)\n\n df = pd.concat(dfs, axis=1)\n df[\"geometry\"] = target_df[target_df.geometry.name].reset_index(drop=True)\n df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))\n return df\n\n\ndef _area_interpolate(\n source_df,\n target_df,\n extensive_variables=None,\n intensive_variables=None,\n tables=None,\n allocate_total=True,\n):\n \"\"\"\n Area interpolation for extensive and intensive variables.\n\n Parameters\n ----------\n source_df : geopandas.GeoDataFrame (required)\n geodataframe with polygon geometries\n target_df : geopandas.GeoDataFrame (required)\n geodataframe with polygon geometries\n extensive_variables : list, (optional)\n columns in dataframes for extensive variables\n intensive_variables : list, (optional)\n columns in dataframes for intensive variables\n tables : tuple (optional)\n two 2-D numpy arrays\n SU: area of intersection of source geometry i with union geometry j\n UT: binary mapping of union geometry j to target geometry t\n allocate_total : boolean\n True if total value of source area should be allocated.\n False if denominator is area of i. Note that the two cases\n would be identical when the area of the source polygon is\n exhausted by intersections. See Notes for more details.\n\n Returns\n -------\n estimates : geopandas.GeoDataFrame\n new geodaraframe with interpolated variables as columns and target_df geometry\n as output geometry\n\n Notes\n -----\n The assumption is both dataframes have the same coordinate reference system.\n\n\n For an extensive variable, the estimate at target polygon j (default case) is:\n\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / \\sum_k a_{i,k}\n\n\n If the area of the source polygon is not exhausted by intersections with\n target polygons and there is reason to not allocate the complete value of\n an extensive attribute, then setting allocate_total=False will use the\n following weights:\n\n\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / a_i\n\n where a_i is the total area of source polygon i.\n\n\n For an intensive variable, the estimate at target polygon j is:\n\n v_j = \\sum_i v_i w_{i,j}\n\n w_{i,j} = a_{i,j} / \\sum_k a_{k,j}\n\n \"\"\"\n source_df = source_df.copy()\n target_df = target_df.copy()\n\n if _check_crs(source_df, target_df):\n pass\n else:\n return None\n\n if tables is None:\n SU, UT = _area_tables(source_df, target_df)\n else:\n SU, UT = tables\n den = source_df[source_df.geometry.name].area.values\n if allocate_total:\n den = SU.sum(axis=1)\n den = den + (den == 0)\n weights = np.dot(np.diag(1 / den), SU)\n\n dfs = []\n extensive = []\n if extensive_variables:\n for variable in extensive_variables:\n vals = _nan_check(source_df, variable)\n vals = _inf_check(source_df, variable)\n estimates = np.dot(np.diag(vals), weights)\n estimates = np.dot(estimates, UT)\n estimates = estimates.sum(axis=0)\n extensive.append(estimates)\n extensive = np.array(extensive)\n extensive = pd.DataFrame(extensive.T, columns=extensive_variables)\n\n ST = np.dot(SU, UT)\n area = ST.sum(axis=0)\n den 
= np.diag(1.0 / (area + (area == 0)))\n weights = np.dot(ST, den)\n intensive = []\n if intensive_variables:\n for variable in intensive_variables:\n vals = _nan_check(source_df, variable)\n vals = _inf_check(source_df, variable)\n vals.shape = (len(vals), 1)\n est = (vals * weights).sum(axis=0)\n intensive.append(est)\n intensive = np.array(intensive)\n intensive = pd.DataFrame(intensive.T, columns=intensive_variables)\n\n if extensive_variables:\n dfs.append(extensive)\n if intensive_variables:\n dfs.append(intensive)\n\n df = pd.concat(dfs, axis=1)\n df[\"geometry\"] = target_df[target_df.geometry.name].reset_index(drop=True)\n df = gpd.GeoDataFrame(df.replace(np.inf, np.nan))\n return df\n\n\ndef _area_tables_raster(\n source_df, target_df, raster_path, codes=[21, 22, 23, 24], force_crs_match=True\n):\n \"\"\"\n Construct area allocation and source-target correspondence tables according to a raster 'populated' areas\n\n Parameters\n ----------\n source_df : geopandas.GeoDataFrame\n geeodataframe with geometry column of polygon type\n target_df : geopandas.GeoDataFrame\n geodataframe with geometry column of polygon type\n raster_path : str\n the path to the associated raster image.\n codes : list\n list of integer code values that should be considered as 'populated'.\n Since this draw inspiration using the National Land Cover Database (NLCD), the default is 21 (Developed, Open Space), 22 (Developed, Low Intensity), 23 (Developed, Medium Intensity) and 24 (Developed, High Intensity).\n The description of each code can be found here: https://www.mrlc.gov/sites/default/files/metadata/landcover.html\n Only taken into consideration for harmonization raster based.\n force_crs_match : bool (default is True)\n Whether the Coordinate Reference System (CRS) of the polygon will be reprojected to the CRS of the raster file.\n It is recommended to let this argument as True.\n\n Returns\n -------\n tables: tuple (optional)\n two 2-D numpy arrays\n SU: area of intersection of source geometry i with union geometry j\n UT: binary mapping of union geometry j to target geometry t\n\n Notes\n -----\n The assumption is both dataframes have the same coordinate reference system.\n\n Union geometry is a geometry formed by the intersection of a source geometry and a target geometry\n\n SU Maps source geometry to union geometry, UT maps union geometry to target geometry\n\n\n\n \"\"\"\n\n if _check_crs(source_df, target_df):\n pass\n else:\n return None\n source_df = source_df.copy()\n target_df = target_df.copy()\n n_s = source_df.shape[0]\n n_t = target_df.shape[0]\n _left = np.arange(n_s)\n _right = np.arange(n_t)\n source_df.loc[:, \"_left\"] = _left # create temporary index for union\n target_df.loc[:, \"_right\"] = _right # create temporary index for union\n\n res_union_pre = gpd.overlay(source_df, target_df, how=\"union\")\n\n # Establishing a CRS for the generated union\n warnings.warn(\n \"The CRS for the generated union will be set to be the same as source_df.\"\n )\n res_union_pre.crs = source_df.crs\n\n # The 'append_profile_in_gdf' function is present in nlcd.py script\n res_union = _fast_append_profile_in_gdf(\n res_union_pre, raster_path, force_crs_match=force_crs_match\n )\n\n str_codes = [str(i) for i in codes]\n str_list = [\"Type_\" + i for i in str_codes]\n\n # Extract list of code names that actually appear in the appended dataset\n str_list_ok = [col for col in res_union.columns if col in str_list]\n\n res_union[\"Populated_Pixels\"] = res_union[str_list_ok].sum(axis=1)\n\n n_u, _ = 
res_union.shape\n SU = np.zeros(\n (n_s, n_u)\n ) # holds area of intersection of source geom with union geom\n UT = np.zeros((n_u, n_t)) # binary table mapping union geom to target geom\n\n for index, row in res_union.iterrows():\n # only union polygons that intersect both a source and a target geometry matter\n if not np.isnan(row[\"_left\"]) and not np.isnan(row[\"_right\"]):\n s_id = int(row[\"_left\"])\n t_id = int(row[\"_right\"])\n SU[s_id, index] = row[\"Populated_Pixels\"]\n UT[index, t_id] = 1\n source_df.drop([\"_left\"], axis=1, inplace=True)\n target_df.drop([\"_right\"], axis=1, inplace=True)\n return SU, UT\n" ]
[ [ "numpy.concatenate", "scipy.sparse.coo_matrix", "numpy.array", "numpy.dot", "numpy.isnan", "scipy.sparse.diags", "numpy.zeros", "numpy.asarray", "pandas.DataFrame", "numpy.arange", "pandas.concat", "numpy.diag", "numpy.vstack" ] ]
quantrocket-llc/trading-calendars
[ "b72630cbcb288601c62e61ebe002a9043f9a3112" ]
[ "trading_calendars/tests/test_xshg_calendar.py" ]
[ "from unittest import TestCase\n\nimport pandas as pd\nfrom pytz import UTC\n\nfrom trading_calendars.exchange_calendar_xshg import XSHGExchangeCalendar\n\nfrom .test_trading_calendar import ExchangeCalendarTestBase\nfrom .test_utils import T\n\n\nclass XSHGCalendarTestCase(ExchangeCalendarTestBase, TestCase):\n\n answer_key_filename = \"xshg\"\n calendar_class = XSHGExchangeCalendar\n\n # Shanghai stock exchange is open from 9:30 am to 3pm\n # (for now, ignoring lunch break)\n MAX_SESSION_HOURS = 5.5\n\n HAVE_EARLY_CLOSES = False\n\n MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp(\"2011-04-07\", tz=UTC)\n\n def test_normal_year(self):\n expected_holidays_2017 = [\n T(\"2017-01-02\"),\n T(\"2017-01-27\"),\n T(\"2017-01-30\"),\n T(\"2017-01-31\"),\n T(\"2017-02-01\"),\n T(\"2017-02-02\"),\n T(\"2017-04-03\"),\n T(\"2017-04-04\"),\n T(\"2017-05-01\"),\n T(\"2017-05-29\"),\n T(\"2017-05-30\"),\n T(\"2017-10-02\"),\n T(\"2017-10-03\"),\n T(\"2017-10-04\"),\n T(\"2017-10-05\"),\n T(\"2017-10-06\"),\n ]\n\n for session_label in expected_holidays_2017:\n self.assertNotIn(session_label, self.calendar.all_sessions)\n\n def test_constrain_construction_dates(self):\n # the XSHG calendar currently goes from 1999 to 2025, inclusive.\n with self.assertRaises(ValueError) as e:\n self.calendar_class(T(\"1998-12-31\"), T(\"2005-01-01\"))\n\n self.assertEqual(\n str(e.exception),\n (\n \"The XSHG holidays are only recorded back to 1999,\"\n \" cannot instantiate the XSHG calendar back to 1998.\"\n ),\n )\n\n with self.assertRaises(ValueError) as e:\n self.calendar_class(T(\"2005-01-01\"), T(\"2026-01-01\"))\n\n self.assertEqual(\n str(e.exception),\n (\n \"The XSHG holidays are only recorded to 2025,\"\n \" cannot instantiate the XSHG calendar for 2026.\"\n ),\n )\n" ]
[ [ "pandas.Timestamp" ] ]
sintefneodroid/vision
[ "a4e66251ead99f15f4697bfe2abd00e2f388e743", "a4e66251ead99f15f4697bfe2abd00e2f388e743" ]
[ "samples/regression/vae/flow/data_loader.py", "neodroidvision/data/detection/coco/deprec/s_coco_utilities.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n__author__ = \"Christian Heider Nielsen\"\n__doc__ = r\"\"\"\n \"\"\"\n\nimport h5py\nimport torch\nimport torch.utils\nimport torch.utils.data\n\nfrom .h5_mnist_data import download_binary_mnist\n\n\ndef load_binary_mnist(cfg, **kwcfg):\n fname = cfg.data_dir / \"binary_mnist.h5\"\n if not fname.exists():\n print(\"Downloading binary MNIST data...\")\n download_binary_mnist(fname)\n f = h5py.File(str(fname), \"r\")\n x_train = f[\"train\"][::]\n x_val = f[\"valid\"][::]\n x_test = f[\"test\"][::]\n train = torch.utils.data.TensorDataset(torch.from_numpy(x_train))\n train_loader = torch.utils.data.DataLoader(\n train, batch_size=cfg.batch_size, shuffle=True, **kwcfg\n )\n validation = torch.utils.data.TensorDataset(torch.from_numpy(x_val))\n val_loader = torch.utils.data.DataLoader(\n validation, batch_size=cfg.test_batch_size, shuffle=False\n )\n test = torch.utils.data.TensorDataset(torch.from_numpy(x_test))\n test_loader = torch.utils.data.DataLoader(\n test, batch_size=cfg.test_batch_size, shuffle=False\n )\n return train_loader, val_loader, test_loader\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"Christian Heider Nielsen\"\n__doc__ = r\"\"\"\n\n Created on 22/03/2020\n \"\"\"\n\nimport copy\nfrom collections import namedtuple\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Any, List, Mapping, Sequence, Tuple, Union\n\nimport torch\nimport torch.utils.data\nimport torchvision\nfrom numpy.core.multiarray import ndarray\nfrom pycocotools import mask\nfrom pycocotools.coco import COCO\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import Compose\n\n__all__ = [\n \"FilterAndRemapCocoCategories\",\n \"convert_coco_poly_to_mask\",\n \"ConvertCocoPolysToMask\",\n \"_coco_remove_images_without_annotations\",\n \"convert_to_coco_api\",\n \"get_coco_api_from_dataset\",\n \"CocoDetection\",\n \"get_coco_ins\",\n \"get_coco_kp\",\n \"CocoMask\",\n \"CocoPolyAnnotation\",\n \"CocoModeEnum\",\n]\n\nfrom draugr.torch_utilities import Split\nfrom draugr.torch_utilities.tensors.tensor_container import NamedTensorTuple\n\n\nclass CocoModeEnum(Enum):\n instances = \"instances\"\n person_keypoints = \"person_keypoints\"\n\n\nCocoPolyAnnotation = namedtuple(\n \"CocoPolyAno\",\n (\n \"image_id\",\n \"bbox\",\n \"category_id\",\n \"area\",\n \"iscrowd\",\n \"id\",\n \"segmentation\",\n \"keypoints\",\n \"num_keypoints\",\n ),\n)\nCocoMask = namedtuple(\n \"CocoMask\", (\"boxes\", \"labels\", \"masks\", \"image_id\", \"area\", \"iscrowd\", \"keypoints\")\n)\n\n\nclass FilterAndRemapCocoCategories(object):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, categories: List[str], remap: bool = True):\n self._categories = categories\n self._remap = remap\n\n def __call__(self, image, target: Mapping[str, Any]) -> Tuple:\n anno = target[\"annotations\"]\n anno = [obj for obj in anno if obj[\"category_id\"] in self._categories]\n if not self._remap:\n target[\"annotations\"] = anno\n return image, target\n anno = copy.deepcopy(anno)\n for obj in anno:\n obj[\"category_id\"] = self._categories.index(obj[\"category_id\"])\n target[\"annotations\"] = anno\n return image, target\n\n\ndef convert_coco_poly_to_mask(\n segmentations: Sequence, height: int, width: int\n) -> NamedTensorTuple:\n \"\"\"\n\n :param segmentations:\n :type segmentations:\n :param height:\n :type height:\n :param width:\n :type width:\n :return:\n :rtype:\n \"\"\"\n masks = []\n for polygons in segmentations:\n rles = 
mask.frPyObjects(polygons, height, width)\n mask = mask.decode(rles)\n if len(mask.shape) < 3:\n mask = mask[..., None]\n mask = torch.as_tensor(mask, dtype=torch.uint8)\n mask = mask.any(dim=2)\n masks.append(mask)\n if masks:\n masks = torch.stack(masks, dim=0)\n else:\n masks = torch.zeros((0, height, width), dtype=torch.uint8)\n return NamedTensorTuple(masks=masks)\n\n\nclass ConvertCocoPolysToMask(object):\n def __call__(self, image: ndarray, target: Mapping[str, Any]) -> Tuple:\n w, h = image.size\n\n image_id = torch.tensor([target[\"image_id\"]])\n\n anno = [obj for obj in target[\"annotations\"] if obj.iscrowd == 0]\n boxes = [obj.BoundingBox for obj in anno]\n\n # guard against no boxes via resizing\n boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)\n boxes[:, 2:] += boxes[:, :2]\n boxes[:, 0::2].clamp_(min=0, max=w)\n boxes[:, 1::2].clamp_(min=0, max=h)\n\n classes = torch.tensor([obj.category_id for obj in anno], dtype=torch.int64)\n\n masks = convert_coco_poly_to_mask([obj.segmentation for obj in anno], h, w)\n\n keypoints = None\n if anno and anno[0].Keypoints is not None:\n keypoints = [obj.Keypoints for obj in anno]\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32)\n num_keypoints = keypoints.shape[0]\n if num_keypoints:\n keypoints = keypoints.view(num_keypoints, -1, 3)\n\n keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])\n\n target = CocoMask(\n boxes=boxes[keep],\n labels=classes[keep],\n masks=masks[keep],\n image_id=image_id,\n area=torch.tensor([obj.area for obj in anno]),\n iscrowd=torch.tensor([obj.iscrowd for obj in anno]),\n keypoints=None,\n )\n\n if keypoints is not None:\n target.keypoints = keypoints[keep]\n\n return image, target\n\n\ndef _coco_remove_images_without_annotations(\n dataset: Dataset,\n category_list: Sequence[CocoPolyAnnotation] = None,\n min_keypoints_per_image: int = 10,\n) -> Dataset:\n def _has_only_empty_bbox(anno: List[CocoPolyAnnotation]) -> bool:\n return all(any(o <= 1 for o in obj.bbox[2:]) for obj in anno)\n\n def _count_visible_keypoints(anno: List[CocoPolyAnnotation]) -> int:\n return sum(sum(1 for v in ann.keypoints[2::3] if v > 0) for ann in anno)\n\n def _has_valid_annotation(anno: List[CocoPolyAnnotation]) -> bool:\n # if it's empty, there is no annotation\n if len(anno) == 0:\n return False\n # if all boxes have close to zero area, there is no annotation\n if _has_only_empty_bbox(anno):\n return False\n # keypoints task have a slight different critera for considering\n # if an annotation is valid\n if anno[0].keypoints is None:\n return True\n # for keypoint detection tasks, only consider valid images those\n # containing at least min_keypoints_per_image\n if _count_visible_keypoints(anno) >= min_keypoints_per_image:\n return True\n return False\n\n assert isinstance(dataset, torchvision.datasets.CocoDetection)\n ids = []\n for ds_idx, img_id in enumerate(dataset.ids):\n ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = dataset.coco.loadAnns(ann_ids)\n if category_list:\n anno = [obj for obj in anno if obj.category_id in category_list]\n if _has_valid_annotation(anno):\n ids.append(ds_idx)\n\n dataset = torch.utils.data.Subset(dataset, ids)\n return dataset\n\n\ndef convert_to_coco_api(ds):\n \"\"\"\n\n :param ds:\n :type ds:\n :return:\n :rtype:\n \"\"\"\n coco_ds = COCO()\n ann_id = 0\n dataset = {\"images\": [], \"categories\": [], \"annotations\": []}\n categories = set()\n for img_idx in range(len(ds)):\n # find better way to get target\n # targets = 
ds.get_annotations(img_idx)\n img, targets = ds[img_idx]\n image_id = targets[\"image_id\"].item()\n dataset[\"images\"].append(\n {\"id\": image_id, \"height\": img.shape[-2], \"width\": img.shape[-1]}\n )\n bboxes = targets[\"boxes\"]\n bboxes[:, 2:] -= bboxes[:, :2]\n bboxes = bboxes.tolist()\n labels = targets[\"labels\"].tolist()\n areas = targets[\"area\"].tolist()\n iscrowd = targets[\"iscrowd\"].tolist()\n if \"masks\" in targets:\n masks = targets[\"masks\"]\n # make masks Fortran contiguous for coco_mask\n masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)\n if \"keypoints\" in targets:\n keypoints = targets[\"keypoints\"]\n keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()\n num_objs = len(bboxes)\n for i in range(num_objs):\n ann = CocoPolyAnnotation(\n image_id=image_id,\n bbox=bboxes[i],\n category_id=labels[i],\n area=areas[i],\n iscrowd=iscrowd[i],\n id=ann_id,\n segmentation=None,\n keypoints=None,\n num_keypoints=None,\n )\n categories.add(labels[i])\n if \"masks\" in targets:\n ann.segmentation = mask.encode(masks[i].numpy())\n if \"keypoints\" in targets:\n ann.keypoints = keypoints[i]\n ann.num_keypoints = sum(k != 0 for k in keypoints[i][2::3])\n dataset[\"annotations\"].append(ann)\n ann_id += 1\n dataset[\"categories\"] = [{\"id\": i} for i in sorted(categories)]\n coco_ds.dataset = dataset\n coco_ds.createIndex()\n return coco_ds\n\n\ndef get_coco_api_from_dataset(\n dataset: Union[torch.utils.data.Subset, torchvision.datasets.CocoDetection]\n) -> COCO:\n \"\"\"\n\n :param dataset:\n :type dataset:\n :return:\n :rtype:\n \"\"\"\n for i in range(10):\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n break\n if isinstance(dataset, torch.utils.data.Subset):\n dataset = dataset.dataset\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n return dataset.coco\n return convert_to_coco_api(dataset)\n\n\nclass CocoDetection(torchvision.datasets.CocoDetection):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, img_folder, ann_file, transforms):\n super(CocoDetection, self).__init__(img_folder, ann_file)\n self._transforms = transforms\n\n def __getitem__(self, idx):\n img, target = super().__getitem__(idx)\n image_id = self.ids[idx]\n target = NamedTensorTuple(image_id=image_id, annotations=target)\n if self._transforms is not None:\n img, target = self._transforms(img, target)\n return img, target\n\n\ndef get_coco_ins(\n root_path: Path,\n image_set: Split,\n transforms,\n mode: CocoModeEnum = CocoModeEnum.instances,\n):\n \"\"\"\n\n:param root_path:\n:type root_path:\n:param image_set:\n:type image_set:\n:param transforms:\n:type transforms:\n:param mode:\n:type mode:\n:return:\n:rtype:\n\"\"\"\n assert image_set in Split\n assert image_set != Split.Testing\n\n annotations_path = Path(\"annotations\")\n PATHS = {\n Split.Training: (\"train2017\", annotations_path / f\"{mode}_{'train'}2017.json\"),\n Split.Validation: (\"val2017\", annotations_path / f\"{mode}_{'val'}2017.json\"),\n }\n\n t = [ConvertCocoPolysToMask()]\n\n if transforms is not None:\n t.append(transforms)\n transforms = Compose(t)\n\n img_folder, ann_file = PATHS[image_set]\n\n dataset = CocoDetection(\n root_path / img_folder, root_path / ann_file, transforms=transforms\n )\n\n if image_set == Split.Training:\n dataset = _coco_remove_images_without_annotations(dataset)\n\n # dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])\n\n return dataset\n\n\ndef get_coco_kp(root, image_set, transforms):\n \"\"\"\n\n :param root:\n :type root:\n :param 
image_set:\n :type image_set:\n :param transforms:\n :type transforms:\n :return:\n :rtype:\n \"\"\"\n return get_coco_ins(root, image_set, transforms, mode=CocoModeEnum.person_keypoints)\n" ]
[ [ "torch.utils.data.DataLoader", "torch.from_numpy" ], [ "torch.zeros", "torch.stack", "torch.tensor", "torch.utils.data.Subset", "torch.as_tensor" ] ]
dantehustg/cem
[ "5fcb22d395c124c63cd26585aba5321293882ece" ]
[ "tests/test_backend.py" ]
[ "import pytest\nimport numpy as np\nimport random\n\nfrom cem.backend import backend, NumpyBackend\n\ntry:\n from cem.backend import CuPyBackend\n import cupy as cp\n skip_cupy_test = False\nexcept ImportError:\n skip_cupy_test = True\n\n\ndef test_numpy_backend():\n X = random.randint(0, 10) * 10\n Y = random.randint(0, 10) * 10\n\n zeros = backend.zeros((X, Y))\n ones = backend.ones((X, Y))\n\n assert isinstance(backend, NumpyBackend)\n assert isinstance(zeros, np.ndarray)\n assert isinstance(ones, np.ndarray)\n assert backend.int == np.int64\n assert backend.float == np.float64\n assert zeros.shape == (X, Y)\n assert ones.shape == (X, Y)\n assert backend.sin(ones).any() == np.sin(ones).any()\n assert backend.cos(ones).any() == np.cos(ones).any()\n\n\n@pytest.mark.skipif(skip_cupy_test, reason='CuPy is not installed.')\ndef test_cupy_backend():\n backend.set_backend('cupy')\n X = random.randint(0, 10) * 10\n Y = random.randint(0, 10) * 10\n\n zeros = backend.zeros((X, Y))\n ones = backend.ones((X, Y))\n\n assert isinstance(backend, CuPyBackend)\n assert isinstance(zeros, cp.ndarray)\n assert isinstance(ones, cp.ndarray)\n assert backend.int == cp.int64\n assert backend.float == cp.float64\n assert zeros.shape == (X, Y)\n assert ones.shape == (X, Y)\n assert backend.sin(ones).all() == cp.sin(ones).all()\n assert backend.cos(ones).all() == cp.cos(ones).all()\n\n\n@pytest.mark.skipif(skip_cupy_test, reason='CuPy is not installed.')\ndef test_set_backend():\n backend.set_backend('numpy')\n assert isinstance(backend, NumpyBackend)\n backend.set_backend('cupy')\n assert isinstance(backend, CuPyBackend)\n" ]
[ [ "numpy.sin", "numpy.cos" ] ]
yanjingke/yolov4-keras
[ "251457723efc3b12b9744b6f5e0c25f4bee0d4e4", "251457723efc3b12b9744b6f5e0c25f4bee0d4e4" ]
[ "yolo.py", "nets/yolo4.py" ]
[ "import os\nimport numpy as np\nimport copy\nimport colorsys\nfrom timeit import default_timer as timer\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\nfrom PIL import Image, ImageFont, ImageDraw\nfrom nets.yolo4 import yolo_body,yolo_eval\nfrom utils.utils import letterbox_image\n#--------------------------------------------#\n# 使用自己训练好的模型预测需要修改2个参数\n# model_path和classes_path都需要修改!\n#--------------------------------------------#\nclass YOLO(object):\n _defaults = {\n \"model_path\": 'model_data/yolo4_weight.h5',\n \"anchors_path\": 'model_data/yolo_anchors.txt',\n \"classes_path\": 'model_data/coco_classes.txt',\n \"score\" : 0.5,\n \"iou\" : 0.3,\n # 显存比较小可以使用416x416\n # 显存比较大可以使用608x608\n \"model_image_size\" : (416, 416)\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n #---------------------------------------------------#\n # 初始化yolo\n #---------------------------------------------------#\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults)\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n\n #---------------------------------------------------#\n # 获得所有的分类\n #---------------------------------------------------#\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n #---------------------------------------------------#\n # 获得所有的先验框\n #---------------------------------------------------#\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n #---------------------------------------------------#\n # 获得所有的分类\n #---------------------------------------------------#\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n \n # 计算anchor数量\n num_anchors = len(self.anchors)\n num_classes = len(self.class_names)\n\n # 载入模型,如果原来的模型里已经包括了模型结构则直接载入。\n # 否则先构建模型再载入\n try:\n self.yolo_model = load_model(model_path, compile=False)\n except:\n self.yolo_model = yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)\n self.yolo_model.load_weights(self.model_path)\n else:\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\n num_anchors/len(self.yolo_model.output) * (num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes'\n\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # 画框设置不同的颜色\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n\n # 打乱颜色\n np.random.seed(10101)\n np.random.shuffle(self.colors)\n np.random.seed(None)\n\n self.input_image_shape = K.placeholder(shape=(2, ))\n\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n num_classes, self.input_image_shape,\n score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n\n 
#---------------------------------------------------#\n # 检测图片\n #---------------------------------------------------#\n def detect_image(self, image):\n start = timer()\n\n # 调整图片使其符合输入要求\n new_image_size = self.model_image_size\n boxed_image = letterbox_image(image, new_image_size)\n image_data = np.array(boxed_image, dtype='float32')\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n # 预测结果\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\n # 设置字体\n font = ImageFont.truetype(font='font/simhei.ttf',\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n small_pic=[]\n\n for i, c in list(enumerate(out_classes)):\n predicted_class = self.class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n top, left, bottom, right = box\n top = top - 5\n left = left - 5\n bottom = bottom + 5\n right = right + 5\n\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n\n # 画框框\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n label = label.encode('utf-8')\n print(label)\n \n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=self.colors[c])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=self.colors[c])\n draw.text(text_origin, str(label,'UTF-8'), fill=(0, 0, 0), font=font)\n del draw\n\n end = timer()\n print(end - start)\n return image\n\n def close_session(self):\n self.sess.close()\n", "from functools import wraps\n\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.regularizers import l2\nfrom nets.CSPdarknet53 import darknet_body\nfrom utils.utils import compose\n\n\n#--------------------------------------------------#\n# 单次卷积\n#--------------------------------------------------#\n@wraps(Conv2D)\ndef DarknetConv2D(*args, **kwargs):\n darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}\n darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'\n darknet_conv_kwargs.update(kwargs)\n return Conv2D(*args, **darknet_conv_kwargs)\n\n#---------------------------------------------------#\n# 卷积块\n# DarknetConv2D + BatchNormalization + LeakyReLU\n#---------------------------------------------------#\ndef DarknetConv2D_BN_Leaky(*args, **kwargs):\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose( \n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1))\n\n#---------------------------------------------------#\n# 特征层->最后的输出\n#---------------------------------------------------#\ndef 
make_five_convs(x, num_filters):\n # 五次卷积\n x = DarknetConv2D_BN_Leaky(num_filters, (1,1))(x)\n x = DarknetConv2D_BN_Leaky(num_filters*2, (3,3))(x)\n x = DarknetConv2D_BN_Leaky(num_filters, (1,1))(x)\n x = DarknetConv2D_BN_Leaky(num_filters*2, (3,3))(x)\n x = DarknetConv2D_BN_Leaky(num_filters, (1,1))(x)\n return x\n\n#---------------------------------------------------#\n# 特征层->最后的输出\n#---------------------------------------------------#\ndef yolo_body(inputs, num_anchors, num_classes):\n # 生成darknet53的主干模型\n feat1,feat2,feat3 = darknet_body(inputs)\n\n # 第一个特征层\n # y1=(batch_size,13,13,3,85)\n P5 = DarknetConv2D_BN_Leaky(512, (1,1))(feat3)\n P5 = DarknetConv2D_BN_Leaky(1024, (3,3))(P5)\n P5 = DarknetConv2D_BN_Leaky(512, (1,1))(P5)\n # 使用了SPP结构,即不同尺度的最大池化后堆叠。\n maxpool1 = MaxPooling2D(pool_size=(13,13), strides=(1,1), padding='same')(P5)\n maxpool2 = MaxPooling2D(pool_size=(9,9), strides=(1,1), padding='same')(P5)\n maxpool3 = MaxPooling2D(pool_size=(5,5), strides=(1,1), padding='same')(P5)\n P5 = Concatenate()([maxpool1, maxpool2, maxpool3, P5])\n P5 = DarknetConv2D_BN_Leaky(512, (1,1))(P5)\n P5 = DarknetConv2D_BN_Leaky(1024, (3,3))(P5)\n P5 = DarknetConv2D_BN_Leaky(512, (1,1))(P5)\n\n P5_upsample = compose(DarknetConv2D_BN_Leaky(256, (1,1)), UpSampling2D(2))(P5)\n \n P4 = DarknetConv2D_BN_Leaky(256, (1,1))(feat2)\n P4 = Concatenate()([P4, P5_upsample])\n P4 = make_five_convs(P4,256)\n\n P4_upsample = compose(DarknetConv2D_BN_Leaky(128, (1,1)), UpSampling2D(2))(P4)\n \n P3 = DarknetConv2D_BN_Leaky(128, (1,1))(feat1)\n P3 = Concatenate()([P3, P4_upsample])\n P3 = make_five_convs(P3,128)\n\n P3_output = DarknetConv2D_BN_Leaky(256, (3,3))(P3)\n P3_output = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(P3_output)\n\n #26,26 output\n P3_downsample = ZeroPadding2D(((1,0),(1,0)))(P3)\n P3_downsample = DarknetConv2D_BN_Leaky(256, (3,3), strides=(2,2))(P3_downsample)\n P4 = Concatenate()([P3_downsample, P4])\n P4 = make_five_convs(P4,256)\n \n P4_output = DarknetConv2D_BN_Leaky(512, (3,3))(P4)\n P4_output = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(P4_output)\n \n\n #13,13 output\n P4_downsample = ZeroPadding2D(((1,0),(1,0)))(P4)\n P4_downsample = DarknetConv2D_BN_Leaky(512, (3,3), strides=(2,2))(P4_downsample)\n P5 = Concatenate()([P4_downsample, P5])\n P5 = make_five_convs(P5,512)\n \n\n P5_output = DarknetConv2D_BN_Leaky(1024, (3,3))(P5)\n P5_output = DarknetConv2D(num_anchors*(num_classes+5), (1,1))(P5_output)\n\n return Model(inputs, [P5_output, P4_output, P3_output])\n\n#---------------------------------------------------#\n# 将预测值的每个特征层调成真实值\n#---------------------------------------------------#\ndef yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):\n num_anchors = len(anchors)\n # [1, 1, 1, num_anchors, 2]\n anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])\n\n # 获得x,y的网格\n # (13,13, 1, 2)\n grid_shape = K.shape(feats)[1:3] # height, width\n grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),\n [1, grid_shape[1], 1, 1])\n grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),\n [grid_shape[0], 1, 1, 1])\n grid = K.concatenate([grid_x, grid_y])\n grid = K.cast(grid, K.dtype(feats))\n\n # (batch_size,13,13,3,85)\n feats = K.reshape(feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])\n\n # 将预测值调成真实值\n # box_xy对应框的中心点\n # box_wh对应框的宽和高\n box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))\n box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / 
K.cast(input_shape[::-1], K.dtype(feats))\n box_confidence = K.sigmoid(feats[..., 4:5])\n box_class_probs = K.sigmoid(feats[..., 5:])\n\n # 在计算loss的时候返回如下参数\n if calc_loss == True:\n return grid, feats, box_xy, box_wh\n return box_xy, box_wh, box_confidence, box_class_probs\n\n#---------------------------------------------------#\n# 对box进行调整,使其符合真实图片的样子\n#---------------------------------------------------#\ndef yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):\n box_yx = box_xy[..., ::-1]\n box_hw = box_wh[..., ::-1]\n \n input_shape = K.cast(input_shape, K.dtype(box_yx))\n image_shape = K.cast(image_shape, K.dtype(box_yx))\n\n new_shape = K.round(image_shape * K.min(input_shape/image_shape))\n offset = (input_shape-new_shape)/2./input_shape\n scale = input_shape/new_shape\n\n box_yx = (box_yx - offset) * scale\n box_hw *= scale\n\n box_mins = box_yx - (box_hw / 2.)\n box_maxes = box_yx + (box_hw / 2.)\n boxes = K.concatenate([\n box_mins[..., 0:1], # y_min\n box_mins[..., 1:2], # x_min\n box_maxes[..., 0:1], # y_max\n box_maxes[..., 1:2] # x_max\n ])\n\n boxes *= K.concatenate([image_shape, image_shape])\n return boxes\n\n#---------------------------------------------------#\n# 获取每个box和它的得分\n#---------------------------------------------------#\ndef yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):\n # 将预测值调成真实值\n # box_xy对应框的中心点\n # box_wh对应框的宽和高\n # -1,13,13,3,2; -1,13,13,3,2; -1,13,13,3,1; -1,13,13,3,80\n box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats, anchors, num_classes, input_shape)\n # 将box_xy、和box_wh调节成y_min,y_max,xmin,xmax\n boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)\n # 获得得分和box\n boxes = K.reshape(boxes, [-1, 4])\n box_scores = box_confidence * box_class_probs\n box_scores = K.reshape(box_scores, [-1, num_classes])\n return boxes, box_scores\n\n#---------------------------------------------------#\n# 图片预测\n#---------------------------------------------------#\ndef yolo_eval(yolo_outputs,\n anchors,\n num_classes,\n image_shape,\n max_boxes=20,\n score_threshold=.6,\n iou_threshold=.5):\n # 获得特征层的数量\n num_layers = len(yolo_outputs)\n # 特征层1对应的anchor是678\n # 特征层2对应的anchor是345\n # 特征层3对应的anchor是012\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]\n \n input_shape = K.shape(yolo_outputs[0])[1:3] * 32\n boxes = []\n box_scores = []\n # 对每个特征层进行处理\n for l in range(num_layers):\n _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l], anchors[anchor_mask[l]], num_classes, input_shape, image_shape)\n boxes.append(_boxes)\n box_scores.append(_box_scores)\n # 将每个特征层的结果进行堆叠\n boxes = K.concatenate(boxes, axis=0)\n box_scores = K.concatenate(box_scores, axis=0)\n\n mask = box_scores >= score_threshold\n max_boxes_tensor = K.constant(max_boxes, dtype='int32')\n boxes_ = []\n scores_ = []\n classes_ = []\n for c in range(num_classes):\n # 取出所有box_scores >= score_threshold的框,和成绩\n class_boxes = tf.boolean_mask(boxes, mask[:, c])\n class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])\n\n # 非极大抑制,去掉box重合程度高的那一些\n nms_index = tf.image.non_max_suppression(\n class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)\n\n # 获取非极大抑制后的结果\n # 下列三个分别是\n # 框的位置,得分与种类\n class_boxes = K.gather(class_boxes, nms_index)\n class_box_scores = K.gather(class_box_scores, nms_index)\n classes = K.ones_like(class_box_scores, 'int32') * c\n boxes_.append(class_boxes)\n scores_.append(class_box_scores)\n classes_.append(classes)\n boxes_ = K.concatenate(boxes_, axis=0)\n scores_ = 
K.concatenate(scores_, axis=0)\n classes_ = K.concatenate(classes_, axis=0)\n\n return boxes_, scores_, classes_\n\n\n" ]
[ [ "numpy.array", "numpy.random.seed", "numpy.random.shuffle", "numpy.expand_dims", "numpy.floor" ], [ "tensorflow.boolean_mask", "tensorflow.image.non_max_suppression" ] ]