repo_name (string) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
caixunshiren/pytorch-metal-oxide-memristor-crossbar
|
[
"ef48468910fba455ccc58709e336c58c862a3cb1"
] |
[
"memristor/devices.py"
] |
[
"import yaml\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\npath = Path(__file__).parent\nwith open(path / \"params.yaml\", 'r') as stream:\n CONFIG = yaml.safe_load(stream)\n PARAMS = CONFIG[\"StaticParameters\"]\n\nK_b = 1.38065e-23 # Boltzmann constant\n\nHEADER = [\"c0_set\", \"c1_set\", \"c2_set\", \"c3_set\", \"c4_set\", \"d0_set\", \"d1_set\", \"d2_set\", \"d3_set\", \"d4_set\",\n \"c0_reset\", \"c1_reset\", \"c2_reset\", \"c3_reset\", \"c4_reset\", \"d0_reset\", \"d1_reset\", \"d2_reset\", \"d3_reset\",\n \"d4_reset\"]\nDYNAMIC_PARAMS = pd.read_csv(path/'dynamic_params.txt', sep=\" \", index_col=0, header=None)\nDYNAMIC_PARAMS.columns = HEADER\n\n\nclass StaticMemristor:\n def __init__(self, g_0):\n \"\"\"\n :param g_0: device conductance in S, normally 1uS to 100 uS\n \"\"\"\n self.g_0 = g_0\n self.t = None\n self.f = None\n self.u_A1 = None\n self.u_A3 = None\n self.sigma_A1 = None\n self.sigma_A3 = None\n self.d2d_var = np.random.normal(0, 1, 1).item()\n\n def noise_free_dc_iv_curve(self, v):\n return self.u_A1 * v + self.u_A3 * v ** 3\n\n def d2d_variation(self, v):\n return self.d2d_var * (self.sigma_A1 * v + self.sigma_A3 * v ** 3)\n\n def temporal_variation(self, v, i_spacial):\n return np.random.normal(0, 4 * K_b * self.t * self.f * i_spacial / v, 1).item()\n\n def calibrate(self, t, f):\n self.t = t\n self.f = f\n self.u_A1 = PARAMS['A1']['a0'] + PARAMS['A1']['a1'] * self.g_0 + PARAMS['A1']['a2'] * self.t\n self.u_A3 = PARAMS['A3']['a0'] * self.g_0 + PARAMS['A3']['a1'] * self.g_0 ** 2 + \\\n PARAMS['A3']['a2'] * self.t ** (-1.33)\n self.sigma_A1 = PARAMS['A1']['p0'] + PARAMS['A1']['p1'] * self.g_0 + PARAMS['A1']['p2'] * self.t + \\\n PARAMS['A1']['p3'] * self.g_0 ** 2\n self.sigma_A3 = PARAMS['A3']['p0'] + PARAMS['A3']['p1'] * self.g_0 + PARAMS['A3']['p2'] * self.t + \\\n PARAMS['A3']['p3'] * self.g_0 ** 2 + PARAMS['A3']['p4'] * self.g_0 * self.t\n\n def inference(self, v):\n \"\"\"\n :param v: applied voltage\n :param t: ambient temperature\n :param f: frequency\n :return: output current i\n \"\"\"\n i_spacial = self.noise_free_dc_iv_curve(v) + self.d2d_variation(v)\n i = i_spacial + self.temporal_variation(v, i_spacial)\n return i\n\n\nclass DynamicMemristor(StaticMemristor):\n def __init__(self, g_0):\n super().__init__(g_0)\n self.g_range = [-1,\n -1] # conduction range, [left_limit, right_limit], need this because parameters is calibrated\n # in terms of range. Set to dumb value first.\n self.params = None\n self.get_params()\n self.dynamic_d2d_var = np.random.normal(0, 1, 1).item()\n\n\n def get_params(self):\n assert 3.16 * 10e-6 <= self.g_0 <= 316 * 10e-6, \"conductance out of range\"\n if self.g_range[0] <= self.g_0 <= self.g_range[1]:\n return\n # 1. find the appropriate range\n # 2. 
get params based on range\n for index, row in DYNAMIC_PARAMS.iterrows():\n llimit, rlimit = index.split(\"-\")\n llimit = float(llimit)*10e-6 # caste type and in us\n rlimit = float(rlimit)*10e-6 # caste type and in us\n if llimit <= self.g_0 <= rlimit:\n print('debug')\n self.g_range = [llimit, rlimit]\n self.params = row.to_dict()\n\n def set(self, V_p, t_p):\n self.get_params()\n logT = np.log(t_p)\n D_m = self.params[\"c0_set\"] * (1-np.tanh(self.params[\"c1_set\"]*(logT-self.params[\"c2_set\"]))) \\\n * (np.tanh(self.params[\"c3_set\"]*V_p-self.params[\"c4_set\"])+1)\n D_d2d = self.dynamic_d2d_var * D_m * (self.params[\"d0_set\"] + self.params[\"d1_set\"]*logT**2 +\n self.params[\"d2_set\"]*V_p*logT + self.params[\"d3_set\"]*(V_p**2)*logT +\n self.params[\"d4_set\"]*V_p**3)\n self.g_0 += (D_m + D_d2d)\n if self.g_0 > 316 * 10e-6:\n self.g_0 = 316 * 10e-6\n\n def reset(self, V_p, t_p):\n self.get_params()\n logT = np.log(t_p)\n D_m = self.params[\"c0_reset\"] * (-1 - np.tanh(self.params[\"c1_reset\"] * (logT - self.params[\"c2_reset\"]))) \\\n * (np.tanh(self.params[\"c3_reset\"] * V_p - self.params[\"c4_reset\"]) - 1)\n D_d2d = self.dynamic_d2d_var * D_m * (self.params[\"d0_reset\"] + self.params[\"d1_reset\"] * logT ** 2 +\n self.params[\"d2_reset\"] * V_p * logT + self.params[\"d3_reset\"] * (\n V_p ** 2) * logT +\n self.params[\"d4_reset\"] * V_p ** 3)\n self.g_0 += (D_m + D_d2d)\n if 3.16 * 10e-6 > self.g_0:\n self.g_0 = 3.16 * 10e-6\n\n# TODO: Qs for Amirali - T is Celcius or Kelvin? Role of frequency and appropriete value?\n# TODO: Dynamic memristors\n# TODO: Crossbar static\n# TODO: Crossbar dynamic\n"
] |
[
[
"numpy.log",
"pandas.read_csv",
"numpy.tanh",
"numpy.random.normal"
]
] |
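A minimal usage sketch for the `memristor/devices.py` row above, not part of the dataset itself: the conductance, temperature, frequency, and pulse values are assumptions, and the repo's `params.yaml` and `dynamic_params.txt` must be importable alongside the module.

```python
# Hypothetical usage of the classes in the row above; all numeric values are assumed.
from memristor.devices import StaticMemristor, DynamicMemristor

static = StaticMemristor(g_0=50e-6)    # 50 uS device (docstring: normally 1 uS to 100 uS)
static.calibrate(t=300, f=1e8)         # temperature and frequency; units per the repo's TODO note
current = static.inference(v=0.2)      # noisy read current at 0.2 V

dynamic = DynamicMemristor(g_0=60e-6)  # must fall inside a range listed in dynamic_params.txt
dynamic.set(V_p=1.0, t_p=1e-6)         # SET pulse nudges conductance up
dynamic.reset(V_p=-1.0, t_p=1e-6)      # RESET pulse nudges it back down
```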
RaISy-Net/Intelligent_picking
|
[
"7168b9ccd1f66c26367a534bddac3c8b2f5dc192"
] |
[
"src/utils/visualisation/plot.py"
] |
[
"import warnings\nfrom datetime import datetime\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom src.utils.dataset_processing.grasp import detect_grasps\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef plot_results(\n fig,\n rgb_img,\n grasp_q_img,\n grasp_angle_img,\n depth_img=None,\n no_grasps=1,\n grasp_width_img=None\n):\n \"\"\"\n Plot the output of a network\n :param fig: Figure to plot the output\n :param rgb_img: RGB Image\n :param depth_img: Depth Image\n :param grasp_q_img: Q output of network\n :param grasp_angle_img: Angle output of network\n :param no_grasps: Maximum number of grasps to plot\n :param grasp_width_img: (optional) Width output of network\n :return:\n \"\"\"\n gs = detect_grasps(grasp_q_img, grasp_angle_img, width_img=grasp_width_img, no_grasps=no_grasps)\n \n\n plt.ion()\n plt.clf()\n ax = fig.add_subplot(1, 5, 1)\n ax.imshow(rgb_img)\n ax.set_title('RGB')\n ax.axis('off')\n\n if depth_img is not None:\n ax = fig.add_subplot(2, 3, 2)\n ax.imshow(depth_img, cmap='gray')\n ax.set_title('Depth')\n ax.axis('off')\n\n ax = fig.add_subplot(1, 5, 2)\n ax.imshow(rgb_img)\n for g in gs:\n g.plot(ax)\n ax.set_title('Grasp')\n ax.axis('off')\n\n ax = fig.add_subplot(1, 5, 3)\n plot = ax.imshow(grasp_q_img, cmap='jet', vmin=0, vmax=1)\n ax.set_title('Q')\n ax.axis('off')\n plt.colorbar(plot)\n\n ax = fig.add_subplot(1, 5, 4)\n plot = ax.imshow(grasp_angle_img, cmap='hsv', vmin=-np.pi / 2, vmax=np.pi / 2)\n ax.set_title('Angle')\n ax.axis('off')\n plt.colorbar(plot)\n\n ax = fig.add_subplot(1, 5, 5)\n plot = ax.imshow(grasp_width_img, cmap='jet', vmin=0, vmax=100)\n ax.set_title('Width')\n ax.axis('off')\n plt.colorbar(plot)\n\n plt.pause(0.1)\n fig.canvas.draw()\n return gs\n\n\ndef plot_grasp(\n fig,\n grasps=None,\n save=False,\n rgb_img=None,\n grasp_q_img=None,\n grasp_angle_img=None,\n no_grasps=1,\n grasp_width_img=None\n):\n \"\"\"\n Plot the output grasp of a network\n :param fig: Figure to plot the output\n :param grasps: grasp pose(s)\n :param save: Bool for saving the plot\n :param rgb_img: RGB Image\n :param grasp_q_img: Q output of network\n :param grasp_angle_img: Angle output of network\n :param no_grasps: Maximum number of grasps to plot\n :param grasp_width_img: (optional) Width output of network\n :return:\n \"\"\"\n if grasps is None:\n grasps = detect_grasps(grasp_q_img, grasp_angle_img, width_img=grasp_width_img, no_grasps=no_grasps)\n\n plt.ion()\n plt.clf()\n\n ax = plt.subplot(111)\n ax.imshow(rgb_img)\n for g in grasps:\n g.plot(ax)\n ax.set_title('Grasp')\n ax.axis('off')\n\n plt.pause(0.1)\n fig.canvas.draw()\n\n if save:\n time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n fig.savefig('results/{}.png'.format(time))\n\n\ndef save_results(x,rgb_img, grasp_q_img, grasp_angle_img, depth_img=None, no_grasps=1, grasp_width_img=None):\n \"\"\"\n Plot the output of a network\n :param rgb_img: RGB Image\n :param depth_img: Depth Image\n :param grasp_q_img: Q output of network\n :param grasp_angle_img: Angle output of network\n :param no_grasps: Maximum number of grasps to plot\n :param grasp_width_img: (optional) Width output of network\n :return:\n \"\"\"\n gs = detect_grasps(grasp_q_img, grasp_angle_img, width_img=grasp_width_img, no_grasps=no_grasps)\n '''\n fig = plt.figure(figsize=(10, 10))\n plt.ion()\n plt.clf()\n ax = plt.subplot(111)\n ax.imshow(rgb_img)\n ax.set_title('RGB')\n ax.axis('off')\n fig.savefig('results/rgb.png')\n\n if depth_img.any():\n fig = plt.figure(figsize=(10, 10))\n plt.ion()\n plt.clf()\n ax = 
plt.subplot(111)\n ax.imshow(depth_img, cmap='gray')\n for g in gs:\n g.plot(ax)\n ax.set_title('Depth')\n ax.axis('off')\n fig.savefig('results/depth.png')\n '''\n fig = plt.figure(figsize=(5, 5))\n plt.ion()\n plt.clf()\n ax = plt.subplot(111)\n ax.imshow(rgb_img)\n for g in gs:\n g.plot(ax)\n ax.set_title('Grasp')\n ax.axis('off')\n fig.savefig('results/grasp'+str(x)+'.png')\n plt.close()\n '''\n fig = plt.figure(figsize=(10, 10))\n plt.ion()\n plt.clf()\n ax = plt.subplot(111)\n plot = ax.imshow(grasp_q_img, cmap='jet', vmin=0, vmax=1)\n ax.set_title('Q')\n ax.axis('off')\n plt.colorbar(plot)\n fig.savefig('results/quality.png')\n\n fig = plt.figure(figsize=(10, 10))\n plt.ion()\n plt.clf()\n ax = plt.subplot(111)\n plot = ax.imshow(grasp_angle_img, cmap='hsv', vmin=-np.pi / 2, vmax=np.pi / 2)\n ax.set_title('Angle')\n ax.axis('off')\n plt.colorbar(plot)\n fig.savefig('results/angle.png')\n\n fig = plt.figure(figsize=(10, 10))\n plt.ion()\n plt.clf()\n ax = plt.subplot(111)\n plot = ax.imshow(grasp_width_img, cmap='jet', vmin=0, vmax=100)\n ax.set_title('Width')\n ax.axis('off')\n plt.colorbar(plot)\n fig.savefig('results/width.png')\n\n fig.canvas.draw()\n plt.close(fig)\n '''\n"
] |
[
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.figure"
]
] |
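A short sketch of driving this row's `plot_grasp` helper with synthetic network outputs; the image size is an assumption, and the repo-local `src.utils` package must be on the import path.

```python
# Hypothetical call into the plotting helpers above, using random arrays as
# stand-ins for real network outputs; shapes are assumptions.
import matplotlib.pyplot as plt
import numpy as np

from src.utils.visualisation.plot import plot_grasp  # repo-local import

rng = np.random.default_rng(0)
rgb = rng.random((224, 224, 3))                         # fake RGB image
q = rng.random((224, 224))                              # grasp quality map in [0, 1]
angle = rng.uniform(-np.pi / 2, np.pi / 2, (224, 224))  # grasp angle map

fig = plt.figure(figsize=(5, 5))
plot_grasp(fig, rgb_img=rgb, grasp_q_img=q, grasp_angle_img=angle, no_grasps=1)
```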
krantirk/Transformers
|
[
"4054335fc6910c8aaed3b947bd4e3bfc2ced6370"
] |
[
"transformers/modeling_t5.py"
] |
[
"# coding=utf-8\n# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch T5 model. \"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport logging\nimport math\nimport os\nimport sys\nimport copy\nimport itertools\nfrom io import open\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom .modeling_utils import PreTrainedModel, Bridges, prune_linear_layer\nfrom .configuration_t5 import T5Config\nfrom .file_utils import add_start_docstrings, DUMMY_INPUTS, DUMMY_MASK\n\nlogger = logging.getLogger(__name__)\n\n####################################################\n# This dict contrains shortcut names and associated url\n# for the pretrained weights provided with the models\n####################################################\nT5_PRETRAINED_MODEL_ARCHIVE_MAP = {\n 't5-small': \"https://s3.amazonaws.com/models.huggingface.co/bert/t5-small-pytorch_model.bin\",\n 't5-base': \"https://s3.amazonaws.com/models.huggingface.co/bert/t5-base-pytorch_model.bin\",\n 't5-large': \"https://s3.amazonaws.com/models.huggingface.co/bert/t5-large-pytorch_model.bin\",\n 't5-3B': \"https://s3.amazonaws.com/models.huggingface.co/bert/t5-3B-pytorch_model.bin\",\n 't5-11B': \"https://s3.amazonaws.com/models.huggingface.co/bert/t5-11B-pytorch_model.bin\",\n}\n\n####################################################\n# This is a conversion method from TF 1.0 to PyTorch\n# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28\n####################################################\ndef load_tf_weights_in_t5(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n tf_weights = {}\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n tf_weights[name] = array\n\n for txt_name in names:\n name = txt_name.split('/')\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(n in [\"adam_v\", \"adam_m\", \"global_step\"] for n in name):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n if '_slot_' in name[-1]:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n pointer = model\n array = tf_weights[txt_name]\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+_\\d+', m_name):\n l = re.split(r'_(\\d+)', m_name)\n else:\n l = [m_name]\n if l[0] in ['kernel', 'scale', 'embedding']:\n pointer = getattr(pointer, 'weight')\n # elif l[0] == 'scale':\n # pointer = getattr(pointer, 'weight')\n # elif l[0] == 'output_bias' or l[0] == 'beta':\n # pointer = getattr(pointer, 'bias')\n # elif l[0] == 'squad':\n # pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, l[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n if l[0] not in ['kernel', 'scale', 'embedding']:\n pointer = getattr(pointer, 'weight')\n if l[0] != 'embedding':\n logger.info(\"Transposing numpy weight of shape {} for {}\".format(array.shape, name))\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array.astype(np.float32))\n tf_weights.pop(txt_name, None)\n\n logger.info(\"Weights not copied to PyTorch model: {}\".format(', '.join(tf_weights.keys())))\n # logger.info(\"Weights not copied to PyTorch model: {}\".format(', '.join(tf_weights.keys())))\n return model\n\n\n####################################################\n# PyTorch Models are constructed by sub-classing\n# - torch.nn.Module for the layers and\n# - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module)\n####################################################\n\nclass T5LayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-6):\n \"\"\" Construct a layernorm module in the T5 style\n No bias and no substraction of mean.\n \"\"\"\n super(T5LayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n variance = x.pow(2).mean(-1, keepdim=True)\n x = x / torch.sqrt(variance + self.variance_epsilon)\n return self.weight * x\n\n\nclass T5DenseReluDense(nn.Module):\n def __init__(self, config):\n super(T5DenseReluDense, self).__init__()\n self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)\n self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(self, hidden_states):\n h = self.wi(hidden_states)\n h = F.relu(h)\n h = 
self.dropout(h)\n h = self.wo(h)\n return h\n\n\nclass T5LayerFF(nn.Module):\n def __init__(self, config):\n super(T5LayerFF, self).__init__()\n self.DenseReluDense = T5DenseReluDense(config)\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(self, hidden_states):\n norm_x = self.layer_norm(hidden_states)\n y = self.DenseReluDense(norm_x)\n layer_output = hidden_states + self.dropout(y)\n return layer_output\n\n\nclass T5Attention(nn.Module):\n NEW_ID = itertools.count()\n\n def __init__(self, config, has_relative_attention_bias=False):\n super(T5Attention, self).__init__()\n self.layer_id = next(T5Attention.NEW_ID)\n self.is_decoder = config.is_decoder\n self.has_relative_attention_bias = has_relative_attention_bias\n\n self.output_attentions = config.output_attentions\n self.relative_attention_num_buckets = config.relative_attention_num_buckets\n self.d_model = config.d_model\n self.d_kv = config.d_kv\n self.n_heads = config.num_heads\n self.dropout = config.dropout_rate\n self.inner_dim = self.n_heads * self.d_kv\n\n # Mesh TensorFlow initialization to avoid scaling before softmax\n self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)\n\n if self.has_relative_attention_bias:\n self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n mask = torch.ones(self.n_heads, self.d_kv)\n heads = set(heads) - self.pruned_heads\n for head in heads:\n head -= sum(1 if h < head else 0 for h in self.pruned_heads)\n mask[head] = 0\n mask = mask.view(-1).contiguous().eq(1)\n index = torch.arange(len(mask))[mask].long()\n # Prune linear layers\n self.q = prune_linear_layer(self.q, index)\n self.k = prune_linear_layer(self.k, index)\n self.v = prune_linear_layer(self.v, index)\n self.o = prune_linear_layer(self.o, index, dim=1)\n # Update hyper params\n self.n_heads = self.n_heads - len(heads)\n self.inner_dim = self.d_kv * self.n_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n @staticmethod\n def _relative_position_bucket(relative_position,\n bidirectional=True,\n num_buckets=32,\n max_distance=128):\n \"\"\"\n Adapted from Mesh Tensorflow:\n https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593\n\n Translate relative position to a bucket number for relative attention.\n The relative position is defined as memory_position - query_position, i.e.\n the distance in tokens from the attending position to the attended-to\n position. If bidirectional=False, then positive relative positions are\n invalid.\n We use smaller buckets for small absolute relative_position and larger buckets\n for larger absolute relative_positions. All relative positions >=max_distance\n map to the same bucket. All relative positions <=-max_distance map to the\n same bucket. 
This should allow for more graceful generalization to longer\n sequences than the model has been trained on.\n Args:\n relative_position: an int32 Tensor\n bidirectional: a boolean - whether the attention is bidirectional\n num_buckets: an integer\n max_distance: an integer\n Returns:\n a Tensor with the same shape as relative_position, containing int32\n values in the range [0, num_buckets)\n \"\"\"\n ret = 0\n n = -relative_position\n if bidirectional:\n num_buckets //= 2\n ret += (n < 0).to(torch.long) * num_buckets # mtf.to_int32(mtf.less(n, 0)) * num_buckets\n n = torch.abs(n)\n else:\n n = torch.max(n, torch.zeros_like(n))\n # now n is in the range [0, inf)\n\n # half of the buckets are for exact increments in positions\n max_exact = num_buckets // 2\n is_small = (n < max_exact)\n\n # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance\n val_if_large = max_exact + (\n torch.log(n.float() / max_exact)\n / math.log(max_distance / max_exact) * (num_buckets - max_exact)).to(torch.long)\n val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))\n\n ret += torch.where(is_small, n, val_if_large)\n return ret\n\n def compute_bias(self, qlen, klen):\n \"\"\" Compute binned relative position bias \"\"\"\n context_position = torch.arange(qlen, dtype=torch.long)[:, None]\n memory_position = torch.arange(klen, dtype=torch.long)[None, :]\n relative_position = memory_position - context_position # shape (qlen, klen)\n rp_bucket = self._relative_position_bucket(relative_position, # shape (qlen, klen)\n bidirectional=not self.is_decoder,\n num_buckets=self.relative_attention_num_buckets)\n values = self.relative_attention_bias(rp_bucket) # shape (qlen, klen, num_heads)\n values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, qlen, klen)\n return values\n\n def forward(self, input, mask=None, kv=None, position_bias=None, cache=None, head_mask=None):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n bs, qlen, dim = input.size()\n if kv is None:\n klen = qlen if cache is None else cache['slen'] + qlen\n else:\n klen = kv.size(1)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2)\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim)\n\n q = shape(self.q(input)) # (bs, n_heads, qlen, dim_per_head)\n if kv is None:\n k = shape(self.k(input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v(input)) # (bs, n_heads, qlen, dim_per_head)\n elif cache is None or self.layer_id not in cache:\n k = v = kv\n k = shape(self.k(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if cache is not None:\n if self.layer_id in cache:\n if kv is None:\n k_, v_ = cache[self.layer_id]\n k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)\n v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = cache[self.layer_id]\n cache[self.layer_id] = (k, v)\n\n # q = q / math.sqrt(dim_per_head) # No scaling in T5\n scores = torch.einsum('bnqd,bnkd->bnqk', q, k) # (bs, n_heads, qlen, klen)\n\n if position_bias is None:\n if not self.has_relative_attention_bias:\n raise ValueError(\"No position_bias provided and no weights to compute position_bias\")\n position_bias = self.compute_bias(qlen, 
klen)\n if mask is not None:\n position_bias = position_bias + mask # (bs, n_heads, qlen, klen)\n\n scores += position_bias\n weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)\n weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n context = unshape(context) # (bs, qlen, dim)\n\n context = self.o(context)\n\n outputs = (context,)\n if self.output_attentions:\n outputs = outputs + (weights,)\n if self.has_relative_attention_bias:\n outputs = outputs + (position_bias,)\n return outputs\n\n\nclass T5LayerSelfAttention(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super(T5LayerSelfAttention, self).__init__()\n self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(self, hidden_states, attention_mask=None, position_bias=None, head_mask=None):\n norm_x = self.layer_norm(hidden_states)\n attention_output = self.SelfAttention(norm_x,\n mask=attention_mask,\n position_bias=position_bias,\n head_mask=head_mask)\n y = attention_output[0]\n layer_output = hidden_states + self.dropout(y)\n outputs = (layer_output,) + attention_output[1:] # add attentions if we output them\n return outputs\n\n\nclass T5LayerCrossAttention(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super(T5LayerCrossAttention, self).__init__()\n self.EncDecAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(self, hidden_states, kv, attention_mask=None, position_bias=None, head_mask=None):\n norm_x = self.layer_norm(hidden_states)\n attention_output = self.EncDecAttention(norm_x,\n mask=attention_mask,\n kv=kv,\n position_bias=position_bias,\n head_mask=head_mask)\n y = attention_output[0]\n layer_output = hidden_states + self.dropout(y)\n outputs = (layer_output,) + attention_output[1:] # add attentions if we output them\n return outputs\n\n\nclass T5Block(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super(T5Block, self).__init__()\n self.is_decoder = config.is_decoder\n self.layer = nn.ModuleList()\n self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))\n if self.is_decoder:\n self.layer.append(T5LayerCrossAttention(config, has_relative_attention_bias=has_relative_attention_bias))\n self.layer.append(T5LayerFF(config))\n else:\n self.layer.append(T5LayerFF(config))\n\n def forward(self, hidden_states, attention_mask=None, position_bias=None,\n encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None,\n head_mask=None):\n self_attention_outputs = self.layer[0](hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n head_mask=head_mask)\n hidden_states = self_attention_outputs[0]\n outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights\n\n if not self.is_decoder:\n hidden_states = self.layer[1](hidden_states)\n else:\n cross_attention_outputs = self.layer[1](hidden_states,\n 
kv=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n position_bias=encoder_decoder_position_bias,\n head_mask=head_mask)\n hidden_states = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:] # Keep cross-attention outputs and relative position weights\n hidden_states = self.layer[2](hidden_states)\n\n outputs = (hidden_states,) + outputs # add attentions if we output them\n return outputs # hidden-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n\n\nclass T5PreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n config_class = T5Config\n pretrained_model_archive_map = T5_PRETRAINED_MODEL_ARCHIVE_MAP\n load_tf_weights = load_tf_weights_in_t5\n base_model_prefix = \"transformer\"\n\n @property\n def dummy_inputs(self):\n input_ids = torch.tensor(DUMMY_INPUTS)\n input_mask = torch.tensor(DUMMY_MASK)\n dummy_inputs = {'decoder_input_ids': input_ids,\n 'encoder_input_ids': input_ids,\n 'decoder_attention_mask': input_mask}\n return dummy_inputs\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n factor = self.config.initializer_factor # Used for testing weights initialization\n if isinstance(module, T5LayerNorm):\n module.weight.data.fill_(factor*1.0)\n elif isinstance(module, (T5Model, T5WithLMHeadModel)):\n # Mesh TensorFlow embeddings initialization\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624\n module.shared.weight.data.normal_(mean=0.0, std=factor*1.0)\n elif isinstance(module, T5DenseReluDense):\n # Mesh TensorFlow FF initialization\n # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56\n # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89\n module.wi.weight.data.normal_(mean=0.0, std=factor*((self.config.d_model) ** -0.5))\n if hasattr(module.wi, 'bias') and module.wi.bias is not None:\n module.wi.bias.data.zero_()\n module.wo.weight.data.normal_(mean=0.0, std=factor*((self.config.d_ff) ** -0.5))\n if hasattr(module.wo, 'bias') and module.wo.bias is not None:\n module.wo.bias.data.zero_()\n elif isinstance(module, T5Attention):\n # Mesh TensorFlow attention initialization to avoid scaling before softmax\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136\n d_model = self.config.d_model\n d_kv = self.config.d_kv\n n_heads = self.config.num_heads\n module.q.weight.data.normal_(mean=0.0, std=factor*((d_model * d_kv) ** -0.5))\n module.k.weight.data.normal_(mean=0.0, std=factor*(d_model ** -0.5))\n module.v.weight.data.normal_(mean=0.0, std=factor*(d_model ** -0.5))\n module.o.weight.data.normal_(mean=0.0, std=factor*((n_heads * d_kv) ** -0.5))\n if module.has_relative_attention_bias:\n module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor*((d_model) ** -0.5))\n\n\nclass T5Stack(T5PreTrainedModel):\n def __init__(self, config):\n super(T5Stack, self).__init__(config)\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.is_decoder = config.is_decoder\n\n self.block = nn.ModuleList([T5Block(config, has_relative_attention_bias=bool(i == 0))\n for i in range(config.num_layers)])\n\n self.bridges = Bridges.get_layer2layer_bridges(config)\n\n self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n self.init_weights()\n\n def forward(self,\n hidden_states,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n head_mask=None):\n\n batch_size, seq_length = hidden_states.shape[0], hidden_states.shape[1]\n if attention_mask is None:\n attention_mask = torch.ones(batch_size, seq_length).to(hidden_states.device)\n if self.is_decoder and encoder_attention_mask is None:\n encoder_seq_length = encoder_hidden_states.shape[1]\n encoder_attention_mask = torch.ones(batch_size, encoder_seq_length).to(hidden_states.device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n # Provided a padding mask of dimensions [batch_size, seq_length]\n # - if the model is a decoder, apply a causal mask in addition to the padding mask\n # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder:\n seq_ids = torch.arange(seq_length, device=hidden_states.device)\n causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]\n causal_mask = causal_mask.to(attention_mask)\n extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]\n else:\n extended_attention_mask = attention_mask[:, None, None, :]\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -1e9 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n\n # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition\n # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270\n # extended_attention_mask = (extended_attention_mask == extended_attention_mask.transpose(-1, -2))\n\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -1e9\n\n if self.is_decoder:\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if encoder_attention_mask.dim() == 3:\n encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]\n if encoder_attention_mask.dim() == 2:\n encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]\n\n # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition\n # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270\n # encoder_extended_attention_mask = (encoder_extended_attention_mask == encoder_extended_attention_mask.transpose(-1, -2))\n\n encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicates we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.config.num_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer\n head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility\n else:\n head_mask = [None] * self.config.num_layers\n\n all_hidden_states = ()\n all_attentions = ()\n position_bias = None\n encoder_decoder_position_bias = None\n\n hidden_states = self.dropout(hidden_states)\n for i, layer_module in enumerate(self.block):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(hidden_states,\n attention_mask=extended_attention_mask,\n position_bias=position_bias,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n encoder_decoder_position_bias=encoder_decoder_position_bias,\n head_mask=head_mask[i])\n # layer_outputs is a tuple with:\n # hidden-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n hidden_states = layer_outputs[0]\n if i == 0:\n # We share the position biases between the layers - the first layer stores them\n # layer_outputs = hidden-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n position_bias = layer_outputs[2 if self.output_attentions else 1]\n if self.is_decoder:\n encoder_decoder_position_bias = layer_outputs[4 if self.output_attentions else 2]\n\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],) # We keep only self-attention weights for now\n\n hidden_states = self.final_layer_norm(hidden_states)\n layer_output = self.dropout(hidden_states)\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs # last-layer hidden state, (all hidden states), (all attentions)\n\n\nT5_START_DOCSTRING = r\"\"\" The T5 model was proposed in\n `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer`_\n by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. 
Liu.\n It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting.\n\n This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and\n refer to the PyTorch documentation for all matter related to general usage and behavior.\n\n .. _`Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer`:\n https://arxiv.org/abs/1910.10683\n\n .. _`torch.nn.Module`:\n https://pytorch.org/docs/stable/nn.html#module\n\n Parameters:\n config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model. \n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nT5_INPUTS_DOCSTRING = r\"\"\"\n Inputs:\n **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of input sequence tokens in the vocabulary.\n To match pre-training, T5 input sequence should be formatted with [CLS] and [SEP] tokens as follows:\n\n (a) For sequence pairs:\n\n ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``\n\n (b) For single sequences:\n\n ``tokens: [CLS] the dog is hairy . [SEP]``\n\n T5 is a model with relative position embeddings so you should be able to pad the inputs on\n the right or the left.\n\n Indices can be obtained using :class:`transformers.T5Tokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.\n **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\n\"\"\"\n\n@add_start_docstrings(\"The bare T5 Model transformer outputting raw hidden-states\"\n \"without any specific head on top.\",\n T5_START_DOCSTRING, T5_INPUTS_DOCSTRING)\nclass T5Model(T5PreTrainedModel):\n r\"\"\"\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``\n Sequence of hidden-states at the output of the last layer of the model.\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = T5Tokenizer.from_pretrained('t5-small')\n model = T5Model.from_pretrained('t5-small')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is 
cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n def __init__(self, config):\n super(T5Model, self).__init__(config)\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\n\n encoder_config = copy.deepcopy(config)\n self.encoder = T5Stack(encoder_config)\n\n decoder_config = copy.deepcopy(config)\n decoder_config.is_decoder = True\n self.decoder = T5Stack(decoder_config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, new_embeddings):\n self.shared = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n def forward(self, **kwargs):\n # keyword arguments come in 3 flavors: encoder-specific (prefixed by\n # `encoder_`), decoder-specific (prefixed by `decoder_`) and those\n # that apply to the model as whole.\n # We let the specific kwargs override the common ones in case of conflict.\n kwargs_common = dict((k, v) for k, v in kwargs.items()\n if not k.startswith(\"encoder_\") and not k.startswith(\"decoder_\"))\n kwargs_encoder = kwargs_common.copy()\n kwargs_decoder = kwargs_common.copy()\n kwargs_encoder.update(dict((k[len(\"encoder_\"):], v) for k, v in kwargs.items() if k.startswith(\"encoder_\")))\n kwargs_decoder.update(dict((k[len(\"decoder_\"):], v) for k, v in kwargs.items() if k.startswith(\"decoder_\")))\n\n # Encode if needed (training, first prediction pass)\n encoder_hidden_states = kwargs_encoder.pop(\"hidden_states\", None)\n encoder_attention_mask = kwargs_encoder.get(\"attention_mask\", None)\n if encoder_hidden_states is None:\n # Convert encoder inputs in embeddings if needed\n hidden_states = kwargs_encoder.pop(\"inputs_embeds\", None)\n if hidden_states is None:\n encoder_inputs_ids = kwargs_encoder.pop(\"input_ids\")\n hidden_states = self.shared(encoder_inputs_ids) # Convert inputs in embeddings\n\n if encoder_attention_mask is not None:\n # Apply masking\n encoder_attention_mask = (encoder_attention_mask != 0).to(hidden_states)\n hidden_states = hidden_states * encoder_attention_mask.unsqueeze(-1)\n\n encoder_outputs = self.encoder(hidden_states, **kwargs_encoder)\n encoder_hidden_states = encoder_outputs[0]\n else:\n encoder_outputs = ()\n\n # Decode\n # Convert decoder inputs in embeddings if needed\n hidden_states = kwargs_decoder.pop(\"inputs_embeds\", None)\n if hidden_states is None:\n decoder_inputs_ids = kwargs_decoder.pop(\"input_ids\")\n hidden_states = self.shared(decoder_inputs_ids)\n\n kwargs_decoder[\"encoder_hidden_states\"] = encoder_hidden_states\n kwargs_decoder[\"encoder_attention_mask\"] = encoder_attention_mask\n decoder_outputs = self.decoder(hidden_states, **kwargs_decoder)\n\n return decoder_outputs + encoder_outputs\n\n\n@add_start_docstrings(\"\"\"T5 Model with a `language modeling` head on top. 
\"\"\",\n T5_START_DOCSTRING, T5_INPUTS_DOCSTRING)\nclass T5WithLMHeadModel(T5PreTrainedModel):\n r\"\"\"\n **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Masked language modeling loss.\n **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = T5Tokenizer.from_pretrained('t5-small')\n model = T5WithLMHeadModel.from_pretrained('t5-small')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, lm_labels=input_ids)\n loss, prediction_scores = outputs[:2]\n\n \"\"\"\n def __init__(self, config):\n super(T5WithLMHeadModel, self).__init__(config)\n self.model_dim = config.d_model\n\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\n\n encoder_config = copy.deepcopy(config)\n self.encoder = T5Stack(encoder_config)\n\n decoder_config = copy.deepcopy(config)\n decoder_config.is_decoder = True\n self.decoder = T5Stack(decoder_config)\n\n self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, new_embeddings):\n self.shared = new_embeddings\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def forward(self, **kwargs):\n # keyword arguments come in 3 flavors: encoder-specific (prefixed by\n # `encoder_`), decoder-specific (prefixed by `decoder_`) and those\n # that apply to the model as whole.\n # We let the specific kwargs override the common ones in case of conflict.\n\n lm_labels = kwargs.pop('decoder_lm_labels', None)\n\n kwargs_common = dict((k, v) for k, v in kwargs.items()\n if not k.startswith(\"encoder_\") and not k.startswith(\"decoder_\"))\n kwargs_encoder = kwargs_common.copy()\n kwargs_decoder = kwargs_common.copy()\n kwargs_encoder.update(dict((k[len(\"encoder_\"):], v) for k, v in kwargs.items() if k.startswith(\"encoder_\")))\n kwargs_decoder.update(dict((k[len(\"decoder_\"):], v) for k, v in kwargs.items() if k.startswith(\"decoder_\")))\n\n # Encode if needed (training, first prediction pass)\n encoder_hidden_states = kwargs_encoder.pop(\"hidden_states\", None)\n if encoder_hidden_states is 
None:\n # Convert encoder inputs in embeddings if needed\n hidden_states = kwargs_encoder.pop(\"inputs_embeds\", None)\n if hidden_states is None:\n encoder_inputs_ids = kwargs_encoder.pop(\"input_ids\")\n hidden_states = self.shared(encoder_inputs_ids) # Convert inputs in embeddings\n\n encoder_outputs = self.encoder(hidden_states, **kwargs_encoder)\n encoder_hidden_states = encoder_outputs[0]\n else:\n encoder_outputs = ()\n\n # Decode\n # Convert decoder inputs in embeddings if needed\n hidden_states = kwargs_decoder.pop(\"inputs_embeds\", None)\n if hidden_states is None:\n decoder_inputs_ids = kwargs_decoder.pop(\"input_ids\")\n hidden_states = self.shared(decoder_inputs_ids)\n\n kwargs_decoder[\"encoder_hidden_states\"] = encoder_hidden_states\n kwargs_decoder[\"encoder_attention_mask\"] = kwargs_encoder.get(\"attention_mask\", None)\n decoder_outputs = self.decoder(hidden_states, **kwargs_decoder)\n\n sequence_output = decoder_outputs[0]\n # Rescale output before projecting on vocab\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\n sequence_output = sequence_output * (self.model_dim ** -0.5)\n lm_logits = self.lm_head(sequence_output)\n\n decoder_outputs = (lm_logits,) + decoder_outputs[1:] # Add hidden states and attention if they are here\n if lm_labels is not None:\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = lm_labels[..., 1:].contiguous()\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1))\n decoder_outputs = (loss,) + decoder_outputs # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666\n\n return decoder_outputs + encoder_outputs\n"
] |
[
[
"torch.abs",
"torch.nn.functional.dropout",
"torch.cat",
"torch.nn.Embedding",
"torch.where",
"torch.full_like",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.sqrt",
"torch.einsum",
"torch.tensor",
"torch.nn.functional.relu",
"torch.arange",
"tensorflow.train.list_variables",
"torch.nn.ModuleList",
"torch.zeros_like",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"numpy.transpose",
"torch.matmul"
]
] |
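The docstrings in this row already carry usage examples; below is a sketch adapted from them, with keyword arguments routed through the file's `forward(**kwargs)` convention (`encoder_`/`decoder_` prefixes). The import path is an assumption about this fork's packaging.

```python
# Adapted from the docstring examples in modeling_t5.py above; assumes the fork
# installs as `transformers` with the same module layout.
import torch
from transformers import T5Tokenizer, T5WithLMHeadModel

tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = T5WithLMHeadModel.from_pretrained('t5-small')

input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # batch size 1
outputs = model(encoder_input_ids=input_ids,
                decoder_input_ids=input_ids,
                decoder_lm_labels=input_ids)  # prefixes route kwargs to encoder/decoder
loss, prediction_scores = outputs[:2]
```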
benfred/cudf
|
[
"3cd4c9f0602840dddb9a0e247d5a0bcf3d7266e1"
] |
[
"python/cudf/cudf/tests/test_indexing.py"
] |
[
"from itertools import combinations\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport cudf\nfrom cudf import DataFrame, Series\nfrom cudf.tests import utils\nfrom cudf.tests.utils import assert_eq\n\nindex_dtypes = [np.int64, np.int32, np.int16, np.int8]\n\n\n@pytest.fixture\ndef pdf_gdf():\n pdf = pd.DataFrame(\n {\"a\": [1, 2, 3], \"b\": [\"c\", \"d\", \"e\"]}, index=[\"one\", \"two\", \"three\"]\n )\n gdf = cudf.from_pandas(pdf)\n return pdf, gdf\n\n\n@pytest.fixture\ndef pdf_gdf_multi():\n pdf = pd.DataFrame(np.random.rand(7, 5))\n pdfIndex = pd.MultiIndex(\n [\n [\"a\", \"b\", \"c\"],\n [\"house\", \"store\", \"forest\"],\n [\"clouds\", \"clear\", \"storm\"],\n [\"fire\", \"smoke\", \"clear\"],\n ],\n [\n [0, 0, 0, 0, 1, 1, 2],\n [1, 1, 1, 1, 0, 0, 2],\n [0, 0, 2, 2, 2, 0, 1],\n [0, 0, 0, 1, 2, 0, 1],\n ],\n )\n pdfIndex.names = [\"alpha\", \"location\", \"weather\", \"sign\"]\n pdf.index = pdfIndex\n gdf = cudf.from_pandas(pdf)\n return pdf, gdf\n\n\n@pytest.mark.parametrize(\n \"i1, i2, i3\",\n (\n [\n (slice(None, 12), slice(3, None), slice(None, None, 2)),\n (range(12), range(3, 12), range(0, 9, 2)),\n (np.arange(12), np.arange(3, 12), np.arange(0, 9, 2)),\n (list(range(12)), list(range(3, 12)), list(range(0, 9, 2))),\n (\n pd.Series(range(12)),\n pd.Series(range(3, 12)),\n pd.Series(range(0, 9, 2)),\n ),\n (Series(range(12)), Series(range(3, 12)), Series(range(0, 9, 2))),\n (\n [i in range(12) for i in range(20)],\n [i in range(3, 12) for i in range(12)],\n [i in range(0, 9, 2) for i in range(9)],\n ),\n (\n np.array([i in range(12) for i in range(20)], dtype=bool),\n np.array([i in range(3, 12) for i in range(12)], dtype=bool),\n np.array([i in range(0, 9, 2) for i in range(9)], dtype=bool),\n ),\n ]\n + [\n (\n np.arange(12, dtype=t),\n np.arange(3, 12, dtype=t),\n np.arange(0, 9, 2, dtype=t),\n )\n for t in index_dtypes\n ]\n ),\n ids=(\n [\n \"slice\",\n \"range\",\n \"numpy.array\",\n \"list\",\n \"pandas.Series\",\n \"Series\",\n \"list[bool]\",\n \"numpy.array[bool]\",\n ]\n + [\"numpy.array[%s]\" % t.__name__ for t in index_dtypes]\n ),\n)\ndef test_series_indexing(i1, i2, i3):\n a1 = np.arange(20)\n series = Series(a1)\n # Indexing\n sr1 = series[i1]\n assert sr1.null_count == 0\n np.testing.assert_equal(sr1.to_array(), a1[:12])\n sr2 = sr1[i2]\n assert sr2.null_count == 0\n np.testing.assert_equal(sr2.to_array(), a1[3:12])\n # Index with stride\n sr3 = sr2[i3]\n assert sr3.null_count == 0\n np.testing.assert_equal(sr3.to_array(), a1[3:12:2])\n\n # Integer indexing\n if isinstance(i1, range):\n for i in i1: # Python int-s\n assert series[i] == a1[i]\n if isinstance(i1, np.ndarray) and i1.dtype in index_dtypes:\n for i in i1: # numpy integers\n assert series[i] == a1[i]\n\n\ndef test_dataframe_column_name_indexing():\n df = DataFrame()\n data = np.asarray(range(10), dtype=np.int32)\n df[\"a\"] = data\n df[1] = data\n np.testing.assert_equal(\n df[\"a\"].to_array(), np.asarray(range(10), dtype=np.int32)\n )\n np.testing.assert_equal(\n df[1].to_array(), np.asarray(range(10), dtype=np.int32)\n )\n\n pdf = pd.DataFrame()\n nelem = 10\n pdf[\"key1\"] = np.random.randint(0, 5, nelem)\n pdf[\"key2\"] = np.random.randint(0, 3, nelem)\n pdf[1] = np.arange(1, 1 + nelem)\n pdf[2] = np.random.random(nelem)\n df = DataFrame.from_pandas(pdf)\n\n assert_eq(df[df.columns], df)\n assert_eq(df[df.columns[:1]], df[[\"key1\"]])\n\n for i in range(1, len(pdf.columns) + 1):\n for idx in combinations(pdf.columns, i):\n assert 
pdf[list(idx)].equals(df[list(idx)].to_pandas())\n\n # test for only numeric columns\n df = pd.DataFrame()\n for i in range(0, 10):\n df[i] = range(nelem)\n gdf = DataFrame.from_pandas(df)\n assert_eq(gdf, df)\n\n assert_eq(gdf[gdf.columns], gdf)\n assert_eq(gdf[gdf.columns[:3]], gdf[[0, 1, 2]])\n\n\ndef test_dataframe_slicing():\n df = DataFrame()\n size = 123\n df[\"a\"] = ha = np.random.randint(low=0, high=100, size=size).astype(\n np.int32\n )\n df[\"b\"] = hb = np.random.random(size).astype(np.float32)\n df[\"c\"] = hc = np.random.randint(low=0, high=100, size=size).astype(\n np.int64\n )\n df[\"d\"] = hd = np.random.random(size).astype(np.float64)\n\n # Row slice first 10\n first_10 = df[:10]\n assert len(first_10) == 10\n assert tuple(first_10.columns) == (\"a\", \"b\", \"c\", \"d\")\n np.testing.assert_equal(first_10[\"a\"].to_array(), ha[:10])\n np.testing.assert_equal(first_10[\"b\"].to_array(), hb[:10])\n np.testing.assert_equal(first_10[\"c\"].to_array(), hc[:10])\n np.testing.assert_equal(first_10[\"d\"].to_array(), hd[:10])\n del first_10\n\n # Row slice last 10\n last_10 = df[-10:]\n assert len(last_10) == 10\n assert tuple(last_10.columns) == (\"a\", \"b\", \"c\", \"d\")\n np.testing.assert_equal(last_10[\"a\"].to_array(), ha[-10:])\n np.testing.assert_equal(last_10[\"b\"].to_array(), hb[-10:])\n np.testing.assert_equal(last_10[\"c\"].to_array(), hc[-10:])\n np.testing.assert_equal(last_10[\"d\"].to_array(), hd[-10:])\n del last_10\n\n # Row slice [begin:end]\n begin = 7\n end = 121\n subrange = df[begin:end]\n assert len(subrange) == end - begin\n assert tuple(subrange.columns) == (\"a\", \"b\", \"c\", \"d\")\n np.testing.assert_equal(subrange[\"a\"].to_array(), ha[begin:end])\n np.testing.assert_equal(subrange[\"b\"].to_array(), hb[begin:end])\n np.testing.assert_equal(subrange[\"c\"].to_array(), hc[begin:end])\n np.testing.assert_equal(subrange[\"d\"].to_array(), hd[begin:end])\n del subrange\n\n\n@pytest.mark.parametrize(\"step\", [1, 2, 5])\n@pytest.mark.parametrize(\"scalar\", [0, 20, 100])\ndef test_dataframe_loc(scalar, step):\n size = 123\n pdf = pd.DataFrame(\n {\n \"a\": np.random.randint(low=0, high=100, size=size),\n \"b\": np.random.random(size).astype(np.float32),\n \"c\": np.random.random(size).astype(np.float64),\n \"d\": np.random.random(size).astype(np.float64),\n }\n )\n\n df = DataFrame.from_pandas(pdf)\n\n assert_eq(df.loc[:, [\"a\"]], pdf.loc[:, [\"a\"]])\n\n assert_eq(df.loc[:, \"d\"], pdf.loc[:, \"d\"])\n\n # Scalar label\n assert_eq(df.loc[scalar], pdf.loc[scalar])\n\n # Full slice\n assert_eq(df.loc[:, \"c\"], pdf.loc[:, \"c\"])\n\n begin = 110\n end = 122\n\n assert_eq(\n df.loc[begin:end:step, [\"c\", \"d\", \"a\"]],\n pdf.loc[begin:end:step, [\"c\", \"d\", \"a\"]],\n )\n\n assert_eq(df.loc[begin:end, [\"c\", \"d\"]], pdf.loc[begin:end, [\"c\", \"d\"]])\n\n # Slicing on columns:\n assert_eq(\n df.loc[begin:end:step, \"a\":\"c\"], pdf.loc[begin:end:step, \"a\":\"c\"]\n )\n\n # Slicing of size 1:\n assert_eq(df.loc[begin:begin, \"a\"], pdf.loc[begin:begin, \"a\"])\n\n # TODO: Pandas changes the dtype here when it shouldn't\n assert_eq(\n df.loc[begin, \"a\":\"a\"], pdf.loc[begin, \"a\":\"a\"], check_dtype=False\n )\n\n # Make int64 index\n offset = 50\n df2 = df[offset:]\n pdf2 = pdf[offset:]\n begin = 117\n end = 122\n assert_eq(\n df2.loc[begin:end, [\"c\", \"d\", \"a\"]],\n pdf2.loc[begin:end, [\"c\", \"d\", \"a\"]],\n )\n\n\n@pytest.mark.xfail(raises=IndexError, reason=\"label scalar is out of bound\")\ndef test_dataframe_loc_outbound():\n df 
= DataFrame()\n size = 10\n df[\"a\"] = ha = np.random.randint(low=0, high=100, size=size).astype(\n np.int32\n )\n df[\"b\"] = hb = np.random.random(size).astype(np.float32)\n\n pdf = pd.DataFrame()\n pdf[\"a\"] = ha\n pdf[\"b\"] = hb\n\n np.testing.assert_equal(df.loc[11].to_array(), pdf.loc[11])\n\n\ndef test_series_loc_numerical():\n ps = pd.Series([1, 2, 3, 4, 5], index=[5, 6, 7, 8, 9])\n gs = Series.from_pandas(ps)\n\n assert_eq(ps.loc[5], gs.loc[5])\n assert_eq(ps.loc[6], gs.loc[6])\n assert_eq(ps.loc[6:8], gs.loc[6:8])\n assert_eq(ps.loc[:8], gs.loc[:8])\n assert_eq(ps.loc[6:], gs.loc[6:])\n assert_eq(ps.loc[::2], gs.loc[::2])\n assert_eq(ps.loc[[5, 8, 9]], gs.loc[[5, 8, 9]])\n assert_eq(\n ps.loc[[True, False, True, False, True]],\n gs.loc[[True, False, True, False, True]],\n )\n\n\ndef test_series_loc_string():\n ps = pd.Series(\n [1, 2, 3, 4, 5], index=[\"one\", \"two\", \"three\", \"four\", \"five\"]\n )\n gs = Series.from_pandas(ps)\n\n assert_eq(ps.loc[\"one\"], gs.loc[\"one\"])\n assert_eq(ps.loc[\"five\"], gs.loc[\"five\"])\n assert_eq(ps.loc[\"two\":\"four\"], gs.loc[\"two\":\"four\"])\n assert_eq(ps.loc[:\"four\"], gs.loc[:\"four\"])\n assert_eq(ps.loc[\"two\":], gs.loc[\"two\":])\n assert_eq(ps.loc[::2], gs.loc[::2])\n assert_eq(ps.loc[[\"one\", \"four\", \"five\"]], gs.loc[[\"one\", \"four\", \"five\"]])\n assert_eq(\n ps.loc[[True, False, True, False, True]],\n gs.loc[[True, False, True, False, True]],\n )\n\n\ndef test_series_loc_datetime():\n ps = pd.Series(\n [1, 2, 3, 4, 5], index=pd.date_range(\"20010101\", \"20010105\")\n )\n gs = Series.from_pandas(ps)\n\n # a few different ways of specifying a datetime label:\n assert_eq(ps.loc[\"20010101\"], gs.loc[\"20010101\"])\n assert_eq(ps.loc[\"2001-01-01\"], gs.loc[\"2001-01-01\"])\n assert_eq(\n ps.loc[pd.to_datetime(\"2001-01-01\")],\n gs.loc[pd.to_datetime(\"2001-01-01\")],\n )\n assert_eq(\n ps.loc[np.datetime64(\"2001-01-01\")],\n gs.loc[np.datetime64(\"2001-01-01\")],\n )\n\n assert_eq(\n ps.loc[\"2001-01-02\":\"2001-01-05\"], gs.loc[\"2001-01-02\":\"2001-01-05\"]\n )\n assert_eq(ps.loc[\"2001-01-02\":], gs.loc[\"2001-01-02\":])\n assert_eq(ps.loc[:\"2001-01-04\"], gs.loc[:\"2001-01-04\"])\n assert_eq(ps.loc[::2], gs.loc[::2])\n #\n # assert_eq(ps.loc[['2001-01-01', '2001-01-04', '2001-01-05']],\n # gs.loc[['2001-01-01', '2001-01-04', '2001-01-05']])\n # looks like a bug in Pandas doesn't let us check for the above,\n # so instead:\n assert_eq(\n ps.loc[\n [\n pd.to_datetime(\"2001-01-01\"),\n pd.to_datetime(\"2001-01-04\"),\n pd.to_datetime(\"2001-01-05\"),\n ]\n ],\n gs.loc[\n [\n pd.to_datetime(\"2001-01-01\"),\n pd.to_datetime(\"2001-01-04\"),\n pd.to_datetime(\"2001-01-05\"),\n ]\n ],\n )\n assert_eq(\n ps.loc[[True, False, True, False, True]],\n gs.loc[[True, False, True, False, True]],\n )\n\n\ndef test_series_loc_categorical():\n ps = pd.Series(\n [1, 2, 3, 4, 5], index=pd.Categorical([\"a\", \"b\", \"c\", \"d\", \"e\"])\n )\n gs = Series.from_pandas(ps)\n\n assert_eq(ps.loc[\"a\"], gs.loc[\"a\"])\n assert_eq(ps.loc[\"e\"], gs.loc[\"e\"])\n assert_eq(ps.loc[\"b\":\"d\"], gs.loc[\"b\":\"d\"])\n assert_eq(ps.loc[:\"d\"], gs.loc[:\"d\"])\n assert_eq(ps.loc[\"b\":], gs.loc[\"b\":])\n assert_eq(ps.loc[::2], gs.loc[::2])\n\n # order of categories changes, so we can only\n # compare values:\n assert_eq(\n ps.loc[[\"a\", \"d\", \"e\"]].values, gs.loc[[\"a\", \"d\", \"e\"]].to_array()\n )\n\n assert_eq(\n ps.loc[[True, False, True, False, True]],\n gs.loc[[True, False, True, False, True]],\n 
)\n\n\n@pytest.mark.parametrize(\n \"obj\",\n [\n pd.DataFrame(\n {\"a\": [1, 2, 3, 4]},\n index=pd.MultiIndex.from_frame(\n pd.DataFrame(\n {\"A\": [2, 3, 1, 4], \"B\": [\"low\", \"high\", \"high\", \"low\"]}\n )\n ),\n ),\n pd.Series(\n [1, 2, 3, 4],\n index=pd.MultiIndex.from_frame(\n pd.DataFrame(\n {\"A\": [2, 3, 1, 4], \"B\": [\"low\", \"high\", \"high\", \"low\"]}\n )\n ),\n ),\n ],\n)\ndef test_dataframe_series_loc_multiindex(obj):\n pindex = pd.MultiIndex.from_frame(\n pd.DataFrame({\"A\": [3, 2], \"B\": [\"high\", \"low\"]})\n )\n\n gobj = cudf.from_pandas(obj)\n gindex = cudf.MultiIndex.from_pandas(pindex)\n\n # cudf MultinIndex as arg\n expected = obj.loc[pindex]\n got = gobj.loc[gindex]\n assert_eq(expected, got)\n\n # pandas MultinIndex as arg\n expected = obj.loc[pindex]\n got = gobj.loc[pindex]\n assert_eq(expected, got)\n\n\n@pytest.mark.parametrize(\"nelem\", [2, 5, 20, 100])\ndef test_series_iloc(nelem):\n\n # create random series\n np.random.seed(12)\n ps = pd.Series(np.random.sample(nelem))\n\n # gpu series\n gs = Series(ps)\n\n # positive tests for indexing\n np.testing.assert_allclose(gs.iloc[-1 * nelem], ps.iloc[-1 * nelem])\n np.testing.assert_allclose(gs.iloc[-1], ps.iloc[-1])\n np.testing.assert_allclose(gs.iloc[0], ps.iloc[0])\n np.testing.assert_allclose(gs.iloc[1], ps.iloc[1])\n np.testing.assert_allclose(gs.iloc[nelem - 1], ps.iloc[nelem - 1])\n\n # positive tests for slice\n np.testing.assert_allclose(gs.iloc[-1:1], ps.iloc[-1:1])\n np.testing.assert_allclose(\n gs.iloc[nelem - 1 : -1], ps.iloc[nelem - 1 : -1]\n )\n np.testing.assert_allclose(gs.iloc[0 : nelem - 1], ps.iloc[0 : nelem - 1])\n np.testing.assert_allclose(gs.iloc[0:nelem], ps.iloc[0:nelem])\n np.testing.assert_allclose(gs.iloc[1:1], ps.iloc[1:1])\n np.testing.assert_allclose(gs.iloc[1:2], ps.iloc[1:2])\n np.testing.assert_allclose(\n gs.iloc[nelem - 1 : nelem + 1], ps.iloc[nelem - 1 : nelem + 1]\n )\n np.testing.assert_allclose(\n gs.iloc[nelem : nelem * 2], ps.iloc[nelem : nelem * 2]\n )\n\n\n@pytest.mark.parametrize(\"nelem\", [2, 5, 20, 100])\ndef test_dataframe_iloc(nelem):\n gdf = DataFrame()\n\n gdf[\"a\"] = ha = np.random.randint(low=0, high=100, size=nelem).astype(\n np.int32\n )\n gdf[\"b\"] = hb = np.random.random(nelem).astype(np.float32)\n\n pdf = pd.DataFrame()\n pdf[\"a\"] = ha\n pdf[\"b\"] = hb\n\n assert_eq(gdf.iloc[-1:1], pdf.iloc[-1:1])\n assert_eq(gdf.iloc[nelem - 1 : -1], pdf.iloc[nelem - 1 : -1])\n assert_eq(gdf.iloc[0 : nelem - 1], pdf.iloc[0 : nelem - 1])\n assert_eq(gdf.iloc[0:nelem], pdf.iloc[0:nelem])\n assert_eq(gdf.iloc[1:1], pdf.iloc[1:1])\n assert_eq(gdf.iloc[1:2], pdf.iloc[1:2])\n assert_eq(gdf.iloc[nelem - 1 : nelem + 1], pdf.iloc[nelem - 1 : nelem + 1])\n assert_eq(gdf.iloc[nelem : nelem * 2], pdf.iloc[nelem : nelem * 2])\n\n assert_eq(gdf.iloc[-1 * nelem], pdf.iloc[-1 * nelem])\n assert_eq(gdf.iloc[-1], pdf.iloc[-1])\n assert_eq(gdf.iloc[0], pdf.iloc[0])\n assert_eq(gdf.iloc[1], pdf.iloc[1])\n assert_eq(gdf.iloc[nelem - 1], pdf.iloc[nelem - 1])\n\n\n@pytest.mark.xfail(raises=AssertionError, reason=\"Series.index are different\")\ndef test_dataframe_iloc_tuple():\n gdf = DataFrame()\n nelem = 123\n gdf[\"a\"] = ha = np.random.randint(low=0, high=100, size=nelem).astype(\n np.int32\n )\n gdf[\"b\"] = hb = np.random.random(nelem).astype(np.float32)\n\n pdf = pd.DataFrame()\n pdf[\"a\"] = ha\n pdf[\"b\"] = hb\n\n # We don't support passing the column names into the index quite yet\n got = gdf.iloc[1, [1]]\n expect = pdf.iloc[1, [1]]\n\n assert_eq(got, 
expect)\n\n\n@pytest.mark.xfail(\n raises=IndexError, reason=\"positional indexers are out-of-bounds\"\n)\ndef test_dataframe_iloc_index_error():\n gdf = DataFrame()\n nelem = 123\n gdf[\"a\"] = ha = np.random.randint(low=0, high=100, size=nelem).astype(\n np.int32\n )\n gdf[\"b\"] = hb = np.random.random(nelem).astype(np.float32)\n\n pdf = pd.DataFrame()\n pdf[\"a\"] = ha\n pdf[\"b\"] = hb\n\n def assert_col(g, p):\n np.testing.assert_equal(g[\"a\"].to_array(), p[\"a\"])\n np.testing.assert_equal(g[\"b\"].to_array(), p[\"b\"])\n\n assert_col(gdf.iloc[nelem * 2], pdf.iloc[nelem * 2])\n\n\n@pytest.mark.parametrize(\"ntake\", [0, 1, 10, 123, 122, 200])\ndef test_dataframe_take(ntake):\n np.random.seed(0)\n df = DataFrame()\n\n nelem = 123\n df[\"ii\"] = ii = np.random.randint(0, 20, nelem)\n df[\"ff\"] = ff = np.random.random(nelem)\n\n take_indices = np.random.randint(0, len(df), ntake)\n\n out = df.take(take_indices)\n assert len(out) == ntake\n assert out.ii.null_count == 0\n assert out.ff.null_count == 0\n np.testing.assert_array_equal(out.ii.to_array(), ii[take_indices])\n np.testing.assert_array_equal(out.ff.to_array(), ff[take_indices])\n np.testing.assert_array_equal(out.index, take_indices)\n\n\n@pytest.mark.parametrize(\"keep_index\", [True, False])\n@pytest.mark.parametrize(\"ntake\", [0, 1, 10, 123, 122, 200])\ndef test_series_take(ntake, keep_index):\n np.random.seed(0)\n nelem = 123\n\n data = np.random.randint(0, 20, nelem)\n sr = Series(data)\n\n take_indices = np.random.randint(0, len(sr), ntake)\n\n if keep_index is True:\n out = sr.take(take_indices)\n np.testing.assert_array_equal(out.to_array(), data[take_indices])\n elif keep_index is False:\n out = sr.take(take_indices, keep_index=False)\n np.testing.assert_array_equal(out.to_array(), data[take_indices])\n np.testing.assert_array_equal(\n out.index.to_array(), sr.index.to_array()\n )\n\n\n@pytest.mark.parametrize(\"nelem\", [0, 1, 5, 20, 100])\n@pytest.mark.parametrize(\"slice_start\", [None, 0, 1, 3, 10, -10])\n@pytest.mark.parametrize(\"slice_end\", [None, 0, 1, 30, 50, -1])\ndef test_dataframe_masked_slicing(nelem, slice_start, slice_end):\n gdf = DataFrame()\n gdf[\"a\"] = list(range(nelem))\n gdf[\"b\"] = list(range(nelem, 2 * nelem))\n gdf[\"a\"] = gdf[\"a\"].set_mask(utils.random_bitmask(nelem))\n gdf[\"b\"] = gdf[\"b\"].set_mask(utils.random_bitmask(nelem))\n\n def do_slice(x):\n return x[slice_start:slice_end]\n\n expect = do_slice(gdf.to_pandas())\n got = do_slice(gdf).to_pandas()\n\n pd.testing.assert_frame_equal(expect, got)\n\n\ndef test_dataframe_boolean_mask_with_None():\n pdf = pd.DataFrame({\"a\": [0, 1, 2, 3], \"b\": [0.1, 0.2, None, 0.3]})\n gdf = DataFrame.from_pandas(pdf)\n pdf_masked = pdf[[True, False, True, False]]\n gdf_masked = gdf[[True, False, True, False]]\n assert_eq(pdf_masked, gdf_masked)\n\n\n@pytest.mark.parametrize(\"dtype\", [int, float, str])\ndef test_empty_boolean_mask(dtype):\n gdf = cudf.datasets.randomdata(nrows=0, dtypes={\"a\": dtype})\n pdf = gdf.to_pandas()\n\n compare_val = dtype(1)\n\n expected = pdf[pdf.a == compare_val]\n got = gdf[gdf.a == compare_val]\n assert_eq(expected, got)\n\n expected = pdf.a[pdf.a == compare_val]\n got = gdf.a[gdf.a == compare_val]\n assert_eq(expected, got)\n\n\n@pytest.mark.parametrize(\n \"data\",\n [\n [1, 2, 3, 4],\n [1.0, 2.0, 3.0, 4.0],\n [\"one\", \"two\", \"three\", \"four\"],\n pd.Series([\"a\", \"b\", \"c\", \"d\"], dtype=\"category\"),\n pd.Series(pd.date_range(\"2010-01-01\", \"2010-01-04\")),\n ],\n)\n@pytest.mark.parametrize(\n 
\"mask\",\n [\n [True, True, True, True],\n [False, False, False, False],\n [True, False, True, False],\n [True, False, False, True],\n np.array([True, False, True, False]),\n pd.Series([True, False, True, False]),\n cudf.Series([True, False, True, False]),\n ],\n)\n@pytest.mark.parametrize(\"nulls\", [\"one\", \"some\", \"all\", \"none\"])\ndef test_series_apply_boolean_mask(data, mask, nulls):\n psr = pd.Series(data)\n\n if len(data) > 0:\n if nulls == \"one\":\n p = np.random.randint(0, 4)\n psr[p] = None\n elif nulls == \"some\":\n p1, p2 = np.random.randint(0, 4, (2,))\n psr[p1] = None\n psr[p2] = None\n elif nulls == \"all\":\n psr[:] = None\n\n gsr = cudf.from_pandas(psr)\n\n # TODO: from_pandas(psr) has dtype \"float64\"\n # when psr has dtype \"object\" and is all None\n if psr.dtype == \"object\" and nulls == \"all\":\n gsr = cudf.Series([None, None, None, None], dtype=\"object\")\n\n if isinstance(mask, cudf.Series):\n expect = psr[mask.to_pandas()]\n else:\n expect = psr[mask]\n got = gsr[mask]\n\n assert_eq(expect, got)\n\n\ndef test_dataframe_apply_boolean_mask():\n pdf = pd.DataFrame(\n {\n \"a\": [0, 1, 2, 3],\n \"b\": [0.1, 0.2, None, 0.3],\n \"c\": [\"a\", None, \"b\", \"c\"],\n }\n )\n gdf = DataFrame.from_pandas(pdf)\n assert_eq(pdf[[True, False, True, False]], gdf[[True, False, True, False]])\n\n\n\"\"\"\nThis test compares cudf and Pandas dataframe boolean indexing.\n\"\"\"\n\n\n@pytest.mark.parametrize(\n \"mask_fn\", [lambda x: x, lambda x: np.array(x), lambda x: pd.Series(x)]\n)\ndef test_dataframe_boolean_mask(mask_fn):\n mask_base = [\n True,\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n True,\n False,\n ]\n pdf = pd.DataFrame({\"x\": range(10), \"y\": range(10)})\n gdf = cudf.from_pandas(pdf)\n mask = mask_fn(mask_base)\n assert len(mask) == gdf.shape[0]\n pdf_masked = pdf[mask]\n gdf_masked = gdf[mask]\n assert pdf_masked.to_string().split() == gdf_masked.to_string().split()\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n (0, 4),\n (1, 4),\n ([0, 1], 4),\n ([0, 1], [4, 5]),\n (slice(0, 2), [4, 5]),\n (slice(1, None), [4, 5, 6, 7]),\n ([], 1),\n ([], []),\n (slice(None, None), 1),\n (slice(-1, -3), 7),\n ],\n)\n@pytest.mark.parametrize(\"nulls\", [\"none\", \"some\", \"all\"])\ndef test_series_setitem_basics(key, value, nulls):\n psr = pd.Series([1, 2, 3, 4, 5])\n if nulls == \"some\":\n psr[[0, 4]] = None\n elif nulls == \"all\":\n psr[:] = None\n gsr = cudf.from_pandas(psr)\n psr[key] = value\n gsr[key] = value\n assert_eq(psr, gsr, check_dtype=False)\n\n\ndef test_series_setitem_null():\n gsr = cudf.Series([1, 2, 3, 4])\n gsr[0] = None\n\n expect = cudf.Series([None, 2, 3, 4])\n got = gsr\n assert_eq(expect, got)\n\n gsr = cudf.Series([None, 2, 3, 4])\n gsr[0] = 1\n\n expect = cudf.Series([1, 2, 3, 4])\n got = gsr\n assert_eq(expect, got)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n (0, 4),\n (1, 4),\n ([0, 1], 4),\n ([0, 1], [4, 5]),\n (slice(0, 2), [4, 5]),\n (slice(1, None), [4, 5, 6, 7]),\n ([], 1),\n ([], []),\n (slice(None, None), 1),\n (slice(-1, -3), 7),\n ],\n)\n@pytest.mark.parametrize(\"nulls\", [\"none\", \"some\", \"all\"])\ndef test_series_setitem_iloc(key, value, nulls):\n psr = pd.Series([1, 2, 3, 4, 5])\n if nulls == \"some\":\n psr[[0, 4]] = None\n elif nulls == \"all\":\n psr[:] = None\n gsr = cudf.from_pandas(psr)\n psr.iloc[key] = value\n gsr.iloc[key] = value\n assert_eq(psr, gsr, check_dtype=False)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n (0, 0.5),\n ([0, 1], 0.5),\n ([0, 1], [0.5, 2.5]),\n 
(slice(0, 2), [0.5, 0.25]),\n ],\n)\ndef test_series_setitem_dtype(key, value):\n psr = pd.Series([1, 2, 3], dtype=\"int32\")\n gsr = cudf.from_pandas(psr)\n psr[key] = value\n gsr[key] = value\n assert_eq(psr, gsr)\n\n\ndef test_series_setitem_datetime():\n psr = pd.Series([\"2001\", \"2002\", \"2003\"], dtype=\"datetime64[ns]\")\n gsr = cudf.from_pandas(psr)\n\n psr[0] = \"2005\"\n gsr[0] = \"2005\"\n\n assert_eq(psr, gsr)\n\n\ndef test_series_setitem_categorical():\n psr = pd.Series([\"a\", \"b\", \"a\", \"c\", \"d\"], dtype=\"category\")\n gsr = cudf.from_pandas(psr)\n\n psr[0] = \"d\"\n gsr[0] = \"d\"\n assert_eq(psr, gsr)\n\n psr = psr.cat.add_categories([\"e\"])\n gsr = gsr.cat.add_categories([\"e\"])\n psr[0] = \"e\"\n gsr[0] = \"e\"\n assert_eq(psr, gsr)\n\n psr[[0, 1]] = \"b\"\n gsr[[0, 1]] = \"b\"\n assert_eq(psr, gsr)\n\n psr[0:3] = \"e\"\n gsr[0:3] = \"e\"\n assert_eq(psr, gsr)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n (0, \"d\"),\n (0, \"g\"),\n ([0, 1], \"g\"),\n ([0, 1], None),\n (slice(None, 2), \"g\"),\n (slice(None, 2), [\"g\", None]),\n ],\n)\ndef test_series_setitem_string(key, value):\n psr = pd.Series([\"a\", \"b\", \"c\", \"d\", \"e\"])\n gsr = cudf.from_pandas(psr)\n psr[key] = value\n gsr[key] = value\n assert_eq(psr, gsr)\n\n psr = pd.Series([\"a\", None, \"c\", \"d\", \"e\"])\n gsr = cudf.from_pandas(psr)\n psr[key] = value\n gsr[key] = value\n assert_eq(psr, gsr)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n (\"a\", 4),\n (\"b\", 4),\n ([\"a\", \"b\"], 4),\n ([\"a\", \"b\"], [4, 5]),\n ([True, False, True], 4),\n ([False, False, False], 4),\n ([True, False, True], [4, 5]),\n ],\n)\ndef test_series_setitem_loc(key, value):\n psr = pd.Series([1, 2, 3], [\"a\", \"b\", \"c\"])\n gsr = cudf.from_pandas(psr)\n psr.loc[key] = value\n gsr.loc[key] = value\n assert_eq(psr, gsr)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n ((0, 0), 5),\n ((slice(None), 0), 5),\n ((slice(None), 0), range(3)),\n ((slice(None, -1), 0), range(2)),\n (([0, 1], 0), 5),\n ],\n)\ndef test_dataframe_setitem_iloc(key, value, pdf_gdf):\n pdf, gdf = pdf_gdf\n pdf.iloc[key] = value\n gdf.iloc[key] = value\n assert_eq(pdf, gdf)\n\n\n@pytest.mark.parametrize(\n \"key, value\",\n [\n ((\"one\", \"a\"), 5),\n ((slice(None), \"a\"), 5),\n ((slice(None), \"a\"), range(3)),\n ((slice(None, \"two\"), \"a\"), range(2)),\n (([\"one\", \"two\"], \"a\"), 5),\n ],\n)\ndef test_dataframe_setitem_loc(key, value, pdf_gdf):\n pdf, gdf = pdf_gdf\n pdf.loc[key] = value\n gdf.loc[key] = value\n assert_eq(pdf, gdf)\n\n\n@pytest.mark.parametrize(\n \"key,value\",\n [\n ((0, 0), 5.0),\n ((slice(None), 0), 5.0),\n ((slice(None), 0), np.arange(7, dtype=\"float64\")),\n ],\n)\ndef test_dataframe_setitem_iloc_multiindex(key, value, pdf_gdf_multi):\n pdf, gdf = pdf_gdf_multi\n\n pdf.iloc[key] = value\n gdf.iloc[key] = value\n\n assert_eq(pdf, gdf)\n\n\ndef test_boolean_indexing_single_row(pdf_gdf):\n pdf, gdf = pdf_gdf\n assert_eq(\n pdf.loc[[True, False, False], :], gdf.loc[[True, False, False], :]\n )\n\n\ndef test_iloc_negative_indices():\n psr = pd.Series([1, 2, 3, 4, 5])\n gsr = cudf.from_pandas(psr)\n assert_eq(psr.iloc[[-1, -2, -4]], gsr.iloc[[-1, -2, -4]])\n\n\ndef test_out_of_bounds_indexing():\n a = cudf.Series([1, 2, 3])\n with pytest.raises(IndexError):\n a[[0, 1, 9]]\n with pytest.raises(IndexError):\n a[[0, 1, -4]]\n with pytest.raises(IndexError):\n a[[0, 1, 9]] = 2\n with pytest.raises(IndexError):\n a[[0, 1, -4]] = 2\n\n\ndef test_sliced_indexing():\n a = list(range(4, 4 + 
150))\n b = list(range(0, 0 + 150))\n pdf = pd.DataFrame({\"a\": a, \"b\": b})\n gdf = DataFrame.from_pandas(pdf)\n pdf = pdf.set_index(\"a\")\n gdf = gdf.set_index(\"a\")\n pidx = pdf.index[:75]\n gidx = gdf.index[:75]\n\n assert_eq(pdf.loc[pidx], gdf.loc[gidx])\n\n\n@pytest.mark.parametrize(\"index\", [[\"a\"], [\"a\", \"a\"], [\"a\", \"a\", \"b\", \"c\"]])\ndef test_iloc_categorical_index(index):\n gdf = cudf.DataFrame({\"data\": range(len(index))}, index=index)\n gdf.index = gdf.index.astype(\"category\")\n pdf = gdf.to_pandas()\n expect = pdf.iloc[:, 0]\n got = gdf.iloc[:, 0]\n assert_eq(expect, got)\n"
] |
[
[
"pandas.to_datetime",
"numpy.random.random",
"pandas.Series",
"numpy.random.seed",
"pandas.MultiIndex",
"pandas.date_range",
"numpy.arange",
"pandas.Categorical",
"pandas.DataFrame",
"numpy.datetime64",
"numpy.testing.assert_array_equal",
"pandas.testing.assert_frame_equal",
"numpy.random.rand",
"numpy.random.sample",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.randint"
]
] |
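Note on the cudf indexing tests in the entry above: they lean on the pandas convention that `.loc` label slices are end-inclusive while `.iloc` positional slices are end-exclusive (e.g. `test_dataframe_slicing` asserts `len(df[begin:end]) == end - begin`). A minimal pandas-only sketch of that convention, runnable without a GPU or cudf installed:

```python
# Sketch of the loc/iloc slicing semantics the tests above exercise.
# cudf mirrors pandas here, per the assert_eq comparisons in the tests.
import numpy as np
import pandas as pd

size = 123
df = pd.DataFrame({
    "a": np.random.randint(0, 100, size=size).astype(np.int32),
    "b": np.random.random(size).astype(np.float32),
})

begin, end = 7, 121
assert len(df.loc[begin:end]) == end - begin + 1  # label slice: end-inclusive
assert len(df.iloc[begin:end]) == end - begin     # positional slice: end-exclusive
assert len(df[begin:end]) == end - begin          # bare [] slices positionally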
buzbarstow/collectionmc
|
[
"0efd107d675f953d74259f2f9c396e51a1d9879c"
] |
[
"kosudoku/montecarlo.py"
] |
[
"# ------------------------------------------------------------------------------------------------ #\ndef ImportEssentialityData(fileName):\n# Not yet ready for prime time\n# Import a defined format essentiality data file\n# Assumes that data is in the format: locus tag, gene name, essentiality\t\n\n\tfrom .utils import ParseCSVLine\n\n\tfileHandle = open(fileName, 'r')\n\tdata = fileHandle.readlines()\n\tdataDict = {}\n\t\n\ti = 0\n\twhile i < len(data):\t\n\t\t# Ignore comment lines\n\t\tif data[i][0] != '#':\n\t\t\tdataLine = ParseCSVLine(data[i])\n\t\t\tdataDict[dataLine[0]] = [dataLine[1], dataLine[2]]\n\t\ti += 1\n\n\treturn dataDict\n# ------------------------------------------------------------------------------------------------ #\n\n\n# ------------------------------------------------------------------------------------------------ #\ndef BuildEssentialityDictThatIsKeyedByLocusTag(dataArray):\n# Not yet ready for prime time\n# Build essentiality data dict that is keyed by locus tag\n\t\n\tessentialityDict = {}\n\t\n\tlocusTags = []\n\t\n\theadersWithoutSysName = []\n\t\n\ti = 0\n\twhile i < len(headers):\n\t\tif headers[i] != 'sysName':\n\t\t\theadersWithoutSysName.append(headers[i])\n\t\ti += 1\n\t\n\tdataDict = {}\n\t\n\tfor line in dataArray:\n\t\t\n\t\t\n\t\tdataDict[line['sysName']] = {}\n\t\t\n\t\tfor header in headersWithoutSysName:\n\t\t\tdataDict[line['sysName']][header] = line[header]\n\t\n\treturn dataDict\n# ------------------------------------------------------------------------------------------------ #\n\n# ------------------------------------------------------------------------------------------------ #\ndef BuildCDSDictThatIsKeyedByLocusTag(cdsFeatures):\n# Not yet ready for prime time\n\n\ti = 0\n\tcdsDict = {}\n\twhile i < len(cdsFeatures):\n\t\tlocusTag = cdsFeatures[i].tagDict['locus_tag'][0]\n\t\tcdsDict[locusTag] = cdsFeatures[i]\n\t\ti += 1\n\t\n\treturn cdsDict\n# ------------------------------------------------------------------------------------------------ #\n\n\n# ------------------------------------------------------------------------------------------------ #\ndef SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \\\ntransposonCoordToFeatureDict, maxMutants):\n\t\n\tfrom numpy.random import choice\n\timport pdb\n\t\n\tnonEssentialGeneCount = len(hittableFeatures)\n\t\n\t\n\tfeatureHitCountDict = {}\n\tfor feature in hittableFeatures:\n\t\tfeatureHitCountDict[feature] = 0\n\t\n\tfeaturesHitAtLeastOnce = 0\n\t\n\tfeaturesHitAtLeastOnceVersusMutant = []\n\t\n\ti = 1\n\twhile i <= maxMutants:\n\t\trandomCoord = int(choice(hittableTransposonCoords))\n\t\t\t\t\n\t\tfeaturesToBeHit = transposonCoordToFeatureDict[randomCoord]\n\t\t\n\t\tisAnyFeatureIncludingThisCoordNotHittable = False\n\t\t\n\t\tfor featureToBeHit in featuresToBeHit:\n\t\t\tif featureToBeHit in notHittableFeatures:\n\t\t\t\tisAnyFeatureIncludingThisCoordNotHittable = True\n\t\t\n\t\tif isAnyFeatureIncludingThisCoordNotHittable == False:\n\t\t\tfor featureToBeHit in featuresToBeHit:\n\t\t\t\ttry:\n\t\t\t\t\tfeatureHitCountDict[featureToBeHit] += 1\n\t\t\t\texcept:\n\t\t\t\t\tpdb.set_trace()\n\t\t\n\t\t\t\tif featureHitCountDict[featureToBeHit] == 1:\n\t\t\t\t\tfeaturesHitAtLeastOnce += 1\n\t\t\n\t\tfeaturesHitAtLeastOnceVersusMutant.append(featuresHitAtLeastOnce)\n\t\t\n\t\ti += 1\n\t\n\treturn featuresHitAtLeastOnceVersusMutant\n# ------------------------------------------------------------------------------------------------ #\n\n# 
------------------------------------------------------------------------------------------------ #\ndef SimulateMultiplePickings(transposonCoordToFeatureDictFile, numberOfTrials, maxMutants):\n\t\n\tfrom scipy import unique, intersect1d\n\tfrom numpy import mean, std, arange\n\timport xml.etree.ElementTree as ET\n\timport pdb\n\t\n\t\n\ttransposonCoordToFeatureDictFileHandle = open(transposonCoordToFeatureDictFile, 'r')\n\n\n\ttransposonCoordToFeatureDict = {}\n\thittableFeatures = []\n\thittableTransposonCoords = []\n\tnotHittableTransposonCoords = []\n\tnotHittableFeatures = []\n\totherFeatures = []\n\n\n\ttree = ET.parse(transposonCoordToFeatureDictFile)\n\troot = tree.getroot()\n\timportedCoordsList = root.findall('coord')\n\n\tfor coord in importedCoordsList:\n\t\t\n\t\tcoordinate = int(coord.attrib['coord'])\n\t\tloci = coord.findall('locus')\n\t\n\t\timportedCoordsKeys = transposonCoordToFeatureDict.keys()\n\t\n\t\tif coordinate not in importedCoordsKeys:\n\t\t\ttransposonCoordToFeatureDict[coordinate] = []\n\t\n\t\tfor locus in loci:\n\t\t\tlocusName = locus.attrib['locus']\n\t\t\tessentiality = locus.attrib['essentiality']\n\t\t\ttransposonCoordToFeatureDict[coordinate].append(locusName)\n\n\t\t\tif essentiality == 'Dispensable':\n\t\t\t\thittableTransposonCoords.append(coordinate)\n\t\t\t\thittableFeatures.append(locusName)\n\t\t\telif essentiality == 'Essential':\n\t\t\t\tnotHittableFeatures.append(locusName)\n\t\t\t\tnotHittableTransposonCoords.append(coordinate)\n\t\t\telse:\n\t\t\t\totherFeatures.append(locusName)\n\t\t\t\tprint(locusName)\n\n\n\thittableFeatures = unique(hittableFeatures)\n\thittableTransposonCoords = unique(hittableTransposonCoords)\n\tnotHittableFeatures = unique(notHittableFeatures)\n\totherFeatures = unique(otherFeatures)\n\t\n\tintersection = intersect1d(hittableFeatures, notHittableFeatures)\n\t\n\t\t\n\t# Simulate a number of picking runs\n\tfeaturesHitAtLeastOnceTrialsArray = []\n\ti = 0\n\twhile i < numberOfTrials:\n\t\tfeaturesHitAtLeastOnceVersusMutant = \\\n\t\tSimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \\\n\t\ttransposonCoordToFeatureDict, maxMutants)\n\t\n\t\tfeaturesHitAtLeastOnceTrialsArray.append(featuresHitAtLeastOnceVersusMutant)\t\n\t\ti += 1\n\t\t\n\t# Collect together then data from the picking runs for calculation of mean and standard \n\t# deviation of number of hits picked\n\n\ti = 0\n\tcollectedFeatureHitCountArray = []\n\twhile i < len(featuresHitAtLeastOnceTrialsArray[0]):\n\t\tcollectedFeatureHitCountArray.append([])\n\t\ti += 1\n\n\ti = 0\n\twhile i < len(collectedFeatureHitCountArray):\n\t\tj = 0\n\t\twhile j < len(featuresHitAtLeastOnceTrialsArray):\n\t\t\tcollectedFeatureHitCountArray[i].append(featuresHitAtLeastOnceTrialsArray[j][i])\n\t\t\tj += 1\n\t\ti += 1\n\n\taverageFeatureHitCount = []\n\tsdFeatureHitCount = []\n\tfeatureHitCountUpperBound = []\n\tfeatureHitCountLowerBound = []\n\n\t# Calculate the mean and standard deviation of the number of unique features hit at each pick\n\t# from the trials\n\n\ti = 0\n\twhile i < len(collectedFeatureHitCountArray):\n\t\taverageFeatureHitCount.append(mean(collectedFeatureHitCountArray[i]))\n\t\tsdFeatureHitCount.append(std(collectedFeatureHitCountArray[i]))\n\n\t\tfeatureHitCountUpperBound.append(averageFeatureHitCount[i] + sdFeatureHitCount[i])\n\t\tfeatureHitCountLowerBound.append(averageFeatureHitCount[i] - sdFeatureHitCount[i])\n\n\t\ti += 1\n\t\n\t\n\t# Prepare an x axis (the number of mutants picked) for the output\n\tiAxis = arange(1, 
maxMutants+1, 1)\n\t\n\tnoUniqHittableFeatures = len(hittableFeatures)\n\t\n\treturn [iAxis, averageFeatureHitCount, sdFeatureHitCount, featureHitCountUpperBound, \\\n\tfeatureHitCountLowerBound, noUniqHittableFeatures ]\n# ------------------------------------------------------------------------------------------------ #\n\n\n\n# ------------------------------------------------------------------------------------------------ #\ndef PoissonEstimateOfGenesHit(iAxis, noUniqHittableFeatures):\n\t\n\t# numpy.float was just an alias for the builtin float and was removed in NumPy 1.24\n\tfrom numpy import exp, array\n\t\n\tuniqueGenesHit = []\n\ti = 0\n\twhile i < len(iAxis):\n\t\tans = noUniqHittableFeatures*(1-exp(-iAxis[i]/noUniqHittableFeatures))\n\t\tuniqueGenesHit.append(ans)\n\t\ti += 1\n\t\t\n\tuniqueGenesHit = array(uniqueGenesHit, dtype=float)\n\t\n\treturn uniqueGenesHit\n# ------------------------------------------------------------------------------------------------ #\n\n\n\n\n\n\n\n\n# ------------------------------------------------------------------------------------------------ #\ndef FindATandTAPositions2(genomeFile, format='genbank'):\n# Does the same thing as FindATandTAPositions but can work with a GenBank or a Fasta file, \\\n# so you only need one file format\n\t\n\timport re\n\tfrom pdb import set_trace\n\t\n\tif format == 'genbank':\n\t\tsequence = ImportGenBankSequence(genomeFile)\n\telif format == 'fasta':\n\t\tsequence = ImportFastaSequence(genomeFile)\n\t\n\tATandTAPositions = []\n\t\n\tatRegex = re.compile('(at|ta)', re.IGNORECASE)\n\t\n# \tset_trace()\n\t\n\ti = 0\n\twhile i < len(sequence) - 1:\n\t\tatMatch = atRegex.match(sequence[i:i+2])\n\t\t\n\t\tif atMatch is not None:\n\t\t\tATandTAPositions.append(i+1)\n\t\t\n\t\ti += 1\n\t\n\t\n\t\n\treturn [ATandTAPositions, sequence]\n# ------------------------------------------------------------------------------------------------ #\n\n\n"
] |
[
[
"scipy.unique",
"numpy.random.choice",
"numpy.arange",
"scipy.intersect1d",
"numpy.std",
"numpy.mean",
"numpy.exp",
"numpy.array"
]
] |
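Note on the entry above: `PoissonEstimateOfGenesHit` implements the classic saturation curve for uniform random picking — after n picks over N equally hittable features, the expected number of distinct features hit is N(1 - e^(-n/N)). A vectorized NumPy sketch of that formula (the function and variable names here are illustrative, not the repo's):

```python
import numpy as np

def poisson_estimate_of_genes_hit(n_picks, n_hittable_features):
    # Expected distinct features hit after n uniform picks over N features.
    n = np.asarray(n_picks, dtype=float)
    N = float(n_hittable_features)
    return N * (1.0 - np.exp(-n / N))

picks = np.arange(1, 5001)
curve = poisson_estimate_of_genes_hit(picks, 4000)
# After N picks over N features, a fraction 1 - 1/e (~63.2%) is expected hit:
assert abs(curve[3999] / 4000.0 - (1.0 - np.exp(-1.0))) < 1e-12
```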
hazenai/cvat
|
[
"4ac44df577b79f3329b036e78e71c08bb384e7e2"
] |
[
"cvat/apps/engine/media_extractors.py"
] |
[
"# Copyright (C) 2019-2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport os\nimport tempfile\nimport shutil\nimport zipfile\nimport io\nfrom abc import ABC, abstractmethod\n\nimport av\nimport numpy as np\nfrom pyunpack import Archive\nfrom PIL import Image, ImageFile\n\n# fixes: \"OSError:broken data stream\" when executing line 72 while loading images downloaded from the web\n# see: https://stackoverflow.com/questions/42462431/oserror-broken-data-stream-when-reading-image-file\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nfrom cvat.apps.engine.mime_types import mimetypes\n\ndef get_mime(name):\n for type_name, type_def in MEDIA_TYPES.items():\n if type_def['has_mime_type'](name):\n return type_name\n\n return 'unknown'\n\ndef create_tmp_dir():\n return tempfile.mkdtemp(prefix='cvat-', suffix='.data')\n\ndef delete_tmp_dir(tmp_dir):\n if tmp_dir:\n shutil.rmtree(tmp_dir)\n\nclass IMediaReader(ABC):\n def __init__(self, source_path, step, start, stop):\n self._source_path = sorted(source_path)\n self._step = step\n self._start = start\n self._stop = stop\n\n @abstractmethod\n def __iter__(self):\n pass\n\n @abstractmethod\n def get_preview(self):\n pass\n\n @abstractmethod\n def get_progress(self, pos):\n pass\n\n @staticmethod\n def _get_preview(obj):\n PREVIEW_SIZE = (256, 256)\n if isinstance(obj, io.IOBase):\n preview = Image.open(obj)\n else:\n preview = obj\n preview.thumbnail(PREVIEW_SIZE)\n\n return preview.convert('RGB')\n\n @abstractmethod\n def get_image_size(self):\n pass\n\nclass ImageListReader(IMediaReader):\n def __init__(self, source_path, step=1, start=0, stop=None):\n if not source_path:\n raise Exception('No image found')\n\n if stop is None:\n stop = len(source_path)\n else:\n stop = min(len(source_path), stop + 1)\n step = max(step, 1)\n assert stop > start\n\n super().__init__(\n source_path=source_path,\n step=step,\n start=start,\n stop=stop,\n )\n\n def __iter__(self):\n for i in range(self._start, self._stop, self._step):\n yield (self.get_image(i), self.get_path(i), i)\n\n def get_path(self, i):\n return self._source_path[i]\n\n def get_image(self, i):\n return self._source_path[i]\n\n def get_progress(self, pos):\n return (pos - self._start + 1) / (self._stop - self._start)\n\n def get_preview(self):\n fp = open(self._source_path[0], \"rb\")\n return self._get_preview(fp)\n\n def get_image_size(self):\n img = Image.open(self._source_path[0])\n return img.width, img.height\n\nclass DirectoryReader(ImageListReader):\n def __init__(self, source_path, step=1, start=0, stop=None):\n image_paths = []\n for source in source_path:\n for root, _, files in os.walk(source):\n paths = [os.path.join(root, f) for f in files]\n paths = filter(lambda x: get_mime(x) == 'image', paths)\n image_paths.extend(paths)\n super().__init__(\n source_path=image_paths,\n step=step,\n start=start,\n stop=stop,\n )\n\nclass ArchiveReader(DirectoryReader):\n def __init__(self, source_path, step=1, start=0, stop=None):\n self._archive_source = source_path[0]\n Archive(self._archive_source).extractall(os.path.dirname(source_path[0]))\n super().__init__(\n source_path=[os.path.dirname(source_path[0])],\n step=step,\n start=start,\n stop=stop,\n )\n\n def __del__(self):\n os.remove(self._archive_source)\n\nclass PdfReader(DirectoryReader):\n def __init__(self, source_path, step=1, start=0, stop=None):\n if not source_path:\n raise Exception('No PDF found')\n\n from pdf2image import convert_from_path\n self._pdf_source = source_path[0]\n self._tmp_dir = create_tmp_dir()\n file_ = 
convert_from_path(self._pdf_source)\n basename = os.path.splitext(os.path.basename(self._pdf_source))[0]\n for page_num, page in enumerate(file_):\n output = os.path.join(self._tmp_dir, '{}{:09d}.jpeg'.format(basename, page_num))\n page.save(output, 'JPEG')\n\n super().__init__(\n source_path=[self._tmp_dir],\n step=step,\n start=start,\n stop=stop,\n )\n\n def __del__(self):\n delete_tmp_dir(self._tmp_dir)\n\n def get_path(self, i):\n base_dir = os.path.dirname(self._pdf_source)\n return os.path.join(base_dir, os.path.relpath(self._source_path[i], self._tmp_dir))\n\nclass ZipReader(ImageListReader):\n def __init__(self, source_path, step=1, start=0, stop=None):\n self._zip_source = zipfile.ZipFile(source_path[0], mode='r')\n file_list = [f for f in self._zip_source.namelist() if get_mime(f) == 'image']\n super().__init__(file_list, step, start, stop)\n\n def __del__(self):\n self._zip_source.close()\n\n def get_preview(self):\n io_image = io.BytesIO(self._zip_source.read(self._source_path[0]))\n return self._get_preview(io_image)\n\n def get_image_size(self):\n img = Image.open(io.BytesIO(self._zip_source.read(self._source_path[0])))\n return img.width, img.height\n\n def get_image(self, i):\n return io.BytesIO(self._zip_source.read(self._source_path[i]))\n\n def get_path(self, i):\n if self._zip_source.filename:\n return os.path.join(os.path.dirname(self._zip_source.filename), self._source_path[i])\n else: # necessary for mime_type definition\n return self._source_path[i]\n\n def extract(self):\n self._zip_source.extractall(os.path.dirname(self._zip_source.filename))\n os.remove(self._zip_source.filename)\n\nclass VideoReader(IMediaReader):\n def __init__(self, source_path, step=1, start=0, stop=None):\n super().__init__(\n source_path=source_path,\n step=step,\n start=start,\n stop=stop + 1 if stop is not None else stop,\n )\n\n def _has_frame(self, i):\n if i >= self._start:\n if (i - self._start) % self._step == 0:\n if self._stop is None or i < self._stop:\n return True\n\n return False\n\n def _decode(self, container):\n frame_num = 0\n for packet in container.demux():\n if packet.stream.type == 'video':\n for image in packet.decode():\n frame_num += 1\n if self._has_frame(frame_num - 1):\n yield (image, self._source_path[0], image.pts)\n\n def __iter__(self):\n container = self._get_av_container()\n source_video_stream = container.streams.video[0]\n source_video_stream.thread_type = 'AUTO'\n\n return self._decode(container)\n\n def get_progress(self, pos):\n container = self._get_av_container()\n # Not for all containers return real value\n stream = container.streams.video[0]\n return pos / stream.duration if stream.duration else None\n\n def _get_av_container(self):\n return av.open(self._source_path[0])\n\n def get_preview(self):\n container = self._get_av_container()\n stream = container.streams.video[0]\n preview = next(container.decode(stream))\n return self._get_preview(preview.to_image())\n\n def get_image_size(self):\n image = (next(iter(self)))[0]\n return image.width, image.height\n\nclass IChunkWriter(ABC):\n def __init__(self, quality):\n self._image_quality = quality\n\n @staticmethod\n def _compress_image(image_path, quality):\n image = image_path.to_image() if isinstance(image_path, av.VideoFrame) else Image.open(image_path)\n # Ensure image data fits into 8bit per pixel before RGB conversion as PIL clips values on conversion\n if image.mode == \"I\":\n # Image mode is 32bit integer pixels.\n # Autoscale pixels by factor 2**8 / im_data.max() to fit into 8bit\n 
im_data = np.array(image)\n im_data = im_data * (2**8 / im_data.max())\n image = Image.fromarray(im_data.astype(np.int32))\n converted_image = image.convert('RGB')\n image.close()\n buf = io.BytesIO()\n converted_image.save(buf, format='JPEG', quality=quality, optimize=True)\n buf.seek(0)\n width, height = converted_image.size\n converted_image.close()\n return width, height, buf\n\n @abstractmethod\n def save_as_chunk(self, images, chunk_path):\n pass\n\nclass ZipChunkWriter(IChunkWriter):\n def save_as_chunk(self, images, chunk_path):\n with zipfile.ZipFile(chunk_path, 'x') as zip_chunk:\n for idx, (image, path, _) in enumerate(images):\n arcname = '{:06d}{}'.format(idx, os.path.splitext(path)[1])\n if isinstance(image, io.BytesIO):\n zip_chunk.writestr(arcname, image.getvalue())\n else:\n zip_chunk.write(filename=image, arcname=arcname)\n # return empty list because ZipChunkWriter write files as is\n # and does not decode it to know img size.\n return []\n\nclass ZipCompressedChunkWriter(IChunkWriter):\n def save_as_chunk(self, images, chunk_path):\n image_sizes = []\n with zipfile.ZipFile(chunk_path, 'x') as zip_chunk:\n for idx, (image, _ , _) in enumerate(images):\n w, h, image_buf = self._compress_image(image, self._image_quality)\n image_sizes.append((w, h))\n arcname = '{:06d}.jpeg'.format(idx)\n zip_chunk.writestr(arcname, image_buf.getvalue())\n\n return image_sizes\n\nclass Mpeg4ChunkWriter(IChunkWriter):\n def __init__(self, _):\n super().__init__(17)\n self._output_fps = 25\n\n @staticmethod\n def _create_av_container(path, w, h, rate, options, f='mp4'):\n # x264 requires width and height must be divisible by 2 for yuv420p\n if h % 2:\n h += 1\n if w % 2:\n w += 1\n\n container = av.open(path, 'w',format=f)\n video_stream = container.add_stream('libx264', rate=rate)\n video_stream.pix_fmt = \"yuv420p\"\n video_stream.width = w\n video_stream.height = h\n video_stream.options = options\n\n return container, video_stream\n\n def save_as_chunk(self, images, chunk_path):\n if not images:\n raise Exception('no images to save')\n\n input_w = images[0][0].width\n input_h = images[0][0].height\n\n output_container, output_v_stream = self._create_av_container(\n path=chunk_path,\n w=input_w,\n h=input_h,\n rate=self._output_fps,\n options={\n \"crf\": str(self._image_quality),\n \"preset\": \"ultrafast\",\n },\n )\n\n self._encode_images(images, output_container, output_v_stream)\n output_container.close()\n return [(input_w, input_h)]\n\n @staticmethod\n def _encode_images(images, container, stream):\n for frame, _, _ in images:\n # let libav set the correct pts and time_base\n frame.pts = None\n frame.time_base = None\n\n for packet in stream.encode(frame):\n container.mux(packet)\n\n # Flush streams\n for packet in stream.encode():\n container.mux(packet)\n\nclass Mpeg4CompressedChunkWriter(Mpeg4ChunkWriter):\n def __init__(self, quality):\n # translate inversed range [1:100] to [0:51]\n self._image_quality = round(51 * (100 - quality) / 99)\n self._output_fps = 25\n\n\n def save_as_chunk(self, images, chunk_path):\n if not images:\n raise Exception('no images to save')\n\n input_w = images[0][0].width\n input_h = images[0][0].height\n\n downscale_factor = 1\n while input_h / downscale_factor >= 1080:\n downscale_factor *= 2\n\n output_h = input_h // downscale_factor\n output_w = input_w // downscale_factor\n\n output_container, output_v_stream = self._create_av_container(\n path=chunk_path,\n w=output_w,\n h=output_h,\n rate=self._output_fps,\n options={\n 'profile': 'baseline',\n 
'coder': '0',\n 'crf': str(self._image_quality),\n 'wpredp': '0',\n 'flags': '-loop'\n },\n )\n\n self._encode_images(images, output_container, output_v_stream)\n output_container.close()\n return [(input_w, input_h)]\n\ndef _is_archive(path):\n mime = mimetypes.guess_type(path)\n mime_type = mime[0]\n encoding = mime[1]\n supportedArchives = ['application/x-rar-compressed',\n 'application/x-tar', 'application/x-7z-compressed', 'application/x-cpio',\n 'gzip', 'bzip2']\n return mime_type in supportedArchives or encoding in supportedArchives\n\ndef _is_video(path):\n mime = mimetypes.guess_type(path)\n return mime[0] is not None and mime[0].startswith('video')\n\ndef _is_image(path):\n mime = mimetypes.guess_type(path)\n # Exclude vector graphic images because Pillow cannot work with them\n return mime[0] is not None and mime[0].startswith('image') and \\\n not mime[0].startswith('image/svg')\n\ndef _is_dir(path):\n return os.path.isdir(path)\n\ndef _is_pdf(path):\n mime = mimetypes.guess_type(path)\n return mime[0] == 'application/pdf'\n\ndef _is_zip(path):\n mime = mimetypes.guess_type(path)\n mime_type = mime[0]\n encoding = mime[1]\n supportedArchives = ['application/zip']\n return mime_type in supportedArchives or encoding in supportedArchives\n\n# 'has_mime_type': function receives 1 argument - path to file.\n# Should return True if file has specified media type.\n# 'extractor': class that extracts images from specified media.\n# 'mode': 'annotation' or 'interpolation' - mode of task that should be created.\n# 'unique': True or False - describes how the type can be combined with other.\n# True - only one item of this type and no other is allowed\n# False - this media types can be combined with other which have unique == False\n\nMEDIA_TYPES = {\n 'image': {\n 'has_mime_type': _is_image,\n 'extractor': ImageListReader,\n 'mode': 'annotation',\n 'unique': False,\n },\n 'video': {\n 'has_mime_type': _is_video,\n 'extractor': VideoReader,\n 'mode': 'interpolation',\n 'unique': True,\n },\n 'archive': {\n 'has_mime_type': _is_archive,\n 'extractor': ArchiveReader,\n 'mode': 'annotation',\n 'unique': True,\n },\n 'directory': {\n 'has_mime_type': _is_dir,\n 'extractor': DirectoryReader,\n 'mode': 'annotation',\n 'unique': False,\n },\n 'pdf': {\n 'has_mime_type': _is_pdf,\n 'extractor': PdfReader,\n 'mode': 'annotation',\n 'unique': True,\n },\n 'zip': {\n 'has_mime_type': _is_zip,\n 'extractor': ZipReader,\n 'mode': 'annotation',\n 'unique': True,\n }\n}\n"
] |
[
[
"numpy.array"
]
] |
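Note on the entry above: `VideoReader._has_frame` reduces step/start/stop frame selection to three conditions on the frame index. A standalone restatement of that rule (as a hypothetical free function, not CVAT API), which makes it easy to sanity-check:

```python
def has_frame(i, start=0, step=1, stop=None):
    # Keep frame i iff it is at or past start, lands on the step grid
    # anchored at start, and is before stop (when a stop is given).
    return i >= start and (i - start) % step == 0 and (stop is None or i < stop)

assert [i for i in range(10) if has_frame(i, start=2, step=3)] == [2, 5, 8]
assert [i for i in range(10) if has_frame(i, start=0, step=2, stop=7)] == [0, 2, 4, 6]
```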
hndgzkn/alphacsc
|
[
"467cd4e6fad54aab23ea7eb6a11a8024c078d73b"
] |
[
"alphacsc/update_d_multi.py"
] |
[
"# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>\n# Tom Dupre La Tour <tom.duprelatour@telecom-paristech.fr>\n# Umut Simsekli <umut.simsekli@telecom-paristech.fr>\n# Alexandre Gramfort <alexandre.gramfort@inria.fr>\n# Thomas Moreau <thomas.moreau@inria.fr>\nimport numpy as np\n\nfrom .utils.compute_constants import compute_ztz, compute_ztX\n\n\ndef squeeze_all_except_one(X, axis=0):\n squeeze_axis = tuple(set(range(X.ndim)) - set([axis]))\n return X.squeeze(axis=squeeze_axis)\n\n\ndef check_solver_and_constraints(rank1, solver_d, uv_constraint):\n \"\"\"Checks if solver_d and uv_constraint are compatible depending on\n rank1 value.\n\n - If rank1 is False, solver_d should be 'fista' and uv_constraint should be\n 'auto'.\n - If rank1 is True;\n - If solver_d is either 'alternate' or 'alternate_adaptive',\n uv_constraint should be 'separate'.\n - If solver_d is either 'joint' or 'fista', uv_constraint should be\n 'joint'.\n\n Parameters\n ----------\n rank1: boolean\n If set to True, learn rank 1 dictionary atoms.\n solver_d : str in {'alternate' | 'alternate_adaptive' | 'fista' | 'joint' |\n 'auto'}\n The solver to use for the d update.\n - If rank1 is False, only option is 'fista'\n - If rank1 is True, options are 'alternate', 'alternate_adaptive'\n (default) or 'joint'\n uv_constraint : str in {'joint' | 'separate' | 'auto'}\n The kind of norm constraint on the atoms if using rank1=True.\n If 'joint', the constraint is norm_2([u, v]) <= 1\n If 'separate', the constraint is norm_2(u) <= 1 and norm_2(v) <= 1\n If rank1 is False, then uv_constraint must be 'auto'.\n \"\"\"\n\n if rank1:\n if solver_d == 'auto':\n solver_d = 'alternate_adaptive'\n if 'alternate' in solver_d:\n if uv_constraint == 'auto':\n uv_constraint = 'separate'\n else:\n assert uv_constraint == 'separate', (\n \"solver_d='alternate*' should be used with \"\n f\"uv_constraint='separate'. Got '{uv_constraint}'.\"\n )\n elif uv_constraint == 'auto' and solver_d in ['joint', 'fista']:\n uv_constraint = 'joint'\n else:\n assert solver_d in ['auto', 'fista'], (\n f\"solver_d should be auto or fista. Got solver_d='{solver_d}'.\"\n )\n assert solver_d in ['auto', 'fista'] and uv_constraint == 'auto', (\n \"If rank1 is False, uv_constraint should be 'auto' \"\n f\"and solver_d should be auto or fista. Got solver_d='{solver_d}' \"\n f\"and uv_constraint='{uv_constraint}'.\"\n )\n solver_d = 'fista'\n return solver_d, uv_constraint\n\n\ndef prox_uv(uv, uv_constraint='joint', n_channels=None, return_norm=False):\n if uv_constraint == 'joint':\n norm_uv = np.maximum(1, np.linalg.norm(uv, axis=1, keepdims=True))\n uv /= norm_uv\n\n elif uv_constraint == 'separate':\n assert n_channels is not None\n norm_u = np.maximum(1, np.linalg.norm(uv[:, :n_channels],\n axis=1, keepdims=True))\n norm_v = np.maximum(1, np.linalg.norm(uv[:, n_channels:],\n axis=1, keepdims=True))\n\n uv[:, :n_channels] /= norm_u\n uv[:, n_channels:] /= norm_v\n norm_uv = norm_u * norm_v\n else:\n raise ValueError('Unknown uv_constraint: %s.' 
% (uv_constraint, ))\n\n if return_norm:\n return uv, squeeze_all_except_one(norm_uv, axis=0)\n else:\n return uv\n\n\ndef prox_d(D, return_norm=False):\n norm_d = np.maximum(1, np.linalg.norm(D, axis=(1, 2), keepdims=True))\n D /= norm_d\n\n if return_norm:\n return D, squeeze_all_except_one(norm_d, axis=0)\n else:\n return D\n\n\ndef _get_d_update_constants(X, z):\n n_trials, n_atoms, n_times_valid = z.shape\n n_trials, n_channels, n_times = X.shape\n n_times_atom = n_times - n_times_valid + 1\n\n ztX = compute_ztX(z, X)\n ztz = compute_ztz(z, n_times_atom)\n\n constants = {}\n constants['ztX'] = ztX\n constants['ztz'] = ztz\n constants['n_channels'] = X.shape[1]\n constants['XtX'] = np.dot(X.ravel(), X.ravel())\n return constants\n"
] |
[
[
"numpy.linalg.norm"
]
] |
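Note on the entry above: `prox_uv` enforces the rank-1 norm constraints described in `check_solver_and_constraints` — under 'joint' each concatenated [u, v] row is projected onto the unit ball, under 'separate' the u and v halves are normalized independently. A minimal NumPy sketch under those definitions (`prox_uv_sketch` is an illustrative name, not the alphacsc API):

```python
import numpy as np

def prox_uv_sketch(uv, n_channels, uv_constraint="joint"):
    # Scale each atom so its norm(s) are at most 1.
    uv = uv.copy()
    if uv_constraint == "joint":
        norm = np.maximum(1.0, np.linalg.norm(uv, axis=1, keepdims=True))
        uv /= norm
    elif uv_constraint == "separate":
        for part in (slice(None, n_channels), slice(n_channels, None)):
            norm = np.maximum(1.0, np.linalg.norm(uv[:, part], axis=1, keepdims=True))
            uv[:, part] /= norm
    else:
        raise ValueError(f"Unknown uv_constraint: {uv_constraint}.")
    return uv

rng = np.random.default_rng(0)
uv = 5.0 * rng.standard_normal((3, 10))  # 3 atoms; n_channels=4, n_times_atom=6
out = prox_uv_sketch(uv, n_channels=4, uv_constraint="separate")
assert np.all(np.linalg.norm(out[:, :4], axis=1) <= 1.0 + 1e-12)  # u parts
assert np.all(np.linalg.norm(out[:, 4:], axis=1) <= 1.0 + 1e-12)  # v parts
```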
zhandand/DogNet
|
[
"ee15f3e057a34adf9ed9cc09d049ec0eaf8df048"
] |
[
"code/baseline/train_DMNC.py"
] |
[
"import torch\r\nimport torch.nn as nn\r\nfrom sklearn.metrics import jaccard_similarity_score, roc_auc_score, precision_score, f1_score, average_precision_score\r\nimport numpy as np\r\nimport dill\r\nimport time\r\nfrom torch.nn import CrossEntropyLoss\r\nfrom torch.optim import Adam\r\nimport os\r\nfrom collections import defaultdict\r\nimport torch.nn.functional as F\r\n\r\nimport sys\r\nsys.path.append(\"..\")\r\nfrom models import DMNC\r\nfrom util import llprint, sequence_metric, ddi_rate_score, get_n_params\r\n\r\ntorch.manual_seed(1203)\r\nmodel_name = 'DMNC'\r\nresume_name = ''\r\n\r\n'''\r\nIt's better to refer to the offical implement in tensorflow. https://github.com/thaihungle/DMNC\r\n'''\r\n\r\ndef sequence_output_process(output_logits, filter_token):\r\n pind = np.argsort(output_logits, axis=-1)[:, ::-1]\r\n out_list = []\r\n for i in range(len(pind)):\r\n for j in range(pind.shape[1]):\r\n label = pind[i][j]\r\n if label in filter_token:\r\n continue\r\n if label not in out_list:\r\n out_list.append(label)\r\n break\r\n y_pred_prob_tmp = []\r\n for idx, item in enumerate(out_list):\r\n y_pred_prob_tmp.append(output_logits[idx, item])\r\n sorted_predict = [x for _, x in sorted(zip(y_pred_prob_tmp, out_list), reverse=True)]\r\n return out_list, sorted_predict\r\n\r\ndef eval(model, data_eval, voc_size, epoch):\r\n # evaluate\r\n print('')\r\n model.eval()\r\n\r\n ja, prauc, avg_p, avg_r, avg_f1 = [[] for _ in range(5)]\r\n records = []\r\n for step, input in enumerate(data_eval):\r\n y_gt = []\r\n y_pred = []\r\n y_pred_prob = []\r\n y_pred_label = []\r\n i1_state, i2_state, i3_state = None, None, None\r\n for adm in input:\r\n y_gt_tmp = np.zeros(voc_size[2])\r\n y_gt_tmp[adm[2]] = 1\r\n y_gt.append(y_gt_tmp)\r\n\r\n output_logits, i1_state, i2_state, i3_state = model(adm, i1_state, i2_state, i3_state)\r\n output_logits = output_logits.detach().cpu().numpy()\r\n\r\n out_list, sorted_predict = sequence_output_process(output_logits, [voc_size[2], voc_size[2]+1])\r\n\r\n y_pred_label.append(sorted_predict)\r\n y_pred_prob.append(np.mean(output_logits[:,:-2], axis=0))\r\n\r\n y_pred_tmp = np.zeros(voc_size[2])\r\n y_pred_tmp[out_list] = 1\r\n y_pred.append(y_pred_tmp)\r\n records.append(y_pred_label)\r\n\r\n adm_ja, adm_prauc, adm_avg_p, adm_avg_r, adm_avg_f1 = sequence_metric(np.array(y_gt), np.array(y_pred),\r\n np.array(y_pred_prob),\r\n np.array(y_pred_label))\r\n ja.append(adm_ja)\r\n prauc.append(adm_prauc)\r\n avg_p.append(adm_avg_p)\r\n avg_r.append(adm_avg_r)\r\n avg_f1.append(adm_avg_f1)\r\n\r\n llprint('\\rEval--Epoch: %d, Step: %d/%d' % (epoch, step, len(data_eval)))\r\n\r\n # ddi rate\r\n ddi_rate = ddi_rate_score(records)\r\n llprint('\\tDDI Rate: %.4f, Jaccard: %.4f, PRAUC: %.4f, AVG_PRC: %.4f, AVG_RECALL: %.4f, AVG_F1: %.4f\\n' % (\r\n ddi_rate, np.mean(ja), np.mean(prauc), np.mean(avg_p), np.mean(avg_r), np.mean(avg_f1)\r\n ))\r\n return ddi_rate, np.mean(ja), np.mean(prauc), np.mean(avg_p), np.mean(avg_r), np.mean(avg_f1)\r\n\r\ndef main():\r\n if not os.path.exists(os.path.join(\"saved\", model_name)):\r\n os.makedirs(os.path.join(\"saved\", model_name))\r\n\r\n data_path = '../data/records_final.pkl'\r\n voc_path = '../data/voc_final.pkl'\r\n device = torch.device('cuda:0')\r\n\r\n data = dill.load(open(data_path, 'rb'))\r\n voc = dill.load(open(voc_path, 'rb'))\r\n diag_voc, pro_voc, med_voc = voc['diag_voc'], voc['pro_voc'], voc['med_voc']\r\n\r\n split_point = int(len(data) * 2 / 3)\r\n data_train = data[:split_point]\r\n eval_len = 
int(len(data[split_point:]) / 2)\r\n data_test = data[split_point:split_point + eval_len]\r\n data_eval = data[split_point+eval_len:]\r\n voc_size = (len(diag_voc.idx2word), len(pro_voc.idx2word), len(med_voc.idx2word))\r\n\r\n EPOCH = 30\r\n LR = 0.0005\r\n TEST = False\r\n END_TOKEN = voc_size[2] + 1\r\n\r\n model = DMNC(voc_size, device=device)\r\n if TEST:\r\n model.load_state_dict(torch.load(open(os.path.join(\"saved\", model_name, resume_name), 'rb')))\r\n model.to(device=device)\r\n print('parameters', get_n_params(model))\r\n\r\n criterion2 = nn.CrossEntropyLoss().to(device)\r\n optimizer = Adam(model.parameters(), lr=LR)\r\n\r\n if TEST:\r\n eval(model, data_test, voc_size, 0)\r\n else:\r\n history = defaultdict(list)\r\n for epoch in range(EPOCH):\r\n loss_record1 = []\r\n loss_record2 = []\r\n start_time = time.time()\r\n model.train()\r\n for step, input in enumerate(data_train):\r\n i1_state, i2_state, i3_state = None, None, None\r\n for adm in input:\r\n loss_target = adm[2] + [END_TOKEN]\r\n output_logits, i1_state, i2_state, i3_state = model(adm, i1_state, i2_state, i3_state)\r\n loss = criterion2(output_logits, torch.LongTensor(loss_target).to(device))\r\n\r\n loss_record1.append(loss.item())\r\n loss_record2.append(loss.item())\r\n\r\n optimizer.zero_grad()\r\n loss.backward(retain_graph=True)\r\n optimizer.step()\r\n\r\n llprint('\\rTrain--Epoch: %d, Step: %d/%d' % (epoch, step, len(data_train)))\r\n\r\n ddi_rate, ja, prauc, avg_p, avg_r, avg_f1 = eval(model, data_eval, voc_size, epoch)\r\n history['ja'].append(ja)\r\n history['ddi_rate'].append(ddi_rate)\r\n history['avg_p'].append(avg_p)\r\n history['avg_r'].append(avg_r)\r\n history['avg_f1'].append(avg_f1)\r\n history['prauc'].append(prauc)\r\n\r\n end_time = time.time()\r\n elapsed_time = (end_time - start_time) / 60\r\n llprint('\\tEpoch: %d, Loss1: %.4f, Loss2: %.4f, One Epoch Time: %.2fm, Appro Left Time: %.2fh\\n' % (epoch,\r\n np.mean(loss_record1),\r\n np.mean(loss_record2),\r\n elapsed_time,\r\n elapsed_time * (\r\n EPOCH - epoch - 1)/60))\r\n\r\n torch.save(model.state_dict(), open( os.path.join('saved', model_name, 'Epoch_%d_JA_%.4f_DDI_%.4f.model' % (epoch, ja, ddi_rate)), 'wb'))\r\n print('')\r\n\r\n dill.dump(history, open(os.path.join('saved', model_name, 'history.pkl'), 'wb'))\r\n\r\n # test\r\n torch.save(model.state_dict(), open(\r\n os.path.join('saved', model_name, 'final.model'), 'wb'))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"torch.manual_seed",
"numpy.mean",
"torch.device",
"numpy.argsort",
"numpy.array",
"numpy.zeros"
]
] |
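Note on the entry above: `sequence_output_process` decodes DMNC's per-step logits greedily — sort each step's scores descending and emit the best label that is neither a filter token nor already emitted. A toy sketch of that decoding rule (`decode_sequence` is a hypothetical name):

```python
import numpy as np

def decode_sequence(output_logits, filter_tokens=()):
    # Greedy per-step decoding: best unseen, non-filtered label each step.
    order = np.argsort(output_logits, axis=-1)[:, ::-1]  # best label first
    out = []
    for row in order:
        for label in row:
            if label in filter_tokens or label in out:
                continue
            out.append(int(label))
            break  # a step may emit nothing if all candidates are excluded
    return out

logits = np.array([[0.1, 0.9, 0.5],
                   [0.2, 0.8, 0.7],   # label 1 already emitted -> falls to 2
                   [0.6, 0.3, 0.1]])
assert decode_sequence(logits) == [1, 2, 0]
assert decode_sequence(logits, filter_tokens={1}) == [2, 0]
```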
liujuanLT/facenet
|
[
"58a6fdaa1b6e62dd8c781d376b83d13c86cdd76e"
] |
[
"src/models/densenet.py"
] |
[
"\"\"\"Contains the definition of the DenseNet architecture.\n\nAs described in https://arxiv.org/abs/1608.06993.\n\n Densely Connected Convolutional Networks\n Gao Huang, Zhuang Liu, Kilian Q. Weinberger, Laurens van der Maaten\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\n\n@slim.add_arg_scope\ndef _global_avg_pool2d(inputs, data_format='NHWC', scope=None, outputs_collections=None):\n with tf.variable_scope(scope, 'xx', [inputs]) as sc:\n axis = [1, 2] if data_format == 'NHWC' else [2, 3]\n net = tf.reduce_mean(inputs, axis=axis, keep_dims=True)\n net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)\n return net\n\n\n@slim.add_arg_scope\ndef _conv(inputs, num_filters, kernel_size, stride=1, dropout_rate=None,\n scope=None, outputs_collections=None):\n with tf.variable_scope(scope, 'xx', [inputs]) as sc:\n net = slim.batch_norm(inputs)\n net = tf.nn.relu(net)\n net = slim.conv2d(net, num_filters, kernel_size)\n\n if dropout_rate:\n net = tf.nn.dropout(net)\n\n net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)\n\n return net\n\n\n@slim.add_arg_scope\ndef _conv_block(inputs, num_filters, data_format='NHWC', scope=None, outputs_collections=None):\n with tf.variable_scope(scope, 'conv_blockx', [inputs]) as sc:\n net = inputs\n net = _conv(net, num_filters*4, 1, scope='x1')\n net = _conv(net, num_filters, 3, scope='x2')\n if data_format == 'NHWC':\n net = tf.concat([inputs, net], axis=3)\n else: # \"NCHW\"\n net = tf.concat([inputs, net], axis=1)\n\n net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)\n\n return net\n\n\n@slim.add_arg_scope\ndef _dense_block(inputs, num_layers, num_filters, growth_rate,\n grow_num_filters=True, scope=None, outputs_collections=None):\n\n with tf.variable_scope(scope, 'dense_blockx', [inputs]) as sc:\n net = inputs\n for i in range(num_layers):\n branch = i + 1\n net = _conv_block(net, growth_rate, scope='conv_block'+str(branch))\n\n if grow_num_filters:\n num_filters += growth_rate\n\n net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)\n\n return net, num_filters\n\n\n@slim.add_arg_scope\ndef _transition_block(inputs, num_filters, compression=1.0,\n scope=None, outputs_collections=None):\n\n num_filters = int(num_filters * compression)\n with tf.variable_scope(scope, 'transition_blockx', [inputs]) as sc:\n net = inputs\n net = _conv(net, num_filters, 1, scope='blk')\n\n net = slim.avg_pool2d(net, 2)\n\n net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)\n\n return net, num_filters\n\n\ndef densenet(inputs,\n dropout_keep_prob=0.8,\n bottleneck_layer_size=128,\n reduction=None,\n growth_rate=None,\n num_filters=None,\n num_layers=None,\n dropout_rate=None,\n data_format='NHWC',\n is_training=True,\n reuse=None,\n scope=None):\n assert reduction is not None\n assert growth_rate is not None\n assert num_filters is not None\n assert num_layers is not None\n\n compression = 1.0 - reduction\n num_dense_blocks = len(num_layers)\n\n if data_format == 'NCHW':\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n with tf.variable_scope(scope, 'densenetxxx', [inputs], reuse=reuse) as sc:\n end_points_collection = sc.name + '_end_points'\n with slim.arg_scope([slim.batch_norm, slim.dropout],\n is_training=is_training), \\\n slim.arg_scope([slim.conv2d, _conv, _conv_block,\n _dense_block, _transition_block], \n 
outputs_collections=end_points_collection), \\\n slim.arg_scope([_conv], dropout_rate=dropout_rate):\n net = inputs\n\n # initial convolution\n net = slim.conv2d(net, num_filters, 7, stride=2, scope='conv1')\n net = slim.batch_norm(net)\n net = tf.nn.relu(net)\n net = slim.max_pool2d(net, 3, stride=2, padding='SAME')\n\n # blocks\n for i in range(num_dense_blocks - 1):\n # dense blocks\n net, num_filters = _dense_block(net, num_layers[i], num_filters,\n growth_rate,\n scope='dense_block' + str(i+1))\n\n # Add transition_block\n net, num_filters = _transition_block(net, num_filters,\n compression=compression,\n scope='transition_block' + str(i+1))\n\n net, num_filters = _dense_block(\n net, num_layers[-1], num_filters,\n growth_rate,\n scope='dense_block' + str(num_dense_blocks))\n\n # final blocks\n with tf.variable_scope('final_block', [inputs]):\n net = slim.batch_norm(net)\n net = tf.nn.relu(net)\n net = _global_avg_pool2d(net, scope='global_avg_pool')\n\n # net = slim.conv2d(net, 1000, 1,\n # biases_initializer=tf.zeros_initializer(),\n # scope='logits')\n\n net = slim.conv2d(net, bottleneck_layer_size, 1,\n biases_initializer=tf.zeros_initializer(),\n scope='logits')\n\n net = tf.squeeze(net, [1, 2], name='logits') # ADD\n\n #net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n # scope='Dropout') # ADD\n\n # net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None, \n # scope='Bottleneck', reuse=False) # ADD\n\n end_points = slim.utils.convert_collection_to_dict(\n end_points_collection)\n\n return net, end_points\n\n\ndef inference(images, keep_probability, phase_train=True, \n bottleneck_layer_size=128, weight_decay=1e-4, reuse=None,\n batch_norm_decay=0.99,\n batch_norm_epsilon=1.1e-5,\n data_format='NHWC'):\n with slim.arg_scope([slim.conv2d, slim.batch_norm, slim.avg_pool2d, slim.max_pool2d,\n _conv_block, _global_avg_pool2d],\n data_format=data_format):\n with slim.arg_scope([slim.conv2d],\n weights_regularizer=slim.l2_regularizer(weight_decay),\n activation_fn=None,\n biases_initializer=None):\n with slim.arg_scope([slim.batch_norm],\n scale=True,\n decay=batch_norm_decay,\n epsilon=batch_norm_epsilon):\n return densenet121(images, is_training=phase_train, \n dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)\n\ndef densenet121(inputs, is_training=True, \n dropout_keep_prob=0.8,\n bottleneck_layer_size=128, reuse=None, data_format='NHWC'):\n return densenet(inputs,\n dropout_keep_prob=dropout_keep_prob,\n bottleneck_layer_size=bottleneck_layer_size, \n reduction=0.5,\n growth_rate=32,\n num_filters=64,\n num_layers=[6,12,24,16],\n data_format=data_format,\n is_training=is_training,\n reuse=reuse,\n scope='densenet121')\n#densenet121.default_image_size = 224\n\n"
] |
[
[
"tensorflow.nn.relu",
"tensorflow.transpose",
"tensorflow.concat",
"tensorflow.reduce_mean",
"tensorflow.zeros_initializer",
"tensorflow.squeeze",
"tensorflow.variable_scope",
"tensorflow.nn.dropout"
]
] |
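Note on the entry above: in `densenet`, the channel count grows by `growth_rate` per layer inside each dense block and is then compressed to `int(num_filters * compression)` at each transition block, with `compression = 1.0 - reduction`. A quick arithmetic sketch reproducing the familiar DenseNet-121 schedule from the `densenet121` arguments (the helper name is my own):

```python
def densenet_filter_schedule(num_filters, growth_rate, num_layers, reduction):
    compression = 1.0 - reduction
    schedule = []
    for i, n in enumerate(num_layers):
        num_filters += n * growth_rate       # _dense_block grows the filters
        schedule.append(num_filters)
        if i < len(num_layers) - 1:          # _transition_block compresses them
            num_filters = int(num_filters * compression)
            schedule.append(num_filters)
    return schedule

# densenet121(num_filters=64, growth_rate=32, num_layers=[6, 12, 24, 16],
# reduction=0.5) yields the well-known channel counts:
assert densenet_filter_schedule(64, 32, [6, 12, 24, 16], 0.5) == [
    256, 128, 512, 256, 1024, 512, 1024]
```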
Avin0323/Paddle
|
[
"a615002abdfe8cfdab78f1b7b344ef2939345548"
] |
[
"python/paddle/fluid/tests/unittests/test_egr_python_api.py"
] |
[
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle.fluid.core as core\nimport paddle.fluid.eager.eager_tensor_patch_methods as eager_tensor_patch_methods\nimport paddle\nimport numpy as np\nfrom paddle.fluid.framework import _test_eager_guard\nfrom paddle.fluid.data_feeder import convert_dtype\nimport unittest\n\n\nclass EagerScaleTestCase(unittest.TestCase):\n def test_scale_base(self):\n with _test_eager_guard():\n paddle.set_device(\"cpu\")\n arr = np.ones([4, 16, 16, 32]).astype('float32')\n tensor = paddle.to_tensor(arr, 'float32', core.CPUPlace())\n print(tensor)\n tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)\n for i in range(0, 100):\n tensor = core.eager.scale(tensor, 2.0, 0.9, True, False)\n print(tensor)\n self.assertEqual(tensor.shape, [4, 16, 16, 32])\n self.assertEqual(tensor.stop_gradient, True)\n\n def test_retain_grad_and_run_backward(self):\n with _test_eager_guard():\n paddle.set_device(\"cpu\")\n\n input_data = np.ones([4, 16, 16, 32]).astype('float32')\n data_eager = paddle.to_tensor(input_data, 'float32',\n core.CPUPlace(), False)\n\n grad_data = np.ones([4, 16, 16, 32]).astype('float32')\n grad_eager = paddle.to_tensor(grad_data, 'float32', core.CPUPlace())\n\n core.eager.retain_grad_for_tensor(data_eager)\n\n out_eager = core.eager.scale(data_eager, 1.0, 0.9, True, True)\n self.assertFalse(data_eager.grad._is_initialized())\n core.eager.run_backward([out_eager], [grad_eager], False)\n self.assertTrue(data_eager.grad._is_initialized())\n self.assertTrue(np.array_equal(data_eager.grad.numpy(), input_data))\n\n\nclass EagerDtypeTestCase(unittest.TestCase):\n def check_to_tesnsor_and_numpy(self, dtype, proto_dtype):\n with _test_eager_guard():\n arr = np.random.random([4, 16, 16, 32]).astype(dtype)\n tensor = paddle.to_tensor(arr, dtype)\n self.assertEqual(tensor.dtype, proto_dtype)\n self.assertTrue(np.array_equal(arr, tensor.numpy()))\n\n def test_dtype_base(self):\n print(\"Test_dtype\")\n self.check_to_tesnsor_and_numpy('bool', core.VarDesc.VarType.BOOL)\n self.check_to_tesnsor_and_numpy('int8', core.VarDesc.VarType.INT8)\n self.check_to_tesnsor_and_numpy('uint8', core.VarDesc.VarType.UINT8)\n self.check_to_tesnsor_and_numpy('int16', core.VarDesc.VarType.INT16)\n self.check_to_tesnsor_and_numpy('int32', core.VarDesc.VarType.INT32)\n self.check_to_tesnsor_and_numpy('int64', core.VarDesc.VarType.INT64)\n self.check_to_tesnsor_and_numpy('float16', core.VarDesc.VarType.FP16)\n self.check_to_tesnsor_and_numpy('float32', core.VarDesc.VarType.FP32)\n self.check_to_tesnsor_and_numpy('float64', core.VarDesc.VarType.FP64)\n self.check_to_tesnsor_and_numpy('complex64',\n core.VarDesc.VarType.COMPLEX64)\n self.check_to_tesnsor_and_numpy('complex128',\n core.VarDesc.VarType.COMPLEX128)\n\n\nclass EagerTensorPropertiesTestCase(unittest.TestCase):\n def constructor(self, place):\n egr_tensor = core.eager.EagerTensor()\n self.assertEqual(egr_tensor.persistable, False)\n 
self.assertTrue(\"generated\" in egr_tensor.name)\n self.assertEqual(egr_tensor.shape, [])\n self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor.stop_gradient, True)\n\n egr_tensor0 = core.eager.EagerTensor(\n core.VarDesc.VarType.FP32, [4, 16, 16, 32], \"test_eager_tensor\",\n core.VarDesc.VarType.LOD_TENSOR, True)\n self.assertEqual(egr_tensor0.persistable, True)\n self.assertEqual(egr_tensor0.name, \"test_eager_tensor\")\n self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)\n\n arr0 = np.random.rand(4, 16, 16, 32).astype('float32')\n egr_tensor1 = core.eager.EagerTensor(arr0, place, True, False,\n \"numpy_tensor1\", False)\n self.assertEqual(egr_tensor1.persistable, True)\n self.assertEqual(egr_tensor1.name, \"numpy_tensor1\")\n self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor1.stop_gradient, False)\n self.assertTrue(egr_tensor1.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor1.numpy(), arr0))\n\n arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64)\n egr_tensor2 = core.eager.EagerTensor(arr1, place, False, True,\n \"numpy_tensor2\", True)\n self.assertEqual(egr_tensor2.persistable, False)\n self.assertEqual(egr_tensor2.name, \"numpy_tensor2\")\n self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.INT64)\n self.assertEqual(egr_tensor2.stop_gradient, True)\n self.assertTrue(egr_tensor2.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor2.numpy(), arr1))\n\n arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32')\n egr_tensor3 = core.eager.EagerTensor(arr2)\n self.assertEqual(egr_tensor3.persistable, False)\n self.assertTrue(\"generated_tensor\" in egr_tensor3.name)\n self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64])\n self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor3.stop_gradient, True)\n self.assertTrue(\n egr_tensor3.place._equals(\n paddle.fluid.framework._current_expected_place()))\n self.assertTrue(np.array_equal(egr_tensor3.numpy(), arr2))\n\n egr_tensor3.stop_gradient = False\n egr_tensor4 = core.eager.EagerTensor(egr_tensor3)\n self.assertEqual(egr_tensor4.persistable, False)\n self.assertTrue(\"generated_tensor\" in egr_tensor4.name)\n self.assertEqual(egr_tensor4.shape, egr_tensor3.shape)\n self.assertEqual(egr_tensor4.dtype, egr_tensor3.dtype)\n self.assertEqual(egr_tensor4.stop_gradient, True)\n self.assertTrue(\n egr_tensor4.place._equals(\n paddle.fluid.framework._current_expected_place()))\n self.assertTrue(\n np.array_equal(egr_tensor4.numpy(), egr_tensor3.numpy()))\n\n arr4 = np.random.rand(4, 16, 16, 32).astype('float32')\n egr_tensor5 = core.eager.EagerTensor(arr4, place)\n self.assertEqual(egr_tensor5.persistable, False)\n self.assertTrue(\"generated_tensor\" in egr_tensor5.name)\n self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor5.stop_gradient, True)\n self.assertTrue(egr_tensor5.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor5.numpy(), arr4))\n\n egr_tensor6 = core.eager.EagerTensor(egr_tensor5, core.CPUPlace())\n self.assertEqual(egr_tensor6.persistable, False)\n self.assertTrue(\"generated_tensor\" in egr_tensor6.name)\n self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])\n 
self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor6.stop_gradient, True)\n self.assertEqual(egr_tensor6.place.is_cpu_place(), True)\n self.assertTrue(\n np.array_equal(egr_tensor6.numpy(), egr_tensor5.numpy()))\n\n egr_tensor7 = core.eager.EagerTensor(arr4, place, True)\n self.assertEqual(egr_tensor7.persistable, True)\n self.assertTrue(\"generated_tensor\" in egr_tensor7.name)\n self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor7.stop_gradient, True)\n self.assertTrue(egr_tensor7.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor7.numpy(), arr4))\n\n egr_tensor8 = core.eager.EagerTensor(egr_tensor6, place, \"egr_tensor8\")\n self.assertEqual(egr_tensor8.persistable, False)\n self.assertEqual(egr_tensor8.name, \"egr_tensor8\")\n self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor8.stop_gradient, True)\n self.assertTrue(egr_tensor8.place._equals(place))\n self.assertTrue(\n np.array_equal(egr_tensor8.numpy(), egr_tensor5.numpy()))\n\n egr_tensor9 = core.eager.EagerTensor(arr4, place, True, True)\n self.assertEqual(egr_tensor9.persistable, True)\n self.assertTrue(\"generated_tensor\" in egr_tensor9.name)\n self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])\n self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)\n self.assertEqual(egr_tensor9.stop_gradient, True)\n self.assertTrue(egr_tensor9.place._equals(place))\n self.assertTrue(np.array_equal(egr_tensor9.numpy(), arr4))\n\n def test_constructor(self):\n print(\"Test_constructor\")\n paddle.set_device(\"cpu\")\n place_list = [core.CPUPlace()]\n if core.is_compiled_with_cuda():\n place_list.append(core.CUDAPlace(0))\n with _test_eager_guard():\n for p in place_list:\n self.constructor(p)\n\n def test_copy_and_copy_to(self):\n print(\"Test_copy_and_copy_to\")\n with _test_eager_guard():\n paddle.set_device(\"cpu\")\n arr = np.ones([4, 16, 16, 32]).astype('float32')\n arr1 = np.zeros([4, 16]).astype('float32')\n arr2 = np.ones([4, 16, 16, 32]).astype('float32') + np.ones(\n [4, 16, 16, 32]).astype('float32')\n tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,\n core.CPUPlace())\n self.assertEqual(tensor.stop_gradient, True)\n tensor.stop_gradient = False\n print(\"Set persistable\")\n tensor.persistable = False\n tensor1 = paddle.to_tensor(arr1, core.VarDesc.VarType.FP32,\n core.CPUPlace())\n tensor1.persistable = True\n self.assertEqual(tensor1.stop_gradient, True)\n self.assertTrue(np.array_equal(tensor.numpy(), arr))\n print(\"Test copy_\")\n tensor.copy_(tensor1, True)\n self.assertEqual(tensor.persistable, True)\n self.assertEqual(tensor.shape, [4, 16])\n self.assertEqual(tensor.dtype, core.VarDesc.VarType.FP32)\n self.assertTrue(np.array_equal(tensor.numpy(), arr1))\n\n print(\"Test _copy_to\")\n tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,\n core.CPUPlace())\n self.assertTrue(np.array_equal(tensor2.numpy(), arr2))\n self.assertTrue(tensor2.place.is_cpu_place())\n tensor2.persistable = True\n tensor2.stop_gradient = False\n if core.is_compiled_with_cuda():\n tensor3 = tensor2._copy_to(True, core.CUDAPlace(0))\n self.assertTrue(np.array_equal(tensor3.numpy(), arr2))\n self.assertTrue(tensor3.persistable, True)\n self.assertTrue(tensor3.stop_gradient, True)\n self.assertTrue(tensor3.place.is_gpu_place())\n else:\n tensor3 = tensor2._copy_to(True, 
core.CPUPlace())\n self.assertTrue(np.array_equal(tensor3.numpy(), arr2))\n self.assertTrue(tensor3.persistable, True)\n self.assertTrue(tensor3.stop_gradient, True)\n self.assertTrue(tensor3.place.is_cpu_place())\n\n def test_properties(self):\n print(\"Test_properties\")\n with _test_eager_guard():\n paddle.set_device(\"cpu\")\n arr = np.ones([4, 16, 16, 32]).astype('float32')\n tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,\n core.CPUPlace())\n self.assertEqual(tensor.shape, [4, 16, 16, 32])\n tensor.name = 'tensor_name_test'\n self.assertEqual(tensor.name, 'tensor_name_test')\n self.assertEqual(tensor.persistable, False)\n tensor.persistable = True\n self.assertEqual(tensor.persistable, True)\n tensor.persistable = False\n self.assertEqual(tensor.persistable, False)\n self.assertTrue(tensor.place.is_cpu_place())\n self.assertEqual(tensor._place_str, 'CPUPlace')\n self.assertEqual(tensor.stop_gradient, True)\n tensor.stop_gradient = False\n self.assertEqual(tensor.stop_gradient, False)\n tensor.stop_gradient = True\n self.assertEqual(tensor.stop_gradient, True)\n\n def test_global_properties(self):\n print(\"Test_global_properties\")\n self.assertFalse(core._in_eager_mode())\n with _test_eager_guard():\n self.assertTrue(core._in_eager_mode())\n self.assertFalse(core._in_eager_mode())\n\n def test_place_guard(self):\n core._enable_eager_mode()\n if core.is_compiled_with_cuda():\n paddle.set_device(\"gpu:0\")\n with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):\n self.assertTrue(core.eager._get_expected_place().is_cpu_place())\n else:\n paddle.set_device(\"cpu\")\n with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):\n self.assertTrue(core.eager._get_expected_place().is_cpu_place())\n core._disable_eager_mode()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.random.random",
"numpy.ones",
"numpy.random.rand",
"numpy.zeros",
"numpy.random.randint"
]
] |
yurilavinas/failure_diversity_maximisation
|
[
"24d5b90455eb554bc91b6092df53d83f4b1023df"
] |
[
"fdm/fdm_code/domain/wann_task_gym.py"
] |
[
"import random\nimport numpy as np\nimport sys\nfrom domain.make_env import make_env\nfrom domain.task_gym import GymTask\nfrom neat_src import *\nimport math\n\n\nclass WannGymTask(GymTask):\n \"\"\"Problem domain to be solved by neural network. Uses OpenAI Gym patterns.\n \"\"\" \n def __init__(self, game, paramOnly=False, nReps=1): \n \"\"\"Initializes task environment\n \n Args:\n game - (string) - dict key of task to be solved (see domain/config.py)\n \n Optional:\n paramOnly - (bool) - only load parameters instead of launching task?\n nReps - (nReps) - number of trials to get average fitness\n \"\"\"\n GymTask.__init__(self, game, paramOnly, nReps)\n\n\n# -- 'Weight Agnostic Network' evaluation -------------------------------- -- #\n def setWeights(self, wVec, wVal):\n \"\"\"Set single shared weight of network\n \n Args:\n wVec - (np_array) - weight matrix as a flattened vector\n [N**2 X 1]\n wVal - (float) - value to assign to all weights\n \n Returns:\n wMat - (np_array) - weight matrix with single shared weight\n [N X N]\n \"\"\"\n # Create connection matrix\n wVec[np.isnan(wVec)] = 0\n dim = int(np.sqrt(np.shape(wVec)[0])) \n cMat = np.reshape(wVec,(dim,dim))\n cMat[cMat!=0] = 1.0\n\n # Assign value to all weights\n wMat = np.copy(cMat) * wVal \n return wMat\n\n\n def getFitness(self, wVec, aVec, hyp, \\\n nRep=False,seed=-1, nVals=8,view=False,returnVals=False):\n \"\"\"Get fitness of a single individual with distribution of weights\n \n Args:\n wVec - (np_array) - weight matrix as a flattened vector\n [N**2 X 1]\n aVec - (np_array) - activation function of each node \n [N X 1] - stored as ints (see applyAct in ann.py)\n hyp - (dict) - hyperparameters\n ['alg_wDist'] - weight distribution [standard;fixed;linspace]\n ['alg_absWCap'] - absolute value of highest weight for linspace\n \n Optional:\n seed - (int) - starting random seed for trials\n nReps - (int) - number of trials to get average fitness\n nVals - (int) - number of weight values to test\n\n \n Returns:\n fitness - (float) - mean reward over all trials\n \"\"\"\n if nRep is False:\n nRep = hyp['alg_nReps']\n\n # Set weight values to test WANN with\n if (hyp['alg_wDist'] == \"standard\") and nVals==8: # Double, constant, and half signal \n wVals = np.array((-2,2,0.5,-1.5,-1.0,-0.5,1.0,1.5))\n else:\n wVals = np.linspace(-self.absWCap, self.absWCap ,nVals)\n\n\n # Get reward from 'reps' rollouts -- test population on same seeds\n reward = np.empty((nRep,nVals))\n cos = [[]] * nVals\n sin = [[]] * nVals\n pos_x = [[]] * nVals\n count = [[]] * nVals\n \n for iRep in range(nRep):\n for iVal in range(nVals):\n wMat = self.setWeights(wVec,wVals[iVal])\n \n if seed == -1:\n reward[iRep,iVal] = self.testInd(wMat, aVec, view=view, seed=42, returnVals=returnVals)\n else:\n reward[iRep,iVal] = self.testInd(wMat, aVec, seed=42,view=view, returnVals=returnVals)\n cos[iVal] = self.cos\n sin[iVal] = self.sin\n pos_x[iVal] = self.pos_x\n\n \n\n if(hyp['alg_selection'] == \"count\"):\n\n pos_x_ = pos_x[iVal]\n cos = cos[iVal]\n sin = sin[iVal]\n \n x = np.array([i*0.6 for i in cos]) + pos_x_\n y = np.array([i*0.6 for i in sin]) + pos_x_\n \n if x[len(x)-1] < -2.4:\n count[iVal] = -1 # -1 - exit to the left\n elif x[len(x)-1] > 2.4: \n count[iVal] = 1 # 1 - exit to the right\n \n else:\n tmp = y > 0\n j = 0\n count[iVal] = 1\n for i in range(len(tmp)):\n if j != i:\n if tmp[j]!= tmp[i]: \n j = i\n count[iVal] += 1 # couting transitions between below and above horizontal line\n count[iVal] = count[iVal]/2 # times the pole move to an 
upward position\n\n if count[iVal] == 0.5:\n count[iVal] = 0 # 0 - success\n else:\n left = sum(x > 0) \n right = sum(x < 0)\n if (left != right):\n idx = np.argmax(np.asarray([left, right]))\n if (idx == 0):\n count[iVal] = -2 # -2 - looping to the left\n else:\n count[iVal] = 2 # 2 - looping to the right\n else:\n count[iVal] = 3 # 3 - looping both ways, equally\n\n if returnVals is True:\n return np.mean(reward,axis=0), np.std(reward,axis=0), wVals, cos, sin, pos_x\n elif(hyp['alg_selection'] == \"count\"): \n count = len(np.unique(count))\n return np.mean(reward,axis=0), count\n else:\n return np.mean(reward,axis=0)\n\n"
] |
[
[
"numpy.linspace",
"numpy.unique",
"numpy.reshape",
"numpy.isnan",
"numpy.asarray",
"numpy.copy",
"numpy.std",
"numpy.mean",
"numpy.shape",
"numpy.array",
"numpy.empty"
]
] |
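The wann_task_gym.py record above encodes a Weight Agnostic Network as a flattened connection vector and evaluates it by assigning a single shared weight to every connection (setWeights). A minimal NumPy sketch of that reshaping trick, using an invented 3-node connection vector rather than data from the record:

import numpy as np

# Hypothetical flattened 3x3 connection vector; NaN marks "no connection"
wVec = np.array([0, 1, np.nan, 0, 0, 1, 1, 0, 0], dtype=float)
wVec[np.isnan(wVec)] = 0              # treat NaN as disconnected
dim = int(np.sqrt(wVec.shape[0]))     # recover the N x N shape (N = 3)
cMat = wVec.reshape(dim, dim)
cMat[cMat != 0] = 1.0                 # binary connection mask
wMat = cMat * (-2.0)                  # one shared weight value, e.g. -2.0
print(wMat)

Because the topology is fixed and only one scalar varies, the record can sweep wVals over eight candidate weights and average reward across them.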
vhosouza/xcoord
|
[
"9226a6f919b3edec933753ff17815092ab95df9a",
"9226a6f919b3edec933753ff17815092ab95df9a"
] |
[
"visualization/visualize_tms_scene.py",
"tractography/vtk_inv_tracts.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# xcoord - Tools for cross-software spatial coordinate manipulation\n#\n# This file is part of xcoord package which is released under copyright.\n# See file LICENSE or go to website for full license details.\n# Copyright (C) 2018 Victor Hugo Souza - All Rights Reserved\n#\n# Homepage: https://github.com/vhosouza/xcoord\n# Contact: victor.souza@aalto.fi\n# License: MIT License\n#\n# Authors: Victor Hugo Souza\n# Date/version: 10.4.2019\n\nimport os\n\nimport nibabel as nb\nimport numpy as np\nfrom scipy import io\nimport transformations as tf\nimport vtk\n\nfrom markers import nexstim2mri as n2m, load_coords as lc\n\n\ndef main():\n\n SHOW_AXES = True\n SHOW_SCENE_AXES = True\n SHOW_COIL_AXES = True\n SHOW_SKIN = True\n SHOW_BRAIN = True\n SHOW_COIL = True\n SHOW_MARKERS = True\n TRANSF_COIL = True\n SHOW_PLANE = False\n SELECT_LANDMARKS = 'scalp' # 'all', 'mri' 'scalp'\n SAVE_ID = False\n AFFINE_IMG = True\n NO_SCALE = True\n SCREENSHOT = False\n\n reorder = [0, 2, 1]\n flipx = [True, False, False]\n\n # reorder = [0, 1, 2]\n # flipx = [False, False, False]\n\n # default folder and subject\n subj = 'S5' # 1 - 9\n id_extra = False # 8, 9, 10, 12, False\n data_dir = r'P:\\tms_eeg\\mTMS\\projects\\2016 Lateral ppTMS M1\\E-fields'\n simnibs_dir = os.path.join(data_dir, 'simnibs', 'm2m_ppM1_{}_nc'.format(subj))\n\n if id_extra and subj == 'S1':\n subj_id = '{}_{}'.format(subj, id_extra)\n else:\n subj_id = '{}'.format(subj)\n\n nav_dir = os.path.join(data_dir, 'nav_coordinates', 'ppM1_{}'.format(subj_id))\n\n # filenames\n coil_file = os.path.join(os.environ['OneDrive'], 'data', 'nexstim_coord', 'magstim_fig8_coil.stl')\n\n coord_file = os.path.join(nav_dir, 'ppM1_eximia_{}.txt'.format(subj_id))\n\n img_file = os.path.join(data_dir, r'mri\\ppM1_{}\\ppM1_{}.nii'.format(subj, subj))\n brain_file = os.path.join(simnibs_dir, 'wm.stl')\n skin_file = os.path.join(simnibs_dir, 'skin.stl')\n\n output_file = os.path.join(nav_dir, 'transf_mat_{}'.format(subj_id))\n\n coords = lc.load_nexstim(coord_file)\n # red, green, blue, maroon (dark red),\n # olive (shitty green), teal (petrol blue), yellow, orange\n col = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.],\n [.5, .5, 0.], [0., .5, .5], [1., 1., 0.], [1., .4, .0]]\n\n # extract image header shape and affine transformation from original nifti file\n imagedata = nb.squeeze_image(nb.load(img_file))\n imagedata = nb.as_closest_canonical(imagedata)\n imagedata.update_header()\n pix_dim = imagedata.header.get_zooms()\n img_shape = imagedata.header.get_data_shape()\n\n print(\"Pixel size: \\n\")\n print(pix_dim)\n print(\"\\nImage shape: \\n\")\n print(img_shape)\n\n affine_aux = imagedata.affine.copy()\n if NO_SCALE:\n scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine)\n affine_aux = tf.compose_matrix(scale=None, shear=shear, angles=angs, translate=trans, perspective=persp)\n\n if AFFINE_IMG:\n affine = affine_aux\n # if NO_SCALE:\n # scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine)\n # affine = tf.compose_matrix(scale=None, shear=shear, angles=angs, translate=trans, perspective=persp)\n else:\n affine = np.identity(4)\n # affine_I = np.identity(4)\n\n # create a camera, render window and renderer\n camera = vtk.vtkCamera()\n camera.SetPosition(0, 1000, 0)\n camera.SetFocalPoint(0, 0, 0)\n camera.SetViewUp(0, 0, 1)\n camera.ComputeViewPlaneNormal()\n camera.Azimuth(90.0)\n camera.Elevation(10.0)\n\n ren = vtk.vtkRenderer()\n ren.SetActiveCamera(camera)\n 
ren.ResetCamera()\n camera.Dolly(1.5)\n\n ren_win = vtk.vtkRenderWindow()\n ren_win.AddRenderer(ren)\n ren_win.SetSize(800, 800)\n\n # create a renderwindowinteractor\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(ren_win)\n\n if SELECT_LANDMARKS == 'mri':\n # MRI landmarks\n coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']]\n pts_ref = [1, 2, 3, 7, 10]\n elif SELECT_LANDMARKS == 'all':\n # all coords\n coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Nose/Nasion'], ['Left ear'], ['Right ear'],\n ['Coil Loc'], ['EF max']]\n pts_ref = [1, 2, 3, 5, 4, 6, 7, 10]\n elif SELECT_LANDMARKS == 'scalp':\n # scalp landmarks\n coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']]\n hdr_mri = ['Nose/Nasion', 'Left ear', 'Right ear', 'Coil Loc', 'EF max']\n pts_ref = [5, 4, 6, 7, 10]\n\n coords_np = np.zeros([len(pts_ref), 3])\n\n for n, pts_id in enumerate(pts_ref):\n # to keep in the MRI space use the identity as the affine\n # coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine_I, flipx, reorder)\n # affine_trans = affine_I.copy()\n # affine_trans = affine.copy()\n # affine_trans[:3, -1] = affine[:3, -1]\n coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine, flipx, reorder)\n coords_np[n, :] = coord_aux\n [coord_mri[n].append(s) for s in coord_aux]\n\n if SHOW_MARKERS:\n marker_actor = add_marker(coord_aux, ren, col[n])\n\n if id_extra:\n # compare coil locations in experiments with 8, 9, 10 and 12 mm shifts\n # MRI Nexstim space: 8, 9, 10, 12 mm coil locations\n # coord_others = [[122.2, 198.8, 99.7],\n # [121.1, 200.4, 100.1],\n # [120.5, 200.7, 98.2],\n # [117.7, 202.9, 96.6]]\n if AFFINE_IMG:\n # World space: 8, 9, 10, 12 mm coil locations\n coord_others = [[-42.60270233154297, 28.266497802734378, 81.02450256347657],\n [-41.50270233154296, 28.66649780273437, 82.62450256347657],\n [-40.90270233154297, 26.766497802734378, 82.92450256347655],\n [-38.10270233154297, 25.16649780273437, 85.12450256347657]]\n else:\n # MRI space reordered and flipped: 8, 9, 10, 12 mm coil locations\n coord_others = [[27.8, 99.7, 198.8],\n [28.9, 100.1, 200.4],\n [29.5, 98.2, 200.7],\n [32.3, 96.6, 202.9]]\n\n col_others = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [0., 0., 0.]]\n for n, c in enumerate(coord_others):\n marker_actor = add_marker(c, ren, col_others[n])\n\n print('\\nOriginal coordinates from Nexstim: \\n')\n [print(s) for s in coords]\n print('\\nMRI coordinates flipped and reordered: \\n')\n [print(s) for s in coords_np]\n print('\\nTransformed coordinates to MRI space: \\n')\n [print(s) for s in coord_mri]\n\n # coil location, normal vector and direction vector\n coil_loc = coord_mri[-2][1:]\n coil_norm = coords[8][1:]\n coil_dir = coords[9][1:]\n\n # creating the coil coordinate system by adding a point in the direction of each given coil vector\n # the additional vector is just the cross product from coil direction and coil normal vectors\n # origin of the coordinate system is the coil location given by Nexstim\n # the vec_length is to allow line creation with visible length in VTK scene\n vec_length = 75\n p1 = coords[7][1:]\n p2 = [x + vec_length * y for x, y in zip(p1, coil_norm)]\n p2_norm = n2m.coord_change(p2, img_shape, affine, flipx, reorder)\n\n p2 = [x + vec_length * y for x, y in zip(p1, coil_dir)]\n p2_dir = n2m.coord_change(p2, img_shape, affine, flipx, reorder)\n\n coil_face = np.cross(coil_norm, coil_dir)\n p2 = [x - vec_length * y for x, y in zip(p1, 
coil_face.tolist())]\n p2_face = n2m.coord_change(p2, img_shape, affine, flipx, reorder)\n\n # Coil face unit vector (X)\n u1 = np.asarray(p2_face) - np.asarray(coil_loc)\n u1_n = u1 / np.linalg.norm(u1)\n # Coil direction unit vector (Y)\n u2 = np.asarray(p2_dir) - np.asarray(coil_loc)\n u2_n = u2 / np.linalg.norm(u2)\n # Coil normal unit vector (Z)\n u3 = np.asarray(p2_norm) - np.asarray(coil_loc)\n u3_n = u3 / np.linalg.norm(u3)\n\n transf_matrix = np.identity(4)\n if TRANSF_COIL:\n transf_matrix[:3, 0] = u1_n\n transf_matrix[:3, 1] = u2_n\n transf_matrix[:3, 2] = u3_n\n transf_matrix[:3, 3] = coil_loc[:]\n\n # the absolute value of the determinant indicates the scaling factor\n # the sign of the determinant indicates how it affects the orientation: if positive maintain the\n # original orientation and if negative inverts all the orientations (flip the object inside-out)'\n # the negative determinant is what makes objects in VTK scene to become black\n print('Transformation matrix: \\n', transf_matrix, '\\n')\n print('Determinant: ', np.linalg.det(transf_matrix))\n\n if SAVE_ID:\n coord_dict = {'m_affine': transf_matrix, 'coords_labels': hdr_mri, 'coords': coords_np}\n io.savemat(output_file + '.mat', coord_dict)\n hdr_names = ';'.join(['m' + str(i) + str(j) for i in range(1, 5) for j in range(1, 5)])\n np.savetxt(output_file + '.txt', transf_matrix.reshape([1, 16]), delimiter=';', header=hdr_names)\n\n if SHOW_BRAIN:\n if AFFINE_IMG:\n brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.], opacity=1.)\n else:\n # to visualize brain in MRI space\n brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.], opacity=1., user_matrix=np.linalg.inv(affine_aux))\n if SHOW_SKIN:\n if AFFINE_IMG:\n skin_actor = load_stl(skin_file, ren, colour=\"SkinColor\", opacity=.4)\n else:\n # to visualize skin in MRI space\n skin_actor = load_stl(skin_file, ren, colour=\"SkinColor\", opacity=.4, user_matrix=np.linalg.inv(affine_aux))\n\n if SHOW_COIL:\n # reposition STL object prior to transformation matrix\n # [translation_x, translation_y, translation_z, rotation_x, rotation_y, rotation_z]\n # old translation when using Y as normal vector\n # repos = [0., -6., 0., 0., -90., 90.]\n # Translate coil loc coordinate to coil bottom\n # repos = [0., 0., 5.5, 0., 0., 180.]\n repos = [0., 0., 0., 0., 0., 180.]\n # SimNIBS coil orientation requires 180deg around Y\n # Ry = tf.rotation_matrix(np.pi, [0, 1, 0], [0, 0, 0])\n # transf_matrix = transf_matrix @ Ry\n act_coil = load_stl(coil_file, ren, replace=repos, user_matrix=transf_matrix, opacity=.3)\n\n if SHOW_PLANE:\n act_plane = add_plane(ren, user_matrix=transf_matrix)\n\n # Add axes to scene origin\n if SHOW_AXES:\n add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0])\n add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0])\n add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0])\n\n # Add axes to object origin\n if SHOW_COIL_AXES:\n add_line(ren, coil_loc, p2_norm, color=[.0, .0, 1.0])\n add_line(ren, coil_loc, p2_dir, color=[.0, 1.0, .0])\n add_line(ren, coil_loc, p2_face, color=[1.0, .0, .0])\n\n # Add interactive axes to scene\n if SHOW_SCENE_AXES:\n axes = vtk.vtkAxesActor()\n widget = vtk.vtkOrientationMarkerWidget()\n widget.SetOutlineColor(0.9300, 0.5700, 0.1300)\n widget.SetOrientationMarker(axes)\n widget.SetInteractor(iren)\n # widget.SetViewport(0.0, 0.0, 0.4, 0.4)\n widget.SetEnabled(1)\n widget.InteractiveOn()\n\n if SCREENSHOT:\n # screenshot of VTK scene\n w2if = vtk.vtkWindowToImageFilter()\n 
w2if.SetInput(ren_win)\n w2if.Update()\n\n writer = vtk.vtkPNGWriter()\n writer.SetFileName(\"screenshot.png\")\n writer.SetInput(w2if.GetOutput())\n writer.Write()\n\n # Enable user interface interactor\n # ren_win.Render()\n\n ren.ResetCameraClippingRange()\n\n iren.Initialize()\n iren.Start()\n\n\ndef add_marker(coord, ren, color):\n # x, y, z = coord\n\n ball_ref = vtk.vtkSphereSource()\n ball_ref.SetRadius(1)\n ball_ref.SetCenter(coord)\n\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(ball_ref.GetOutputPort())\n\n prop = vtk.vtkProperty()\n prop.SetColor(color)\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.SetProperty(prop)\n\n ren.AddActor(actor)\n\n return actor\n\n\ndef add_plane(ren, coil_center=[0., 0., 0.], coil_normal=[0., 0., 1.], user_matrix=np.identity(4)):\n\n coil_plane = vtk.vtkPlaneSource()\n coil_plane.SetOrigin(0, 0, 0)\n coil_plane.SetPoint1(0, 50, 0)\n coil_plane.SetPoint2(100, 0, 0)\n coil_plane.SetCenter(coil_center)\n coil_plane.SetNormal(coil_normal)\n\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(coil_plane.GetOutputPort())\n\n prop = vtk.vtkProperty()\n prop.SetColor(0.5, 0., 0.5)\n # prop.SetColor(1., 0., 0.)\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.SetProperty(prop)\n\n matrix_vtk = vtk.vtkMatrix4x4()\n\n for row in range(0, 4):\n for col in range(0, 4):\n matrix_vtk.SetElement(row, col, user_matrix[row, col])\n\n actor.SetUserMatrix(matrix_vtk)\n\n # Assign actor to the renderer\n ren.AddActor(actor)\n\n return actor\n\n\ndef load_stl(stl_path, ren, opacity=1., visibility=1, position=False, colour=False, replace=False, user_matrix=np.identity(4)):\n vtk_colors = vtk.vtkNamedColors()\n vtk_colors.SetColor(\"SkinColor\", [233, 200, 188, 255])\n vtk_colors.SetColor(\"BkgColor\", [51, 77, 102, 255])\n\n reader = vtk.vtkSTLReader()\n reader.SetFileName(stl_path)\n reader.Update()\n\n poly_normals = vtk.vtkPolyDataNormals()\n poly_normals.SetInputData(reader.GetOutput())\n poly_normals.ConsistencyOn()\n poly_normals.AutoOrientNormalsOn()\n poly_normals.SplittingOff()\n poly_normals.Update()\n\n if replace:\n transx, transy, transz, rotx, roty, rotz = replace\n # create a transform that rotates the stl source\n transform = vtk.vtkTransform()\n transform.PostMultiply()\n transform.RotateX(rotx)\n transform.RotateY(roty)\n transform.RotateZ(rotz)\n transform.Translate(transx, transy, transz)\n\n transform_filt = vtk.vtkTransformPolyDataFilter()\n transform_filt.SetTransform(transform)\n transform_filt.SetInputConnection(poly_normals.GetOutputPort())\n transform_filt.Update()\n\n mapper = vtk.vtkPolyDataMapper()\n\n if vtk.VTK_MAJOR_VERSION <= 5:\n if replace:\n mapper.SetInput(transform_filt.GetOutput())\n else:\n mapper.SetInput(poly_normals.GetOutput())\n else:\n if replace:\n mapper.SetInputConnection(transform_filt.GetOutputPort())\n else:\n mapper.SetInputConnection(poly_normals.GetOutputPort())\n\n mapper.ScalarVisibilityOff()\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetOpacity(opacity)\n actor.SetVisibility(visibility)\n\n if colour:\n if type(colour) is str:\n actor.GetProperty().SetDiffuseColor(vtk_colors.GetColor3d(\"SkinColor\"))\n actor.GetProperty().SetSpecular(.3)\n actor.GetProperty().SetSpecularPower(20)\n\n else:\n actor.GetProperty().SetColor(colour)\n\n if position:\n actor.SetPosition(position)\n\n matrix_vtk = vtk.vtkMatrix4x4()\n\n for row in range(0, 4):\n for col in range(0, 4):\n matrix_vtk.SetElement(row, col, user_matrix[row, col])\n\n 
actor.SetUserMatrix(matrix_vtk)\n\n # Assign actor to the renderer\n ren.AddActor(actor)\n\n return actor\n\n\ndef create_coil(coil_path, coil_center, coil_dir, coil_normal):\n\n reader = vtk.vtkSTLReader()\n reader.SetFileName(coil_path)\n\n print(coil_path)\n\n transform = vtk.vtkTransform()\n # transform.RotateZ(90)\n transform.RotateZ(0)\n\n transform_filt = vtk.vtkTransformPolyDataFilter()\n transform_filt.SetTransform(transform)\n transform_filt.SetInputData(reader.GetOutput())\n transform_filt.Update()\n\n normals = vtk.vtkPolyDataNormals()\n # normals.SetInputData(transform_filt.GetOutput())\n normals.SetInputData(reader.GetOutput())\n normals.SetFeatureAngle(80)\n normals.AutoOrientNormalsOn()\n normals.Update()\n\n obj_mapper = vtk.vtkPolyDataMapper()\n obj_mapper.SetInputConnection(reader.GetOutputPort())\n # obj_mapper.SetInputData(normals.GetOutput())\n # obj_mapper.ScalarVisibilityOff()\n # obj_mapper.ImmediateModeRenderingOn() # improve performance\n\n coil_actor = vtk.vtkActor()\n coil_actor.SetMapper(obj_mapper)\n # coil_actor.GetProperty().SetOpacity(0.9)\n coil_actor.SetVisibility(1)\n # coil_actor.SetUserMatrix(m_img_vtk)\n\n return coil_actor\n\n\ndef add_line(renderer, p1, p2, color=[0.0, 0.0, 1.0]):\n line = vtk.vtkLineSource()\n line.SetPoint1(p1)\n line.SetPoint2(p2)\n\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(line.GetOutputPort())\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(color)\n\n renderer.AddActor(actor)\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\n\nimport nibabel as nb\nimport numpy as np\nimport external.transformations as tf\nimport Trekker\nimport vtk\nimport time\n\n\ndef main():\n SHOW_AXES = True\n AFFINE_IMG = True\n NO_SCALE = True\n\n data_dir = b'C:\\Users\\deoliv1\\OneDrive - Aalto University\\data\\dti_navigation\\juuso'\n stl_path = b'wm_orig_smooth_world.stl'\n brain_path = os.path.join(data_dir, stl_path)\n\n stl_path = b'wm.stl'\n brain_inv_path = os.path.join(data_dir, stl_path)\n\n nii_path = b'sub-P0_dwi_FOD.nii'\n trk_path = os.path.join(data_dir, nii_path)\n\n nii_path = b'sub-P0_T1w_biascorrected.nii'\n img_path = os.path.join(data_dir, nii_path)\n\n imagedata = nb.squeeze_image(nb.load(img_path.decode('utf-8')))\n imagedata = nb.as_closest_canonical(imagedata)\n imagedata.update_header()\n pix_dim = imagedata.header.get_zooms()\n img_shape = imagedata.header.get_data_shape()\n\n # print(imagedata.header)\n\n print(\"pix_dim: {}, img_shape: {}\".format(pix_dim, img_shape))\n\n if AFFINE_IMG:\n affine = imagedata.affine\n if NO_SCALE:\n scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine)\n affine = tf.compose_matrix(scale=None, shear=shear, angles=angs, translate=trans, perspective=persp)\n else:\n affine = np.identity(4)\n\n print(\"affine: {0}\\n\".format(affine))\n\n # Create a rendering window and renderer\n ren = vtk.vtkRenderer()\n ren_win = vtk.vtkRenderWindow()\n ren_win.AddRenderer(ren)\n ren_win.SetSize(800, 800)\n\n # Create a renderwindowinteractor\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(ren_win)\n\n tracker = Trekker.initialize(trk_path)\n tracker.seed_maxTrials(1)\n tracker.minFODamp(0.1)\n tracker.writeInterval(50)\n tracker.maxLength(200)\n tracker.minLength(20)\n tracker.maxSamplingPerStep(100)\n\n repos = [0., 0., 0., 0., 0., 0.]\n brain_actor = load_stl(brain_inv_path, ren, opacity=.1, colour=[1.0, 1.0, 1.0], replace=repos, user_matrix=np.identity(4))\n bds = brain_actor.GetBounds()\n print(\"Y length: {} --- Bounds: {}\".format(bds[3] - bds[2], bds))\n\n repos = [0., 0., 0., 0., 0., 0.]\n brain_actor_mri = load_stl(brain_path, ren, opacity=.1, colour=[0.0, 1.0, 0.0], replace=repos, user_matrix=np.linalg.inv(affine))\n bds = brain_actor_mri.GetBounds()\n print(\"Y length: {} --- Bounds: {}\".format(bds[3] - bds[2], bds))\n\n repos = [0., 256., 0., 0., 0., 0.]\n # brain_inv_actor = load_stl(brain_inv_path, ren, colour=\"SkinColor\", opacity=0.5, replace=repos, user_matrix=np.linalg.inv(affine))\n brain_inv_actor = load_stl(brain_inv_path, ren, colour=\"SkinColor\", opacity=.1, replace=repos)\n\n # Add axes to scene origin\n if SHOW_AXES:\n add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0])\n add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0])\n add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0])\n\n # Show tracks\n repos_trk = [0., -256., 0., 0., 0., 0.]\n\n matrix_vtk = vtk.vtkMatrix4x4()\n\n trans = np.identity(4)\n trans[1, -1] = repos_trk[1]\n final_matrix = np.linalg.inv(affine) @ trans\n\n print(\"final_matrix: {}\".format(final_matrix))\n\n for row in range(0, 4):\n for col in range(0, 4):\n matrix_vtk.SetElement(row, col, final_matrix[row, col])\n\n for i in range(10):\n seed = np.array([[-8.49, -8.39, 2.5]])\n visualizeTracks(ren, ren_win, tracker, seed, user_matrix=matrix_vtk)\n\n # Assign actor to the renderer\n ren.AddActor(brain_actor)\n ren.AddActor(brain_inv_actor)\n ren.AddActor(brain_actor_mri)\n\n # Enable user interface interactor\n 
iren.Initialize()\n ren_win.Render()\n iren.Start()\n\n\ndef load_stl(stl_path, ren, opacity=1., visibility=1, position=False, colour=False, replace=False, user_matrix=np.identity(4)):\n vtk_colors = vtk.vtkNamedColors()\n vtk_colors.SetColor(\"SkinColor\", [233, 200, 188, 255])\n vtk_colors.SetColor(\"BkgColor\", [51, 77, 102, 255])\n\n reader = vtk.vtkSTLReader()\n reader.SetFileName(stl_path)\n reader.Update()\n\n poly_normals = vtk.vtkPolyDataNormals()\n poly_normals.SetInputData(reader.GetOutput())\n poly_normals.ConsistencyOn()\n poly_normals.AutoOrientNormalsOn()\n poly_normals.SplittingOff()\n poly_normals.Update()\n\n if replace:\n transx, transy, transz, rotx, roty, rotz = replace\n # create a transform that rotates the stl source\n transform = vtk.vtkTransform()\n transform.PostMultiply()\n transform.RotateX(rotx)\n transform.RotateY(roty)\n transform.RotateZ(rotz)\n transform.Translate(transx, transy, transz)\n\n transform_filt = vtk.vtkTransformPolyDataFilter()\n transform_filt.SetTransform(transform)\n transform_filt.SetInputConnection(poly_normals.GetOutputPort())\n transform_filt.Update()\n\n mapper = vtk.vtkPolyDataMapper()\n\n if vtk.VTK_MAJOR_VERSION <= 5:\n if replace:\n mapper.SetInput(transform_filt.GetOutput())\n else:\n mapper.SetInput(poly_normals.GetOutput())\n else:\n if replace:\n mapper.SetInputConnection(transform_filt.GetOutputPort())\n else:\n mapper.SetInputConnection(poly_normals.GetOutputPort())\n\n mapper.ScalarVisibilityOff()\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetOpacity(opacity)\n actor.SetVisibility(visibility)\n actor.GetProperty().SetBackfaceCulling(1)\n\n if colour:\n if type(colour) is str:\n actor.GetProperty().SetDiffuseColor(vtk_colors.GetColor3d(\"SkinColor\"))\n actor.GetProperty().SetSpecular(.3)\n actor.GetProperty().SetSpecularPower(20)\n\n else:\n actor.GetProperty().SetColor(colour)\n\n if position:\n actor.SetPosition(position)\n\n matrix_vtk = vtk.vtkMatrix4x4()\n\n for row in range(0, 4):\n for col in range(0, 4):\n matrix_vtk.SetElement(row, col, user_matrix[row, col])\n\n actor.SetUserMatrix(matrix_vtk)\n\n # Assign actor to the renderer\n ren.AddActor(actor)\n\n return actor\n\n\ndef visualizeTracks(renderer, renderWindow, tracker, seed, user_matrix):\n # Input the seed to the tracker object\n tracker.seed_coordinates(seed)\n\n # Run the tracker\n # This step will create N tracks if seed is a 3xN matrix\n tractogram = tracker.run()\n\n # Convert the first track to a vtkActor, i.e., tractogram[0] is the track\n # computed for the first seed\n trkActor = trk2vtkActor(tractogram[0])\n\n trkActor.SetUserMatrix(user_matrix)\n\n renderer.AddActor(trkActor)\n renderWindow.Render()\n\n return\n\n\n# This function converts a single track to a vtkActor\ndef trk2vtkActor(trk):\n # convert trk to vtkPolyData\n trk = np.transpose(np.asarray(trk))\n numberOfPoints = trk.shape[0]\n\n points = vtk.vtkPoints()\n lines = vtk.vtkCellArray()\n\n colors = vtk.vtkUnsignedCharArray()\n colors.SetNumberOfComponents(3)\n # colors = vtk.vtkFloatArray()\n # colors.SetNumberOfComponents(4)\n # colors.SetName(\"tangents\")\n\n k = 0\n lines.InsertNextCell(numberOfPoints)\n for j in range(numberOfPoints):\n points.InsertNextPoint(trk[j, :])\n lines.InsertCellPoint(k)\n k = k + 1\n\n if j < (numberOfPoints - 1):\n direction = trk[j + 1, :] - trk[j, :]\n direction = direction / np.linalg.norm(direction)\n direc = [int(255 * abs(s)) for s in direction]\n colors.InsertNextTuple(direc)\n # 
colors.InsertNextTuple(np.abs([direction[0], direction[1], direction[2], 1]))\n else:\n colors.InsertNextTuple(direc)\n # colors.InsertNextTuple(np.abs([direction[0], direction[1], direction[2], 1]))\n\n trkData = vtk.vtkPolyData()\n trkData.SetPoints(points)\n trkData.SetLines(lines)\n trkData.GetPointData().SetScalars(colors)\n\n # make it a tube\n trkTube = vtk.vtkTubeFilter()\n trkTube.SetRadius(0.3)\n trkTube.SetNumberOfSides(4)\n trkTube.SetInputData(trkData)\n trkTube.Update()\n\n # if replace:\n # transx, transy, transz, rotx, roty, rotz = replace\n # # create a transform that rotates the stl source\n # transform = vtk.vtkTransform()\n # transform.PostMultiply()\n # transform.RotateX(rotx)\n # transform.RotateY(roty)\n # transform.RotateZ(rotz)\n # transform.Translate(transx, transy, transz)\n #\n # transform_filt = vtk.vtkTransformPolyDataFilter()\n # transform_filt.SetTransform(transform)\n # transform_filt.SetInputConnection(trkTube.GetOutputPort())\n # transform_filt.Update()\n\n # mapper\n trkMapper = vtk.vtkPolyDataMapper()\n trkMapper.SetInputData(trkTube.GetOutput())\n\n # actor\n trkActor = vtk.vtkActor()\n trkActor.SetMapper(trkMapper)\n\n return trkActor\n\n\ndef add_line(renderer, p1, p2, color=[0.0, 0.0, 1.0]):\n line = vtk.vtkLineSource()\n line.SetPoint1(p1)\n line.SetPoint2(p2)\n\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(line.GetOutputPort())\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(color)\n\n renderer.AddActor(actor)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.asarray",
"numpy.linalg.inv",
"numpy.linalg.norm",
"numpy.linalg.det",
"numpy.identity",
"numpy.cross",
"scipy.io.savemat"
],
[
"numpy.linalg.inv",
"numpy.asarray",
"numpy.linalg.norm",
"numpy.identity",
"numpy.array"
]
] |
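The visualize_tms_scene.py record above assembles a 4x4 coil transform by stacking three orthogonal unit vectors as the rotation columns and the coil location as the translation, then checks the determinant sign. A self-contained sketch of that construction with hypothetical vectors (the real ones come from Nexstim coordinates):

import numpy as np

# Invented stand-ins for the coil face (X), direction (Y) and normal (Z)
u1 = np.array([1.0, 0.1, 0.0])
u2 = np.array([-0.1, 1.0, 0.0])
u3 = np.cross(u1, u2)
loc = np.array([10.0, -5.0, 40.0])    # hypothetical coil location

m = np.identity(4)
m[:3, 0] = u1 / np.linalg.norm(u1)
m[:3, 1] = u2 / np.linalg.norm(u2)
m[:3, 2] = u3 / np.linalg.norm(u3)
m[:3, 3] = loc
# det ~ +1: orientation preserved; a negative determinant would flip the
# mesh inside-out, which is what turns VTK actors black per the record.
print(np.linalg.det(m))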
bierschi/nclt2ros
|
[
"77b30ca6750d4b0cd82ccb6660f2fd0946581091"
] |
[
"nclt2ros/visualizer/gt.py"
] |
[
"import math\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom nclt2ros.visualizer.plotter import Plotter\r\nfrom nclt2ros.transformer.coordinate_frame import CoordinateFrame\r\n\r\n\r\nclass GroundTruth(Plotter):\r\n \"\"\"Class to visualize the ground truth data as a kml and png file\r\n\r\n USAGE:\r\n GroundTruth(date='2013-01-10', output_file='ground_truth', plt_show=True)\r\n\r\n \"\"\"\r\n def __init__(self, date, output_file='ground_truth', plt_show=True):\r\n\r\n if isinstance(output_file, str):\r\n self.output_file = output_file\r\n else:\r\n raise TypeError(\"'output_file' must be type of string\")\r\n\r\n self.date = date\r\n self.plt_show = plt_show\r\n\r\n # init base class\r\n Plotter.__init__(self, date=self.date)\r\n\r\n # transformer coordinate frame into 'gt'\r\n self.gt_converter = CoordinateFrame(origin='gt')\r\n\r\n # load gt data\r\n self.gt = self.reader_gt.read_gt_csv(all_in_one=True)\r\n\r\n def save_kml_line(self):\r\n \"\"\"visualize the ground truth as a kml file\r\n \"\"\"\r\n\r\n gt_x = self.gt[:, 1]\r\n gt_y = self.gt[:, 2]\r\n gt_list = list()\r\n\r\n if len(gt_x) > 600000:\r\n divider = 35\r\n elif len(gt_x) > 200000:\r\n divider = 15\r\n else:\r\n divider = 5\r\n\r\n for row, (x_i, y_j) in enumerate(zip(gt_x, gt_y)):\r\n\r\n if not math.isnan(x_i) and not math.isnan(y_j):\r\n lat = self.gt_converter.get_lat(x=x_i)\r\n lng = self.gt_converter.get_lon(y=y_j)\r\n\r\n tup = (lng, lat)\r\n\r\n # minimize the elements in the kml output file\r\n if (row % divider) == 0:\r\n gt_list.append(tup)\r\n\r\n # create line string\r\n ls = self.kml.newlinestring(name=\"ground truth\", coords=gt_list, description=\"latitude and longitude from ground truth\",)\r\n ls.style.linestyle.width = 1\r\n ls.style.linestyle.color = self.green\r\n\r\n # save kml file in visualization directory\r\n self.kml.save(self.visualization_kml_dir + self.output_file + '.kml')\r\n\r\n def get_gt_data(self):\r\n \"\"\"get ground truth data for visualization\r\n\r\n :return: list for x coordinates, list for y coordinates\r\n \"\"\"\r\n gt_x = self.gt[:, 1]\r\n gt_y = self.gt[:, 2]\r\n first_x_coord = gt_x[0]\r\n first_y_coord = gt_y[0]\r\n x_new = list()\r\n y_new = list()\r\n\r\n for row, (x_i, y_j) in enumerate(zip(gt_x, gt_y)):\r\n\r\n if not math.isnan(x_i) and not math.isnan(y_j):\r\n # eliminate offset in this dataset\r\n if self.date == '2013-01-10':\r\n x_i = x_i - first_x_coord\r\n y_j = y_j - first_y_coord\r\n y_new.append(y_j)\r\n x_new.append(x_i)\r\n else:\r\n y_new.append(y_j)\r\n x_new.append(x_i)\r\n\r\n return x_new, y_new\r\n\r\n def save_gt_png(self):\r\n \"\"\"visualize the ground truth as a png file\r\n\r\n :param offset: Boolean, True if eliminate the offset between odom and ground truth coordinate frame\r\n \"\"\"\r\n\r\n x_new, y_new = self.get_gt_data()\r\n\r\n plt.plot(y_new, x_new, color=\"lime\", label='ground truth')\r\n\r\n plt.xlabel('x in meter')\r\n plt.ylabel('y in meter')\r\n plt.legend(loc='upper left')\r\n\r\n plt.grid()\r\n\r\n plt.title('Ground Truth')\r\n plt.savefig(self.visualization_png_gt_dir + self.output_file + '.png')\r\n\r\n if self.plt_show:\r\n plt.show()\r\n\r\n def save_roll_png(self):\r\n \"\"\"visualize the roll angle as a png file\r\n \"\"\"\r\n\r\n utimes = self.gt[:, 0]\r\n roll_rad = self.gt[:, 4]\r\n plt.plot(utimes, roll_rad, color=\"blue\", label='roll angle')\r\n\r\n plt.xlabel('time in sec')\r\n plt.ylabel('roll in rad')\r\n plt.legend(loc='upper left')\r\n plt.grid()\r\n\r\n plt.title('Roll angle from Ground Truth')\r\n 
plt.savefig(self.visualization_png_gt_dir + self.output_file + '_roll.png')\r\n plt.show()\r\n\r\n def save_pitch_png(self):\r\n \"\"\"visualize the pitch angle as a png file\r\n \"\"\"\r\n utimes = self.gt[:, 0]\r\n pitch_rad = self.gt[:, 5]\r\n\r\n plt.plot(utimes, pitch_rad, color=\"blue\", label='pitch angle')\r\n\r\n plt.xlabel('time in sec')\r\n plt.ylabel('pitch in rad')\r\n plt.legend(loc='upper left')\r\n plt.grid()\r\n\r\n plt.title('Pitch angle from Ground Truth')\r\n plt.savefig(self.visualization_png_gt_dir + self.output_file + '_pitch.png')\r\n plt.show()\r\n\r\n def save_yaw_png(self):\r\n \"\"\"visualize the yaw angle as a png file\r\n \"\"\"\r\n utimes = self.gt[:, 0]\r\n yaw_rad = self.gt[:, 6]\r\n\r\n plt.plot(utimes, yaw_rad, color=\"blue\", label='yaw angle')\r\n\r\n plt.xlabel('time in sec')\r\n plt.ylabel('yaw in rad')\r\n plt.legend(loc='upper left')\r\n plt.grid()\r\n\r\n plt.title('Yaw angle from Ground Truth')\r\n plt.savefig(self.visualization_png_gt_dir + self.output_file + '_yaw.png')\r\n plt.show()\r\n\r\n def get_png_gt_dir(self):\r\n \"\"\"get the png ground truth directory\r\n\r\n :return: path to png ground truth directory\r\n \"\"\"\r\n return self.visualization_png_gt_dir\r\n\r\n\r\nif __name__ == '__main__':\r\n gt = GroundTruth(date='2012-01-15')\r\n gt.save_gt_png()\r\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
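Every save_*_png method in the gt.py record above repeats the same matplotlib plot/label/legend/grid/title/savefig sequence. A minimal generic sketch of that pattern, with synthetic data and a hypothetical output path in place of the reader's ground-truth arrays:

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 10, 200)      # stand-in for utimes
roll = 0.1 * np.sin(t)           # stand-in for roll_rad

plt.plot(t, roll, color="blue", label="roll angle")
plt.xlabel("time in sec")
plt.ylabel("roll in rad")
plt.legend(loc="upper left")
plt.grid()
plt.title("Roll angle from Ground Truth")
plt.savefig("gt_roll.png")       # hypothetical output file
plt.show()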
cyphylab/crazyflie_ros
|
[
"fa81ecfff16ea2e1e30369fa5dcfbad40acc3dba"
] |
[
"crazyflie_demo/scripts/uav_trajectory.py"
] |
[
"#!/usr/bin/env python\nimport numpy as np\n\ndef normalize(v):\n norm = np.linalg.norm(v)\n assert norm > 0\n return v / norm\n\n\nclass Polynomial:\n def __init__(self, p):\n self.p = p\n\n # evaluate a polynomial using horner's rule\n def eval(self, t):\n assert t >= 0\n x = 0.0\n for i in range(0, len(self.p)):\n x = x * t + self.p[len(self.p) - 1 - i]\n return x\n\n # compute and return derivative\n def derivative(self):\n return Polynomial([(i+1) * self.p[i+1] for i in range(0, len(self.p) - 1)])\n\n\nclass TrajectoryOutput:\n def __init__(self):\n self.pos = None # position [m]\n self.vel = None # velocity [m/s]\n self.acc = None # acceleration [m/s^2]\n self.omega = None # angular velocity [rad/s]\n self.yaw = None # yaw angle [rad]\n self.R = None # Orientation of the Body frame\n\n\n# 4d single polynomial piece for x-y-z-yaw, includes duration.\nclass Polynomial4D:\n def __init__(self, duration, px, py, pz, pyaw):\n self.duration = duration\n self.px = Polynomial(px)\n self.py = Polynomial(py)\n self.pz = Polynomial(pz)\n self.pyaw = Polynomial(pyaw)\n\n # compute and return derivative\n def derivative(self):\n return Polynomial4D(\n self.duration,\n self.px.derivative().p,\n self.py.derivative().p,\n self.pz.derivative().p,\n self.pyaw.derivative().p)\n\n def eval(self, t):\n result = TrajectoryOutput()\n # flat variables\n result.pos = np.array([self.px.eval(t), self.py.eval(t), self.pz.eval(t)])\n result.yaw = self.pyaw.eval(t)\n\n # 1st derivative\n derivative = self.derivative()\n result.vel = np.array([derivative.px.eval(t), derivative.py.eval(t), derivative.pz.eval(t)])\n dyaw = derivative.pyaw.eval(t)\n\n # 2nd derivative\n derivative2 = derivative.derivative()\n result.acc = np.array([derivative2.px.eval(t), derivative2.py.eval(t), derivative2.pz.eval(t)])\n\n # 3rd derivative\n derivative3 = derivative2.derivative()\n jerk = np.array([derivative3.px.eval(t), derivative3.py.eval(t), derivative3.pz.eval(t)])\n\n thrust = result.acc + np.array([0, 0, 9.81]) # add gravity\n\n z_body = normalize(thrust)\n x_world = np.array([np.cos(result.yaw), np.sin(result.yaw), 0])\n y_body = normalize(np.cross(z_body, x_world))\n x_body = np.cross(y_body, z_body)\n\n jerk_orth_zbody = jerk - (np.dot(jerk, z_body) * z_body)\n h_w = jerk_orth_zbody / np.linalg.norm(thrust)\n\n result.omega = np.array([-np.dot(h_w, y_body), np.dot(h_w, x_body), z_body[2] * dyaw])\n\n result.R = np.zeros((3,3), dtype=float)\n result.R[:, 0] = x_body\n result.R[:, 1] = y_body\n result.R[:, 2] = z_body\n return result\n\n\nclass Trajectory:\n def __init__(self):\n self.polynomials = None\n self.duration = None\n\n def loadcsv(self, filename):\n data = np.loadtxt(filename, delimiter=\",\", skiprows=1, usecols=range(33))\n self.polynomials = [Polynomial4D(row[0], row[1:9], row[9:17], row[17:25], row[25:33]) for row in data]\n self.duration = np.sum(data[:,0])\n\n def eval(self, t):\n assert t >= 0\n assert t <= self.duration\n\n current_t = 0.0\n for p in self.polynomials:\n if t < current_t + p.duration:\n return p.eval(t - current_t)\n current_t = current_t + p.duration\n"
] |
[
[
"numpy.dot",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sin",
"numpy.cross",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
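The uav_trajectory.py record above evaluates each flat-output polynomial with Horner's rule and differentiates by shifting coefficients. A standalone check of both operations, with an invented quadratic as the test case (not taken from the record):

# Coefficients are stored lowest order first: p(t) = p[0] + p[1]*t + ...
def poly_eval(p, t):
    x = 0.0
    for c in reversed(p):    # Horner's rule: fold from the highest term down
        x = x * t + c
    return x

def poly_derivative(p):
    # d/dt sum(p[i] * t^i) = sum((i+1) * p[i+1] * t^i)
    return [(i + 1) * p[i + 1] for i in range(len(p) - 1)]

p = [1.0, -3.0, 2.0]                      # 1 - 3t + 2t^2, arbitrary example
assert poly_eval(p, 2.0) == 3.0           # 1 - 6 + 8
assert poly_derivative(p) == [-3.0, 4.0]  # derivative is -3 + 4t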
Hzfinfdu/DataLab
|
[
"0da0226866f59ed2e535c346833f0797499b5174"
] |
[
"src/datalabs/operations/aggregate/general.py"
] |
[
"from typing import Dict, List, Optional, Any\nfrom typing import Callable, Mapping, Iterator\n# nltk package for\nimport nltk\nimport numpy as np\n#sklearn is used for tfidf\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom .aggregating import Aggregating, aggregating\n\n\n\n\n@aggregating(name=\"get_features_dataset_level\", contributor=\"datalab\",\n task=\"Any\",\n description=\"Get the average length of a list of texts\")\ndef get_features_dataset_level(texts:Iterator) -> int:\n \"\"\"\n Package: python\n Input:\n texts: Iterator\n Output:\n int\n \"\"\"\n lengths = []\n for text in texts:\n lengths.append(len(text.split(\" \")))\n\n\n return {\"avg_length\":np.average(lengths)}\n\n\n\n\n\n@aggregating(name=\"get_average_length\", contributor=\"datalab\",\n task=\"Any\",\n description=\"Get the average length of a list of texts\")\ndef get_average_length(texts:Iterator) -> int:\n \"\"\"\n Package: python\n Input:\n texts: Iterator\n Output:\n int\n \"\"\"\n lengths = []\n for text in texts:\n lengths.append(len(text.split(\" \")))\n return {\"average_length\":np.average(lengths)}\n\n\n\n\n@aggregating(name=\"get_vocabulary\", contributor=\"datalab\",\n task=\"Any\", description=\"Get the vocabulary of a list of texts\")\ndef get_vocabulary(texts:Iterator) -> Dict:\n \"\"\"\n Package: python\n Input:\n texts: Iterator\n Output:\n int\n \"\"\"\n vocab = {}\n for text in texts:\n for w in text.split(\" \"):\n if w in vocab.keys():\n vocab[w] += 1\n else:\n vocab[w] = 1\n vocab_sorted = dict(sorted(vocab.items(), key=lambda item: item[1], reverse = True))\n return {\"vocabulary\":vocab_sorted}\n\n\n\n\n\n\n\n@aggregating(name=\"get_tfidf\", contributor=\"scikit-learn\",\n task=\"Any\", description=\"Calculate the tif-idf of a list of texts\")\ndef get_tfidf(texts:Iterator) -> int:\n \"\"\"\n Package: python\n Input:\n texts: Iterator\n Output:\n dict\n \"\"\"\n vectorizer = TfidfVectorizer()\n tfidf = vectorizer.fit_transform(texts)\n words = vectorizer.get_feature_names()\n outs = []\n for i in range(len(texts)):\n out = {}\n for j in range(len(words)):\n if tfidf[i, j] > 1e-5:\n out[words[j]] = tfidf[i, j]\n outs.append(out)\n return {\"tfidf\":outs}\n\n\n\n"
] |
[
[
"numpy.average",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
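The get_tfidf aggregator in the datalabs record above is a thin wrapper around scikit-learn's TfidfVectorizer. A minimal sketch on a toy corpus; note that get_feature_names(), which the record calls, was removed in scikit-learn 1.2 in favour of get_feature_names_out():

from sklearn.feature_extraction.text import TfidfVectorizer

texts = ["the cat sat", "the dog sat", "cats and dogs"]   # toy corpus
vectorizer = TfidfVectorizer()
tfidf = vectorizer.fit_transform(texts)     # sparse (n_docs, n_terms) matrix
words = vectorizer.get_feature_names_out()  # get_feature_names() pre-1.2

# Keep only the non-negligible weights per document, as the record does
for i in range(len(texts)):
    row = {words[j]: tfidf[i, j] for j in range(len(words)) if tfidf[i, j] > 1e-5}
    print(row)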
georgetown-analytics/Baseball-Hall-of-Fame
|
[
"5a41bb2c1a1e7aa2b7621311a5d2baff40751515",
"5a41bb2c1a1e7aa2b7621311a5d2baff40751515"
] |
[
"wrangle_538.py",
"clutchness_03.py"
] |
[
"# File: wrangle_538.py\n# Date Created: 2018-11-10\n# Author(s): Mahkah Wu\n# Purpose: Extracts team elo rankings from 538 file\n\n\nimport pandas as pd\nimport psycopg2\nfrom ignore import db_cred\n\n\ndf = pd.read_csv('ignore\\\\large_data\\\\538\\\\mlb_elo.csv')\n\ndf = df[(df['season'] >= 1950) & (df['season'] < 2017)]\ndf = df.loc[df['playoff'].isnull()]\n\ndf.reset_index(drop=True, inplace=True)\n\n# 538 uses franchise id rather than team id; fix that here\ndf['franchise1'] = df['team1']\ndf['franchise2'] = df['team2']\n\nquery = '''SELECT \"yearID\", \"teamID\", \"franchID\" FROM teams\nWHERE \"yearID\">=1950'''\nconn = db_cred.connect_db()\ndf_team_ids = pd.read_sql(query, conn)\n\ndf['team1'] = pd.merge(df, df_team_ids, how='left', left_on=['season', 'franchise1'], right_on=['yearID', 'franchID'])['teamID']\ndf['team2'] = pd.merge(df, df_team_ids, how='left', left_on=['season', 'franchise2'], right_on=['yearID', 'franchID'])['teamID']\n\n\n# Create the game id field\ndf['game_id'] = df['team1'] + df['date'].str.replace('-', '')\ndf['game_id'] = df['game_id'] + df['game_id'].duplicated(keep=False).astype(int).astype(str)\nidx = df['game_id'].loc[df['game_id'].duplicated(keep=False)].loc[df['game_id'].duplicated(keep='last')].index\ndf['game_id'].iloc[idx] = df['game_id'][idx].str.replace('1$', '2')\nidx = df['game_id'].loc[df['game_id'].duplicated(keep=False)].loc[df['game_id'].duplicated(keep='last')].index\ndf['game_id'].iloc[idx] = df['game_id'][idx].str.replace('2$', '3')\n\n# Deal with a couple game_id special cases\nmap = {'NYA200007080': 'NYA200007082',\n 'NYN200007080': 'NYN200007081',\n 'NYA200306280': 'NYA200306281',\n 'NYN200306280': 'NYN200306282',\n 'NYA200806270': 'NYA200806271',\n 'NYN200806270': 'NYN200806272',\n 'CIN201307230': 'CIN201307232',\n 'SFN201307230': 'SFN201307231'}\n\ndf['game_id'] = df['game_id'].replace(map)\n\n# Save data\ndf.to_csv('ignore\\\\large_data\\\\538\\\\wrangled_mlb_elo.csv', index=False)\n",
"# File: clutchness_02.py\n# Date Created: 2018-12-11\n# Author(s): Mahkah Wu\n# Purpose: Uses win probability computed by clutchness_02 to produce clutchness measures\n\nimport pandas as pd\nimport seaborn as sns\n\ndf = pd.read_csv('ignore\\predicted_wins2.csv')\ndf.drop('Unnamed: 0', axis=1, inplace=True)\n\n\ndf['delta_ens'] = df['f_win_ens'] - df['s_win_ens']\n# https://www.wolframalpha.com/input/?i=itegrate+-(2x)%5E2%2B1\ndf['delta_int'] = ((df['f_win_ens']-0.5) - 4 * df['f_win_ens'] ** 3 / 3) - ((df['s_win_ens']-0.5) - 4 * df['s_win_ens'] ** 3 / 3)\n\ndf_batter = df.groupby(['batter_id', 'year'])[['delta_ens']].count()\ndf_batter['average_win_change'] = df.groupby(['batter_id', 'year'])[['delta_ens']].mean()\ndf_batter['center_weighted_win_change'] = df.groupby(['batter_id', 'year'])[['delta_int']].mean()\ndf_batter.reset_index(inplace=True)\ndf_batter.rename(index=str, columns={\"delta_ens\": \"event_count\"}, inplace=True)\ndf_batter.to_csv('ignore\\\\batter_clutch.csv', index=False)\n\ndf_pitch = df.groupby(['pitcher_id', 'year'])[['delta_ens']].count()\ndf_pitch['average_win_change'] = df.groupby(['pitcher_id', 'year'])[['delta_ens']].mean()\ndf_pitch['average_win_change'] = df_pitch['average_win_change'] * -1\ndf_pitch['center_weighted_win_change'] = df.groupby(['pitcher_id', 'year'])[['delta_int']].mean()\ndf_pitch['center_weighted_win_change'] = df_pitch['center_weighted_win_change'] * -1\ndf_pitch.rename(index=str, columns={\"delta_ens\": \"event_count\"}, inplace=True)\ndf_pitch.reset_index(inplace=True)\ndf_pitch.to_csv('ignore\\\\pitcher_clutch.csv', index=False)\n\n\n\n\nsns_plot = sns.jointplot(x=\"s_win_prob_logit\", y=\"s_win_prob_nb_iso\", data=df, kind=\"kde\")\nsns_plot.savefig(\"figures\\\\compare_logit_iso_nb.png\")\n"
] |
[
[
"pandas.merge",
"pandas.read_csv",
"pandas.read_sql"
],
[
"pandas.read_csv"
]
] |
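wrangle_538.py above converts 538 franchise codes into Lahman team IDs with a left merge keyed on season and franchise, then writes the matched teamID back over the franchise column. A toy reproduction of that join using two invented frames in place of the elo file and the teams table:

import pandas as pd

elo = pd.DataFrame({"season": [1950, 1950], "team1": ["NYY", "BSN"]})
teams = pd.DataFrame({"yearID": [1950, 1950],
                      "franchID": ["NYY", "BSN"],
                      "teamID": ["NYA", "BSN"]})

# Left merge keyed on (season, franchise); the matched teamID column
# then replaces the franchise code, as in wrangle_538.py
elo["team1"] = pd.merge(elo, teams, how="left",
                        left_on=["season", "team1"],
                        right_on=["yearID", "franchID"])["teamID"]
print(elo)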
jesnyder/cell-source_momentum
|
[
"2a88a399e635f54fdc9aa67031521d1a0dbd2fd4"
] |
[
"code/python/a0001_admin.py"
] |
[
"from bs4 import BeautifulSoup\r\nfrom datetime import datetime\r\nimport json\r\nimport lxml\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport pandas as pd\r\nfrom serpapi import GoogleSearch\r\nimport re\r\nimport requests\r\nimport time\r\n\r\n\r\ndef clean_dataframe(df):\r\n\r\n\r\n col_names = df.columns\r\n\r\n for name in df.columns:\r\n\r\n if 'Unnamed:' in str(name):\r\n del df[name]\r\n\r\n col_names_sort = ['patent_num', 'AwardNumber', 'citations']\r\n for name in col_names_sort:\r\n try:\r\n df = df.drop_duplicates(subset = name)\r\n df = df.sort_values(name, ascending=True)\r\n except:\r\n hello = 'hello'\r\n\r\n try:\r\n df = df.reset_index()\r\n del df['index']\r\n except:\r\n hello = 'hello'\r\n\r\n return(df)\r\n\r\n\r\ndef name_paths(name_article):\r\n \"\"\"\r\n provide article type\r\n make the needed files\r\n \"\"\"\r\n\r\n name_src = str(name_article + '_src_query')\r\n name_dst = str(name_article + '_dst_query')\r\n name_summary = str(name_article + '_sum')\r\n name_unique = str(name_article + '_unique_df')\r\n plot_unique = str(name_article + '_unique_plot')\r\n\r\n return name_src, name_dst, name_summary, name_unique, plot_unique\r\n\r\n\r\ndef retreive_categories():\r\n \"\"\"\r\n return file names in compare term folder\r\n \"\"\"\r\n list_categories = []\r\n\r\n compare_terms = os.path.join(retrieve_path('term_compare'))\r\n for file in os.listdir(compare_terms):\r\n\r\n file_split = file.split('.')\r\n file_name = file_split[0]\r\n list_categories.append(file_name)\r\n\r\n return(list_categories)\r\n\r\n\r\ndef retrieve_datetime():\r\n \"\"\"\r\n send current time as a string\r\n \"\"\"\r\n\r\n # datetime object containing current date and time\r\n now = datetime.now()\r\n\r\n print(\"now =\", now)\r\n\r\n # dd/mm/YY H:M:S\r\n dt_string = now.strftime(\"%Y-%m-%d %H %M %S\")\r\n print(\"date and time =\", dt_string)\r\n\r\n return(dt_string)\r\n\r\n\r\ndef retrieve_format(name):\r\n \"\"\"\r\n from name, return variable\r\n lookup in ref file:\r\n user_provided/admin/format.csv\r\n \"\"\"\r\n\r\n f = os.path.join(retrieve_path('format'))\r\n df = pd.read_csv(f)\r\n\r\n # find the value from the name\r\n df = df.loc[df['name'] == name]\r\n\r\n value = list(df['value'])\r\n value = value[0]\r\n value = value.split(' ')\r\n\r\n try:\r\n value = [int(item) for item in value]\r\n except:\r\n value = [str(item) for item in value]\r\n\r\n if name == 'fig_wid': value = int(value[0])\r\n if name == 'fig_hei': value = int(value[0])\r\n\r\n try:\r\n if len(value) == 1:\r\n value = int(value[0])\r\n except:\r\n hello = 'hello'\r\n\r\n return(value)\r\n\r\n\r\ndef retrieve_list(name):\r\n \"\"\"\r\n from the name of a csv path file\r\n return a list\r\n \"\"\"\r\n try:\r\n article_path = os.path.join(retrieve_path(name))\r\n df = pd.read_csv(article_path)\r\n\r\n except:\r\n df = pd.read_csv(name)\r\n\r\n df = clean_dataframe(df)\r\n\r\n for col in df.columns:\r\n target_list = list(df[col])\r\n\r\n return(target_list)\r\n\r\n\r\ndef retrieve_path(name):\r\n \"\"\"\r\n from the name of a file\r\n return the path to the file\r\n \"\"\"\r\n #print('began retrieve_path')\r\n\r\n\r\n for file_source in ['user_provided', 'program_generated']:\r\n try:\r\n f = os.path.join(file_source, 'admin', 'paths' + '.csv')\r\n #print('file = ' + str(f))\r\n df = pd.read_csv(f)\r\n\r\n # find the path from the name\r\n df = df.loc[df['name'] == name]\r\n path = list(df['path'])\r\n path = path[0]\r\n path = path.split(' ')\r\n break\r\n\r\n except:\r\n hello = 
'hello'\r\n #print('retrieve_path: file not found: ' + name)\r\n #print('file not found.')\r\n\r\n\r\n # build the folder required to save a file\r\n for folder in path:\r\n\r\n # skip file names\r\n if '.' in str(folder):\r\n break\r\n\r\n # intiate a new variable to describe the path\r\n if folder == path[0]:\r\n path_short = os.path.join(folder)\r\n\r\n # add folders iteratively to build path\r\n else:\r\n path_short = os.path.join(path_short, folder)\r\n\r\n # check if the path exists\r\n if not os.path.exists(path_short):\r\n os.makedirs(path_short)\r\n\r\n path = os.path.join(*path)\r\n return(path)\r\n\r\n\r\ndef write_paths():\r\n \"\"\"\r\n write the paths for all the articles\r\n \"\"\"\r\n\r\n name, path = [], []\r\n df_article = pd.read_csv(retrieve_path('type_article'))\r\n for name_article in list(df_article['name']):\r\n\r\n name_src, name_dst, name_summary, name_unique, plot_unique = name_paths(name_article)\r\n name_list = name_paths(name_article)\r\n name_list = list(name_list)\r\n\r\n name_list.append(str(name_article + '_query_html'))\r\n name_list.append(str(name_article + '_query_xml'))\r\n name_list.append(str(name_article + '_query_json'))\r\n name_list.append(str(name_article + '_query_df'))\r\n name_list.append(str(name_article + '_article_html'))\r\n name_list.append(str(name_article + '_article_xml'))\r\n name_list.append(str(name_article + '_article_json'))\r\n name_list.append(str(name_article + '_article_df'))\r\n name_list.append(str(name_article + '_aggregate_df'))\r\n name_list.append(str(name_article + '_annual_df'))\r\n name_list.append(str(name_article + '_annual_plot'))\r\n name_list.append(str(name_article + '_count_all_words_df'))\r\n name_list.append(str(name_article + '_compare_terms_df'))\r\n name_list.append(str(name_article + '_compare_terms_annual_count_df'))\r\n name_list.append(str(name_article + '_compare_terms_plot'))\r\n name_list.append(str(name_article + '_compare_terms_plot'))\r\n name_list.append(str(name_article + '_map_png'))\r\n name_list.append(str(name_article + '_map_gif'))\r\n\r\n\r\n\r\n for item in name_list:\r\n name.append(item)\r\n\r\n if 'src' in item:\r\n item_path = str('program_generated ' + name_article + ' query src')\r\n\r\n elif 'dst' in item:\r\n item_path = str('program_generated ' + name_article + ' query agg')\r\n\r\n elif 'sum' in item:\r\n item_path = str('program_generated ' + name_article + ' sum')\r\n\r\n elif 'unique_df' in item:\r\n item_path = str('program_generated ' + name_article + ' unique df')\r\n\r\n elif '_unique_plot' in item:\r\n item_path = str('program_generated ' + name_article + ' unique plot')\r\n\r\n elif '_query_html' in item:\r\n item_path = str('program_generated ' + name_article + ' query html')\r\n\r\n elif '_query_xml' in item:\r\n item_path = str('program_generated ' + name_article + ' query xml')\r\n\r\n elif '_query_json' in item:\r\n item_path = str('program_generated ' + name_article + ' query json')\r\n\r\n elif '_query_df' in item:\r\n item_path = str('program_generated ' + name_article + ' query df')\r\n\r\n elif '_article_html' in item:\r\n item_path = str('program_generated ' + name_article + ' article html')\r\n\r\n elif '_article_xml' in item:\r\n item_path = str('program_generated ' + name_article + ' article xml')\r\n\r\n elif '_article_json' in item:\r\n item_path = str('program_generated ' + name_article + ' article json')\r\n\r\n elif '_article_df' in item:\r\n item_path = str('program_generated ' + name_article + ' article df')\r\n\r\n elif '_aggregate_df' in 
item:\r\n item_path = str('program_generated ' + name_article + ' aggregate df')\r\n\r\n elif '_annual_df' in item:\r\n item_path = str('program_generated ' + name_article + ' annual df')\r\n\r\n elif '_annual_plot' in item:\r\n item_path = str('program_generated ' + name_article + ' annual plot')\r\n\r\n elif '_count_all_words_df' in item:\r\n item_path = str('program_generated ' + name_article + ' count_words df')\r\n\r\n elif '_compare_terms_df' in item:\r\n item_path = str('program_generated ' + name_article + ' compare_terms df')\r\n\r\n elif '_compare_terms_annual_count_df' in item:\r\n item_path = str('program_generated ' + name_article + ' compare_terms df_annual')\r\n\r\n elif '_compare_terms_plot' in item:\r\n item_path = str('program_generated ' + name_article + ' compare_terms plot')\r\n\r\n elif '_map_png' in item:\r\n item_path = str('program_generated ' + name_article + ' map png')\r\n\r\n elif '_map_gif' in item:\r\n item_path = str('program_generated ' + name_article + ' map gif')\r\n\r\n\r\n\r\n\r\n path.append(item_path)\r\n\r\n df = pd.DataFrame()\r\n df['name'] = name\r\n df['path'] = path\r\n f = os.path.join(retrieve_path('write_paths'))\r\n df.to_csv(f)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
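The file above resolves logical names to filesystem paths through a lookup CSV, which is why pandas.read_csv and pandas.DataFrame are the only detected API calls. A minimal sketch of that lookup pattern, assuming the user_provided/admin/paths.csv layout implied by the source (columns "name" and "path", with path components separated by spaces) rather than any documented schema:

import os
import pandas as pd

def retrieve_path_sketch(name, lookup_csv='user_provided/admin/paths.csv'):
    # Look up the space-separated path components registered under `name`.
    df = pd.read_csv(lookup_csv)
    parts = df.loc[df['name'] == name, 'path'].iloc[0].split(' ')
    # Create the folder portion (components before the first file name),
    # mirroring the directory-building loop in the original retrieve_path.
    folders = []
    for part in parts:
        if '.' in part:
            break
        folders.append(part)
    if folders:
        os.makedirs(os.path.join(*folders), exist_ok=True)
    return os.path.join(*parts)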
puririshi98/GNMT
|
[
"12099ff622c1d459fae9b0cda10b21615a1a5064"
] |
[
"seq2seq/utils.py"
] |
[
"import logging.config\nimport os\nimport random\nimport sys\nimport time\nfrom contextlib import contextmanager\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn.init as init\nimport torch.utils.collect_env\n\n\ndef init_lstm_(lstm, init_weight=0.1):\n \"\"\"\n Initializes weights of LSTM layer.\n Weights and biases are initialized with uniform(-init_weight, init_weight)\n distribution.\n\n :param lstm: instance of torch.nn.LSTM\n :param init_weight: range for the uniform initializer\n \"\"\"\n # Initialize hidden-hidden weights\n init.uniform_(lstm.weight_hh_l0.data, -init_weight, init_weight)\n # Initialize input-hidden weights:\n init.uniform_(lstm.weight_ih_l0.data, -init_weight, init_weight)\n\n # Initialize bias. PyTorch LSTM has two biases, one for input-hidden GEMM\n # and the other for hidden-hidden GEMM. Here input-hidden bias is\n # initialized with uniform distribution and hidden-hidden bias is\n # initialized with zeros.\n init.uniform_(lstm.bias_ih_l0.data, -init_weight, init_weight)\n init.zeros_(lstm.bias_hh_l0.data)\n\n if lstm.bidirectional:\n init.uniform_(lstm.weight_hh_l0_reverse.data, -init_weight, init_weight)\n init.uniform_(lstm.weight_ih_l0_reverse.data, -init_weight, init_weight)\n\n init.uniform_(lstm.bias_ih_l0_reverse.data, -init_weight, init_weight)\n init.zeros_(lstm.bias_hh_l0_reverse.data)\n\n\ndef generate_seeds(rng, size):\n \"\"\"\n Generate list of random seeds\n\n :param rng: random number generator\n :param size: length of the returned list\n \"\"\"\n seeds = [rng.randint(0, 2**32 - 1) for _ in range(size)]\n return seeds\n\n\ndef broadcast_seeds(seeds, device):\n \"\"\"\n Broadcasts random seeds to all distributed workers.\n Returns list of random seeds (broadcasted from workers with rank 0).\n\n :param seeds: list of seeds (integers)\n :param device: torch.device\n \"\"\"\n if torch.distributed.is_available() and torch.distributed.is_initialized():\n seeds_tensor = torch.LongTensor(seeds).to(device)\n torch.distributed.broadcast(seeds_tensor, 0)\n seeds = seeds_tensor.tolist()\n return seeds\n\n\ndef setup_seeds(master_seed, epochs, device):\n \"\"\"\n Generates seeds from one master_seed.\n Function returns (worker_seeds, shuffling_seeds), worker_seeds are later\n used to initialize per-worker random number generators (mostly for\n dropouts), shuffling_seeds are for RNGs resposible for reshuffling the\n dataset before each epoch.\n Seeds are generated on worker with rank 0 and broadcasted to all other\n workers.\n\n :param master_seed: master RNG seed used to initialize other generators\n :param epochs: number of epochs\n :param device: torch.device (used for distributed.broadcast)\n \"\"\"\n if master_seed is None:\n # random master seed, random.SystemRandom() uses /dev/urandom on Unix\n master_seed = random.SystemRandom().randint(0, 2**32 - 1)\n if get_rank() == 0:\n # master seed is reported only from rank=0 worker, it's to avoid\n # confusion, seeds from rank=0 are later broadcasted to other\n # workers\n logging.info(f'Using random master seed: {master_seed}')\n else:\n # master seed was specified from command line\n logging.info(f'Using master seed from command line: {master_seed}')\n\n # initialize seeding RNG\n seeding_rng = random.Random(master_seed)\n\n # generate worker seeds, one seed for every distributed worker\n worker_seeds = generate_seeds(seeding_rng, get_world_size())\n\n # generate seeds for data shuffling, one seed for every epoch\n shuffling_seeds = generate_seeds(seeding_rng, epochs)\n\n 
# broadcast seeds from rank=0 to other workers\n worker_seeds = broadcast_seeds(worker_seeds, device)\n shuffling_seeds = broadcast_seeds(shuffling_seeds, device)\n return worker_seeds, shuffling_seeds\n\n\ndef barrier():\n \"\"\"\n Works as a temporary distributed barrier, currently pytorch\n doesn't implement barrier for NCCL backend.\n Calls all_reduce on dummy tensor and synchronizes with GPU.\n \"\"\"\n if torch.distributed.is_available() and torch.distributed.is_initialized():\n torch.distributed.all_reduce(torch.cuda.FloatTensor(1))\n torch.cuda.synchronize()\n\n\ndef get_rank():\n \"\"\"\n Gets distributed rank or returns zero if distributed is not initialized.\n \"\"\"\n if torch.distributed.is_available() and torch.distributed.is_initialized():\n rank = torch.distributed.get_rank()\n else:\n rank = 0\n return rank\n\n\ndef get_world_size():\n \"\"\"\n Gets total number of distributed workers or returns one if distributed is\n not initialized.\n \"\"\"\n if torch.distributed.is_available() and torch.distributed.is_initialized():\n world_size = torch.distributed.get_world_size()\n else:\n world_size = 1\n return world_size\n\n\n@contextmanager\ndef sync_workers():\n \"\"\"\n Yields distributed rank and synchronizes all workers on exit.\n \"\"\"\n rank = get_rank()\n yield rank\n barrier()\n\n\n@contextmanager\ndef timer(name, ndigits=2, sync_gpu=True):\n if sync_gpu:\n torch.cuda.synchronize()\n start = time.time()\n yield\n if sync_gpu:\n torch.cuda.synchronize()\n stop = time.time()\n elapsed = round(stop - start, ndigits)\n logging.info(f'TIMER {name} {elapsed}')\n\n\ndef setup_logging(log_file=os.devnull):\n \"\"\"\n Configures logging.\n By default logs from all workers are printed to the console, entries are\n prefixed with \"N: \" where N is the rank of the worker. 
Logs printed to the\n console don't include timestaps.\n Full logs with timestamps are saved to the log_file file.\n \"\"\"\n class RankFilter(logging.Filter):\n def __init__(self, rank):\n self.rank = rank\n\n def filter(self, record):\n record.rank = self.rank\n return True\n\n rank = get_rank()\n rank_filter = RankFilter(rank)\n\n logging_format = \"%(asctime)s - %(levelname)s - %(rank)s - %(message)s\"\n logging.basicConfig(level=logging.DEBUG,\n format=logging_format,\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n filename=log_file,\n filemode='w')\n console = logging.StreamHandler(sys.stdout)\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(rank)s: %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n logging.getLogger('').addFilter(rank_filter)\n\n\ndef set_device(cuda, local_rank):\n \"\"\"\n Sets device based on local_rank and returns instance of torch.device.\n\n :param cuda: if True: use cuda\n :param local_rank: local rank of the worker\n \"\"\"\n if cuda:\n torch.cuda.set_device(local_rank)\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n return device\n\n\ndef init_distributed(cuda):\n \"\"\"\n Initializes distributed backend.\n\n :param cuda: (bool) if True initializes nccl backend, if False initializes\n gloo backend\n \"\"\"\n world_size = int(os.environ.get('WORLD_SIZE', 1))\n distributed = (world_size > 1)\n if distributed:\n backend = 'nccl' if cuda else 'gloo'\n dist.init_process_group(backend=backend,\n init_method='env://')\n assert dist.is_initialized()\n return distributed\n\n\ndef log_env_info():\n \"\"\"\n Prints information about execution environment.\n \"\"\"\n logging.info('Collecting environment information...')\n env_info = torch.utils.collect_env.get_pretty_env_info()\n logging.info(f'{env_info}')\n\n\ndef pad_vocabulary(math):\n if math == 'fp16':\n pad_vocab = 8\n elif math == 'fp32':\n pad_vocab = 1\n return pad_vocab\n\n\ndef benchmark(test_acc, target_acc, test_perf, target_perf):\n def test(achieved, target, name):\n passed = True\n if target is not None and achieved is not None:\n logging.info(f'{name} achieved: {achieved:.2f} '\n f'target: {target:.2f}')\n if achieved >= target:\n logging.info(f'{name} test passed')\n else:\n logging.info(f'{name} test failed')\n passed = False\n return passed\n\n passed = True\n passed &= test(test_acc, target_acc, 'Accuracy')\n passed &= test(test_perf, target_perf, 'Performance')\n return passed\n\nclass AverageMeter:\n \"\"\"\n Computes and stores the average and current value\n \"\"\"\n def __init__(self, skip_first=True):\n self.reset()\n self.skip = skip_first\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n\n if self.skip:\n self.skip = False\n else:\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def reduce(self, op):\n \"\"\"\n Reduces average value over all workers.\n\n :param op: 'sum' or 'mean', reduction operator\n \"\"\"\n if op not in ('sum', 'mean'):\n raise NotImplementedError\n\n distributed = (get_world_size() > 1)\n if distributed:\n # Backward/forward compatibility around\n # https://github.com/pytorch/pytorch/commit/540ef9b1fc5506369a48491af8a285a686689b36 and\n # https://github.com/pytorch/pytorch/commit/044d00516ccd6572c0d6ab6d54587155b02a3b86\n # To accomodate change in Pytorch's distributed API\n if hasattr(dist, \"get_backend\"):\n _backend = dist.get_backend()\n if hasattr(dist, \"DistBackend\"):\n 
backend_enum_holder = dist.DistBackend\n else:\n backend_enum_holder = dist.Backend\n else:\n _backend = dist._backend\n backend_enum_holder = dist.dist_backend\n\n cuda = _backend == backend_enum_holder.NCCL\n\n if cuda:\n avg = torch.cuda.FloatTensor([self.avg])\n _sum = torch.cuda.FloatTensor([self.sum])\n else:\n avg = torch.FloatTensor([self.avg])\n _sum = torch.FloatTensor([self.sum])\n\n try:\n _reduce_op = dist.ReduceOp\n except AttributeError:\n _reduce_op = dist.reduce_op\n\n dist.all_reduce(avg, op=_reduce_op.SUM)\n dist.all_reduce(_sum, op=_reduce_op.SUM)\n self.avg = avg.item()\n self.sum = _sum.item()\n\n if op == 'mean':\n self.avg /= get_world_size()\n self.sum /= get_world_size()\n\n\ndef debug_tensor(tensor, name):\n \"\"\"\n Simple utility which helps with debugging.\n Takes a tensor and outputs: min, max, avg, std, number of NaNs, number of\n INFs.\n\n :param tensor: torch tensor\n :param name: name of the tensor (only for logging)\n \"\"\"\n logging.info(name)\n tensor = tensor.detach().float().cpu().numpy()\n logging.info(f'MIN: {tensor.min()} MAX: {tensor.max()} '\n f'AVG: {tensor.mean()} STD: {tensor.std()} '\n f'NAN: {np.isnan(tensor).sum()} INF: {np.isinf(tensor).sum()}')\n"
] |
[
[
"torch.nn.init.uniform_",
"torch.distributed.broadcast",
"torch.utils.collect_env.get_pretty_env_info",
"torch.FloatTensor",
"torch.device",
"torch.distributed.get_rank",
"torch.cuda.synchronize",
"torch.distributed.init_process_group",
"torch.LongTensor",
"numpy.isnan",
"torch.distributed.is_initialized",
"torch.cuda.FloatTensor",
"torch.distributed.is_available",
"torch.nn.init.zeros_",
"torch.distributed.get_world_size",
"torch.distributed.get_backend",
"torch.cuda.set_device",
"torch.distributed.all_reduce",
"numpy.isinf"
]
] |
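A hypothetical single-process use of the helpers above (the module path is taken from the file_path field; the seed and epoch values are illustrative). setup_seeds and AverageMeter degrade gracefully when torch.distributed is not initialized, so this sketch runs without a distributed launcher:

import torch
from seq2seq.utils import setup_seeds, set_device, AverageMeter

device = set_device(cuda=torch.cuda.is_available(), local_rank=0)
worker_seeds, shuffling_seeds = setup_seeds(master_seed=1, epochs=8, device=device)
torch.manual_seed(worker_seeds[0])  # one seed per worker; rank 0 in this sketch

meter = AverageMeter(skip_first=False)
for loss in (0.9, 0.7, 0.5):
    meter.update(loss)
print(meter.avg)  # 0.7, the running mean of the recorded losses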
JeppeDruedahl/HighFreqInc
|
[
"a06883586fa00b208c5cbe731108bfa925fee09b"
] |
[
"dstcode/moments.py"
] |
[
"import numpy as np\nfrom numba import njit\n\n@njit\ndef mean_var_skew_kurt_ages(x,age,cond,ages,periods=12):\n \"\"\" calcualte mean, variance, skewness and kurtosis \"\"\"\n \n # a. allocate memory\n N = x.shape[0]\n T = x.shape[1]\n Nages = ages.size\n\n Nactive = np.zeros(ages.size,dtype=np.int64)\n out = np.zeros((4,Nages))\n\n mean = out[0,:]\n var = out[1,:]\n skew = out[2,:]\n kurt = out[3,:]\n\n # b. determine sums and sample\n for i in range(N):\n for j in range(Nages):\n\n t0 = (ages[j]-age[i,0])*periods\n if t0 < 0 or t0+periods > T: continue\n\n for t in range(t0,t0+periods):\n if cond[i,t] and (not np.isnan(x[i,t])):\n Nactive[j] += 1\n mean[j] += x[i,t]\n\n # c. means\n for j in range(Nages):\n if Nactive[j] == 0:\n mean[j] = np.nan\n else:\n mean[j] /= Nactive[j]\n\n # d. variance and kurtosis\n for i in range(N): \n for j in range(Nages):\n\n if Nactive[j] == 0: continue\n t0 = (ages[j]-age[i,0])*periods\n if t0 < 0 or t0+periods > T: continue\n \n for t in range(t0,t0+periods):\n if cond[i,t] and (not np.isnan(x[i,t])):\n \n diff = x[i,t]-mean[j]\n diff2 = diff*diff\n\n var[j] += diff2\n skew[j] += diff2*diff\n kurt[j] += diff2*diff2\n \n # e. result\n for j in range(Nages):\n\n if Nactive[j] > 0:\n var[j] /= Nactive[j]-1\n else:\n var[j] = np.nan\n \n if Nactive[j] > 2:\n cor_fac = Nactive[j]/((Nactive[j]-1)*(Nactive[j]-2))\n skew[j] *= cor_fac\n skew[j] /= var[j]**(3/2)\n else:\n skew[j] = np.nan\n\n if Nactive[j] > 3:\n cor_fac = (((Nactive[j]-1)/Nactive[j])*((Nactive[j]-2)/(Nactive[j]+1))*(Nactive[j]-3))\n cor_sub = 3*(Nactive[j]-1)*(Nactive[j]-1) / ((Nactive[j]-2)*(Nactive[j]-3))\n kurt[j] /= cor_fac\n kurt[j] /= var[j]*var[j]\n kurt[j] -= cor_sub\n else: \n kurt[j] = np.nan\n \n return out.ravel()\n\n@njit\ndef cov_ages(a,b,offset,age,cond,ages,periods=12):\n \"\"\" calculate covariance \"\"\"\n\n # a. allocate memory\n N = a.shape[0]\n T = a.shape[1]\n Nages = ages.size\n\n Nactive = np.zeros(Nages,dtype=np.int64)\n mean_a = np.zeros(Nages)\n mean_b = np.zeros(Nages)\n cov = np.zeros(Nages)\n\n # b. determine sums and sample\n for i in range(N):\n for j in range(Nages):\n\n t0 = (ages[j]-age[i,0])*periods\n if t0 < 0: continue\n if t0+periods > T: continue\n if t0-offset < 0: continue\n \n for t in range(t0,t0+periods):\n\n if cond[i,t] and (not np.isnan(a[i,t])) and (not np.isnan(b[i,t-offset])):\n Nactive[j] += 1\n mean_a[j] += a[i,t]\n mean_b[j] += b[i,t-offset]\n\n # c. means\n for j in range(Nages):\n if Nactive[j] == 0:\n mean_a[j] = np.nan\n mean_b[j] = np.nan\n else:\n mean_a[j] /= Nactive[j]\n mean_b[j] /= Nactive[j]\n\n # d. covariance\n for i in range(N):\n \n for j in range(Nages):\n\n if Nactive[j] == 0: continue\n\n t0 = (ages[j]-age[i,0])*periods\n if t0 < 0: continue\n if t0+periods > T: continue\n if t0-offset < 0: continue\n \n for t in range(t0,t0+periods):\n if cond[i,t] and (not np.isnan(a[i,t])) and (not np.isnan(b[i,t-offset])):\n cov[j] += (a[i,t]-mean_a[j])*(b[i,t-offset]-mean_b[j])\n \n # e. result\n for j in range(Nages):\n\n if Nactive[j] > 0:\n cov[j] /= Nactive[j]-1\n else:\n cov[j] = np.nan\n \n return cov \n\n@njit\ndef share_in_range(x,etas_low,etas_high,age,cond,ages,periods=12):\n \"\"\" calculate share in range \"\"\"\n\n # a. allocate memory\n N = x.shape[0]\n T = x.shape[1]\n Nages = ages.size\n Netas = etas_low.size\n\n Nactive = np.zeros((Nages))\n Ntrue = np.zeros((Nages,Netas))\n\n # b. 
compute\n for i in range(N):\n for j in range(Nages):\n\n t0 = (ages[j]-age[i,0])*periods\n if t0 < 0 or t0+periods > T: continue\n\n for t in range(t0,t0+periods):\n\n if cond[i,t] and (not np.isnan(x[i,t])):\n\n Nactive[j] += 1\n for h in range(Netas):\n if (x[i,t] >= etas_low[h]) and (x[i,t] <= etas_high[h]):\n Ntrue[j,h] += 1\n else:\n break\n \n # c. result\n out = np.zeros((Netas,Nages))\n for j in range(Nages):\n if Nactive[j] > 0:\n for h in range(Netas):\n out[h,j] = Ntrue[j,h]/Nactive[j]\n else:\n for h in range(Netas):\n out[h,j] = np.nan\n\n return out.ravel()"
] |
[
[
"numpy.isnan",
"numpy.zeros"
]
] |
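An illustrative call to mean_var_skew_kurt_ages with synthetic panel data; the shapes and dtypes (N x T arrays, integer ages, monthly data with periods=12) are assumptions read off the function signature, not documented usage:

import numpy as np
from dstcode.moments import mean_var_skew_kurt_ages

N, T = 500, 24                          # 500 individuals, two years of months
rng = np.random.default_rng(0)
x = rng.normal(size=(N, T))             # outcome variable, e.g. income growth
age = np.full((N, T), 30)               # everyone observed from age 30
cond = np.ones((N, T), dtype=np.bool_)  # all observations included
ages = np.array([30, 31])               # ages at which to compute moments

out = mean_var_skew_kurt_ages(x, age, cond, ages, periods=12)
mean, var, skew, kurt = out.reshape(4, ages.size)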
srinidhigoud/tvm
|
[
"3861fb8ee39746caa67f4577d17afc239be1dec5"
] |
[
"python/tvm/relay/op/contrib/tensorrt.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, unused-argument\n\"\"\"TensorRT supported operators.\"\"\"\nimport logging\nimport numpy as np\nimport tvm\nfrom tvm import relay\nfrom tvm.relay import transform\nfrom tvm.relay.build_module import bind_params_by_name\nfrom tvm.relay.expr import Call, Constant, Tuple, GlobalVar, Var, TupleGetItem\nfrom tvm.relay.expr_functor import ExprMutator, ExprVisitor\n\nlogger = logging.getLogger(\"TensorRT\")\n\n\ndef is_tensorrt_runtime_enabled():\n \"\"\"Check if the TensorRT graph runtime is present.\n Returns\n -------\n ret: bool\n True if present, False if not.\n \"\"\"\n check_enabled = tvm.get_global_func(\"relay.op.is_tensorrt_runtime_enabled\", True)\n if check_enabled:\n return check_enabled()\n return False\n\n\ndef get_tensorrt_version():\n \"\"\"Gets the version of TensorRT that TVM is built against or is targeting.\n\n Returns\n -------\n ret: Tuple[int, int, int]\n TensorRT version as a tuple of major, minor, and patch number. If TVM\n is not built with TensorRT, the value set by set_tensorrt_version() is returned instead.\n \"\"\"\n pass_ctx = tvm.transform.PassContext.current()\n if \"relay.ext.tensorrt.options\" in pass_ctx.config:\n return tuple(pass_ctx.config[\"relay.ext.tensorrt.options\"].tensorrt_version)\n return tuple(tvm.get_global_func(\"relay.op.get_tensorrt_version\")())\n\n\ndef get_tensorrt_use_implicit_batch_mode():\n pass_ctx = tvm.transform.PassContext.current()\n if \"relay.ext.tensorrt.options\" in pass_ctx.config:\n return pass_ctx.config[\"relay.ext.tensorrt.options\"].use_implicit_batch\n logger.warning(\n \"PassContext has no relay.ext.tensorrt.options config, using default value \"\n \"use_implicit_batch=True.\"\n )\n return True\n\n\ndef get_tensorrt_remove_no_mac_subgraphs():\n pass_ctx = tvm.transform.PassContext.current()\n if \"relay.ext.tensorrt.options\" in pass_ctx.config:\n return pass_ctx.config[\"relay.ext.tensorrt.options\"].remove_no_mac_subgraphs\n logger.warning(\n \"PassContext has no relay.ext.tensorrt.options config, using default value \"\n \"remove_no_mac_subgraphs=False.\"\n )\n return False\n\n\ndef partition_for_tensorrt(\n mod,\n params=None,\n version=None,\n use_implicit_batch=True,\n remove_no_mac_subgraphs=False,\n max_workspace_size=1 << 30,\n):\n \"\"\"Partition the graph greedily offloading supported operators to TensorRT.\n\n Parameters\n ----------\n mod : Module\n The module to run passes on.\n params : Optional[Dict[str, NDArray]]\n Constant input parameters.\n version : Optional[Tuple[int, int, int]]\n TensorRT version to target as tuple of (major, minor, patch). 
If TVM is compiled with\n USE_TENSORRT_RUNTIME=ON, the linked TensorRT version will be used instead.\n use_implicit_batch : Optional[bool]\n Use TensorRT implicit batch mode (default true). Setting to false will enable explicit batch\n mode which will widen supported operators to include those which modify the batch dimension,\n but may reduce performance for some models.\n remove_no_mac_subgraphs : Optional[bool]\n Removes subgraphs which have been partitioned for TensorRT if they do not have any\n multiply-accumulate operations. The removed subgraphs will go through TVM's standard\n compilation instead. Can improve performance.\n max_workspace_size : Optional[int]\n How many bytes of workspace size to allow each subgraph to use for TensorRT engine creation.\n See TensorRT documentation for more info.\n Returns\n -------\n mod_and_config : Tuple[Module, Dict[str, Any]]\n A tuple of 1) annotated and partitioned module and 2) \"relay.ext.tensorrt.options\"\n configuration which should be given to PassContext when building.\n \"\"\"\n config = {\n \"use_implicit_batch\": use_implicit_batch,\n \"max_workspace_size\": max_workspace_size,\n \"remove_no_mac_subgraphs\": remove_no_mac_subgraphs,\n }\n if version:\n assert isinstance(version, tuple) and len(version) == 3\n config[\"tensorrt_version\"] = version\n else:\n linked_version = tuple(tvm.get_global_func(\"relay.op.get_tensorrt_version\")())\n if not linked_version:\n logger.warning(\n \"TVM was not built against TensorRT and no version was provided to \"\n \"partition_for_tensorrt. Defaulting to 6.0.1\"\n )\n linked_version = (6, 0, 1)\n config[\"tensorrt_version\"] = linked_version\n\n if params:\n mod[\"main\"] = bind_params_by_name(mod[\"main\"], params)\n seq = tvm.transform.Sequential(\n [\n transform.InferType(),\n RemoveDropoutPass(),\n transform.RemoveUnusedFunctions(),\n transform.ConvertLayout(\n {\n \"nn.conv2d\": [\"NCHW\", \"default\"],\n \"nn.conv3d\": [\"NCDHW\", \"default\"],\n \"nn.conv2d_transpose\": [\"NCHW\", \"default\"],\n }\n ),\n transform.FoldConstant(),\n transform.AnnotateTarget(\"tensorrt\"),\n transform.MergeCompilerRegions(),\n transform.PartitionGraph(),\n transform.InferType(),\n ]\n )\n with tvm.transform.PassContext(opt_level=3, config={\"relay.ext.tensorrt.options\": config}):\n mod = seq(mod)\n mod = prune_tensorrt_subgraphs(mod)\n return mod, config\n\n\ndef check_dynamism(args, op_name):\n \"\"\"\n Check for dynamism inside any of the args in the op.\n\n Parameters\n ----------\n args : tvm.ir.container.Array\n Arguments of the op. 
Each of the argument shape is checked for presence of dynamic\n components.\n op_name: str\n Name of the op for debugging purposes only.\n Returns\n ----------\n ret : bool\n True if dynamism is present, False otherwise\n \"\"\"\n for arg in args:\n if isinstance(arg, (Call, Var, Constant, TupleGetItem)):\n for dim_shape in arg.checked_type.shape[1:]:\n if isinstance(dim_shape, tvm.tir.expr.Any):\n return True\n elif isinstance(arg, Tuple):\n return check_dynamism(arg.fields, op_name)\n else:\n logger.info(\n \"Arg not supported in TensorRT for %s with type %s\",\n op_name,\n type(arg),\n )\n return True\n return False\n\n\ndef _register_external_op_helper_with_checker(op_name, checker):\n @tvm.ir.register_op_attr(op_name, \"target.tensorrt\")\n def _func_wrapper(expr):\n attrs, args = expr.attrs, expr.args\n # ops with dynamic shapes are offloaded to VM\n if check_dynamism(args, op_name):\n return False\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if op_name == \"multiply\":\n shapes = [\n [\n int(x) if not isinstance(x, tvm.tir.expr.Any) else -1\n for x in arg.checked_type.shape\n ]\n for arg in args\n ]\n # Batched multiply operations don't work in implicit batch mode. The following shapes\n # have been excluded because they occur in PT MaskRCNN model. The long term solution is\n # to switch to explicit batch mode after performance regressions are solved.\n if all(\n [list(map(int, shape)) in [[300, 64, 7, 7], [300, 1, 1, 1]] for shape in shapes]\n ):\n return False\n return checker(attrs, args, op_name)\n\n return _func_wrapper\n\n\ndef _register_external_op_helper(op_name, supported=True):\n return _register_external_op_helper_with_checker(\n op_name, lambda attrs, args, op_name: supported\n )\n\n\ndef _register_external_dynamic_check_func(op_name):\n \"\"\"Wrapper to check dynamic shapes inside any of the args in the op.\"\"\"\n\n def _decorator_helper(checker):\n @tvm.ir.register_op_attr(op_name, \"target.tensorrt\")\n def _func_wrapper(expr):\n args = expr.args\n # ops with dynamic shapes are offloaded to VM\n if check_dynamism(args, op_name):\n return False\n return checker(expr)\n\n return _func_wrapper\n\n return _decorator_helper\n\n\n# Ops which are always supported\n_register_external_op_helper(\"nn.relu\")\n_register_external_op_helper(\"sigmoid\")\n_register_external_op_helper(\"tanh\")\n_register_external_op_helper(\"subtract\")\n_register_external_op_helper(\"multiply\")\n_register_external_op_helper(\"divide\")\n_register_external_op_helper(\"power\")\n_register_external_op_helper(\"maximum\")\n_register_external_op_helper(\"minimum\")\n_register_external_op_helper(\"exp\")\n_register_external_op_helper(\"log\")\n_register_external_op_helper(\"sqrt\")\n_register_external_op_helper(\"abs\")\n_register_external_op_helper(\"negative\")\n_register_external_op_helper(\"nn.batch_flatten\")\n_register_external_op_helper(\"clip\")\n\n\ndef reduce_annotate_fn(attrs, args, op_name):\n \"\"\"Helper for reduce operations.\"\"\"\n if not attrs.axis or len(attrs.axis) == 0:\n logger.info(\"%s: cannot reduce to scalar.\", op_name)\n return False\n if attrs.exclude:\n logger.info(\"%s: exclude not supported.\", op_name)\n return False\n if get_tensorrt_use_implicit_batch_mode() and any([x == 0 for x in map(int, attrs.axis)]):\n logger.info(\"%s: can't modify batch dimension.\", op_name)\n return False\n return True\n\n\n_register_external_op_helper_with_checker(\"sum\", 
reduce_annotate_fn)\n_register_external_op_helper_with_checker(\"prod\", reduce_annotate_fn)\n_register_external_op_helper_with_checker(\"max\", reduce_annotate_fn)\n_register_external_op_helper_with_checker(\"min\", reduce_annotate_fn)\n_register_external_op_helper_with_checker(\"mean\", reduce_annotate_fn)\n\n\ndef trt_version_annotate_fn(version):\n \"\"\"Helper for ops which require a minimum TRT version\"\"\"\n\n def _func_wrapper(attrs, args, op_name):\n if get_tensorrt_version() < version:\n logger.info(\n \"%s: requires TensorRT version %s or higher.\", op_name, \".\".join(map(str, version))\n )\n return False\n return True\n\n return _func_wrapper\n\n\n_register_external_op_helper_with_checker(\"nn.leaky_relu\", trt_version_annotate_fn((5, 1, 5)))\n_register_external_op_helper_with_checker(\"sin\", trt_version_annotate_fn((5, 1, 5)))\n_register_external_op_helper_with_checker(\"cos\", trt_version_annotate_fn((5, 1, 5)))\n_register_external_op_helper_with_checker(\"atan\", trt_version_annotate_fn((5, 1, 5)))\n_register_external_op_helper_with_checker(\"ceil\", trt_version_annotate_fn((5, 1, 5)))\n\n\n@_register_external_dynamic_check_func(\"add\")\ndef add_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if add is supported by TensorRT.\"\"\"\n\n args = expr.args\n\n shapes = [\n [int(x) if not isinstance(x, tvm.tir.expr.Any) else -1 for x in arg.checked_type.shape]\n for arg in args\n ]\n\n # RelayVM + TRT doesn't support scalar addition yet.\n for shape in shapes:\n if len(shape) < 1:\n return False\n\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if (\n not get_tensorrt_use_implicit_batch_mode()\n and (isinstance(args[0], Constant) or isinstance(args[1], Constant))\n and shapes[0][0] == shapes[1][0]\n and shapes[0][0] != 1\n and (len(shapes[0]) > 3 or len(shapes[1]) > 3)\n ):\n logger.info(\"add: bug in TRT with adding batched constants.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.batch_norm\")\ndef batch_norm_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.batch_norm is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if len(args[0].checked_type.shape) == 5 and get_tensorrt_version() < (6, 0, 1):\n logger.info(\"nn.batch_norm: TensorRT 6.0.1 or higher is required for rank 5 inputs.\")\n return False\n if len(args[0].checked_type.shape) > 5:\n logger.info(\"nn.batch_norm: Input rank must be 5 or less.\")\n return False\n if int(attrs.axis) not in (1, 3):\n logger.info(\"nn.batch_norm: axis is %d but must be 1 or 3.\", int(attrs.axis))\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.softmax\")\ndef softmax_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.softmax is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if get_tensorrt_use_implicit_batch_mode() and int(attrs.axis) == 0:\n logger.info(\"nn.softmax: can't modify batch dimension.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"qnn.conv2d\")\ndef qnn_conv2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if qnn.conv2d 
is supported by TensorRT.\"\"\"\n return True\n\n\n@_register_external_dynamic_check_func(\"qnn.requantize\")\ndef qnn_requantize_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if qnn.requantize is supported by TensorRT.\"\"\"\n return True\n\n\n@_register_external_dynamic_check_func(\"qnn.quantize\")\ndef qnn_quantize_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if qnn.quantize is supported by TensorRT.\"\"\"\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.conv2d\")\ndef conv2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.conv2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.data_layout != \"NCHW\":\n logger.info(\"nn.conv2d: data_layout is %s but must be NCHW.\", attrs.data_layout)\n return False\n if attrs.kernel_layout != \"OIHW\":\n logger.info(\"nn.conv2d: kernel_layout is %s but must be OIHW.\", attrs.kernel_layout)\n return False\n if attrs.out_layout and attrs.out_layout != \"NCHW\":\n logger.info(\"nn.conv2d: out_layout is %s but must be NCHW.\", attrs.out_layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.dense\")\ndef dense_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if dense is supported by TensorRT.\"\"\"\n\n args = expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n input_rank = len(args[0].checked_type.shape)\n weight_rank = len(args[1].checked_type.shape)\n if input_rank not in (2, 3, 4):\n logger.info(\"nn.dense: input has rank %d but must be 2, 3 or 4.\", input_rank)\n return False\n if weight_rank != 2:\n logger.info(\"nn.dense: weight has rank %d but must be 2.\", weight_rank)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.bias_add\")\ndef bias_add_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.bias_add is supported by TensorRT.\"\"\"\n\n args = expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n input_rank = len(args[0].checked_type.shape)\n if input_rank not in (2, 3, 4):\n logger.info(\"nn.bias_add: input rank is %d but must be 2, 3 or 4.\", input_rank)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.max_pool2d\")\ndef max_pool_2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.max_pool2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.layout != \"NCHW\":\n logger.info(\"nn.max_pool2d: layout is %s but must be NCHW.\", attrs.layout)\n return False\n if attrs.ceil_mode and get_tensorrt_version() < (5, 1, 5):\n logger.info(\"nn.avg_pool2d: ceil_mode=True requires TensorRT 5.1.5 or greater.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.avg_pool2d\")\ndef avg_pool_2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.avg_pool2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are 
supported for TensorRT.\")\n return False\n if attrs.layout != \"NCHW\":\n logger.info(\"nn.avg_pool2d: layout is %d but must be NCHW.\", attrs.layout)\n return False\n if (\n attrs.count_include_pad\n and len(attrs.padding) == 4\n and (\n int(attrs.padding[0]) != int(attrs.padding[2])\n or int(attrs.padding[1]) != int(attrs.padding[3])\n )\n ):\n logger.info(\n \"nn.avg_pool2d: inclusive-counted blended or average \"\n \"pooling is not supported in combination with asymmetric padding\"\n )\n return False\n if attrs.ceil_mode and get_tensorrt_version() < (5, 1, 5):\n logger.info(\"nn.avg_pool2d: ceil_mode=True requires TensorRT 5.1.5 or greater.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.global_max_pool2d\")\ndef global_max_pool_2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.global_max_pool2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.layout != \"NCHW\":\n logger.info(\"nn.global_max_pool2d: layout is %s but must be NCHW.\", attrs.layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.global_avg_pool2d\")\ndef global_avg_pool_2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.global_avg_pool2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.layout != \"NCHW\":\n logger.info(\"nn.global_avg_pool2d: layout is %s but must be NCHW.\", attrs.layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"expand_dims\")\ndef expand_dims_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if expand_dims is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if get_tensorrt_use_implicit_batch_mode() and int(attrs.axis) == 0:\n logger.info(\"expand_dims: can't modify batch dimension.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"squeeze\")\ndef squeeze_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if squeeze is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not attrs.axis:\n logger.info(\"squeeze: must explicitly set axis.\")\n return False\n if get_tensorrt_use_implicit_batch_mode() and any([axis == 0 for axis in map(int, attrs.axis)]):\n logger.info(\"squeeze: can't modify batch dimension.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"concatenate\")\ndef concatenate_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if concatenate is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.dtype != \"float32\" for x in args[0].checked_type.fields]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not get_tensorrt_use_implicit_batch_mode():\n return True\n if int(attrs.axis) == 0:\n logger.info(\"concatenate: can't modify batch dimension.\")\n return False\n if isinstance(args[0], Tuple):\n for 
tuple_input in args[0].fields:\n if isinstance(tuple_input, Constant):\n logger.info(\"concatenate: can't concatenate tensors with constants.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.conv2d_transpose\")\ndef conv2d_transpose_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.conv2d_transpose is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.data_layout != \"NCHW\":\n logger.info(\"nn.conv2d_transpose: data_layout is %s but must be NCHW.\", attrs.data_layout)\n return False\n if attrs.kernel_layout != \"OIHW\":\n logger.info(\n \"nn.conv2d_transpose: kernel_layout is %s but must be OIHW.\", attrs.kernel_layout\n )\n return False\n if attrs.out_layout and attrs.out_layout != \"NCHW\":\n logger.info(\"nn.conv2d_transpose: out_layout is %s but must be NCHW.\", attrs.out_layout)\n return False\n if attrs.dilation and any([rate != 1 for rate in map(int, attrs.dilation)]):\n logger.info(\"nn.conv2d_transpose: dilation rate must be 1.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"transpose\")\ndef transpose_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if transpose is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if get_tensorrt_use_implicit_batch_mode() and int(attrs.axes[0]) != 0:\n logger.info(\"transpose: can't modify batch dimension.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"layout_transform\")\ndef layout_transform_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if layout_transform is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if (attrs.src_layout, attrs.dst_layout) not in [\n (\"NCHW\", \"NHWC\"),\n (\"NHWC\", \"NCHW\"),\n (\"NDHWC\", \"NCDHW\"),\n (\"NCDHW\", \"NDHWC\"),\n ]:\n logger.info(\n \"layout_transform: %s to %s is not supported.\", attrs.src_layout, attrs.dst_layout\n )\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"reshape\")\ndef reshape_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if reshape is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if args[0].checked_type.dtype != \"float32\":\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if any([x < -1 for x in map(int, attrs.newshape)]):\n logger.info(\"reshape: new shape dims must be explicit.\")\n return False\n if get_tensorrt_use_implicit_batch_mode():\n shape = args[0].checked_type.shape\n new_shape = attrs.newshape\n if len(new_shape) == 0 or len(shape) == 0:\n logger.info(\"reshape: Can't reshape to or from scalar.\")\n return False\n\n dynamic_reshape = any([isinstance(x, tvm.tir.expr.Any) for x in shape])\n\n if dynamic_reshape:\n # Make sure that the batch dim is unmodified.\n if int(new_shape[0]) < 0:\n for shape_val, new_shape_val in enumerate(shape[1:], new_shape[1:]):\n if not (\n isinstance(shape_val, int)\n and isinstance(new_shape_val, int)\n and int(shape_val) == int(new_shape_val)\n ):\n return False\n elif int(new_shape[0]) > 
0:\n if not (\n isinstance(shape[0], int)\n and isinstance(new_shape[0], int)\n and int(shape[0]) == int(new_shape[0])\n ):\n return False\n return True\n shape = list(map(int, shape))\n new_shape = list(map(int, new_shape))\n\n # TRT cannot modify batch dimension.\n original_volume = np.prod(shape)\n # First, resolve 0.\n for i, value in enumerate(new_shape):\n if value == 0:\n new_shape[i] = shape[i]\n # Resolve -1.\n for i, value in enumerate(new_shape):\n if value == -1:\n new_shape[i] = original_volume // np.prod([x for x in new_shape if x != -1])\n # Remove batch dimension and see if volumes match\n if shape[0] != new_shape[0]:\n logger.info(\"reshape: can't modify batch dimension.\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.pad\")\ndef pad_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.pad is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if attrs.pad_mode != \"constant\":\n logger.info(\"nn.pad: pad mode is %s but must be constant.\", attrs.pad_mode)\n return False\n if float(attrs.pad_value) != 0.0:\n logger.info(\"nn.pad: pad value is %f but must be 0.0.\", float(attrs.pad_value))\n return False\n if any([x != 0 for x in attrs.pad_width[0]]) or any([x != 0 for x in attrs.pad_width[1]]):\n logger.info(\"nn.pad: can't pad batch or channel dimensions.\")\n return False\n if len(attrs.pad_width) == 5 and any([x != 0 for x in attrs.pad_width[2]]):\n logger.info(\"nn.pad: can only pad last two dimensions for 5D inputs.\")\n return True\n\n\n@_register_external_dynamic_check_func(\"strided_slice\")\ndef strided_slice_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if strided_slice is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if args[0].checked_type.dtype != \"float32\":\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not trt_version_annotate_fn((5, 1, 5))(attrs, args, \"strided_slice\"):\n return False\n if get_tensorrt_use_implicit_batch_mode():\n batch_dim_begin_modified = attrs.begin[0] is not None and int(attrs.begin[0]) != 0\n batch_dim_end_modified = (\n attrs.end[0] is not None\n and int(attrs.end[0]) != -1\n and int(attrs.end[0]) != int(args[0].checked_type.shape[0])\n )\n if batch_dim_begin_modified or batch_dim_end_modified:\n logger.info(\"strided_slice: can't modify batch dimension.\")\n return False\n if any([x is not None and x <= 0 for x in attrs.strides]):\n logger.info(\"strided_slice: stride must be positive\")\n return False\n for i in range(0, len(args[0].checked_type.shape)):\n begin = int(attrs.begin[i])\n if attrs.slice_mode == \"end\":\n end = (\n int(attrs.end[i])\n if attrs.end[i] is not None and int(attrs.end[i]) != -1\n else args[0].checked_type.shape[i]\n )\n size = int(end) - int(begin)\n elif attrs.slice_mode == \"size\":\n size = (\n int(attrs.end[i])\n if attrs.end[i] is not None and int(attrs.end[i]) != -1\n else args[0].checked_type.shape[i] - begin\n )\n else:\n logger.warning(\"strided_slice: unknown slice mode encountered\")\n\n if int(size) < 1:\n logger.info(\"strided_slice: size of slice must be at least 1\")\n return False\n\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.adaptive_max_pool2d\")\ndef adaptive_max_pool2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.adaptive_max_pool2d is 
supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if len(attrs.output_size) == 0 or any([size != 1 for size in map(int, attrs.output_size)]):\n logger.info(\"nn.adaptive_max_pool2d: output size must be (1, 1).\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.adaptive_avg_pool2d\")\ndef adaptive_avg_pool2d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.adaptive_avg_pool2d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if len(attrs.output_size) == 0 or any([size != 1 for size in map(int, attrs.output_size)]):\n logger.info(\"nn.adaptive_avg_pool2d: output size must be (1, 1).\")\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.conv3d\")\ndef conv3d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.conv3d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not trt_version_annotate_fn((6, 0, 1))(attrs, args, \"nn.conv3d\"):\n return False\n if attrs.data_layout != \"NCDHW\":\n logger.info(\"nn.conv3d: data_layout is %s but must be NCDHW.\", attrs.data_layout)\n return False\n if attrs.kernel_layout != \"OIDHW\":\n logger.info(\"nn.conv3d: kernel_layout is %s but must be OIDHW.\", attrs.kernel_layout)\n return False\n if attrs.out_layout and attrs.out_layout != \"NCDHW\":\n logger.info(\"nn.conv3d: out_layout is %s but must be NCDHW.\", attrs.out_layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.max_pool3d\")\ndef max_pool_3d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.max_pool3d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not trt_version_annotate_fn((6, 0, 1))(attrs, args, \"nn.max_pool3d\"):\n return False\n if attrs.layout != \"NCDHW\":\n logger.info(\"nn.max_pool3d: layout is %s but must be NCDHW.\", attrs.layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.avg_pool3d\")\ndef avg_pool_3d_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.avg_pool3d is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not trt_version_annotate_fn((6, 0, 1))(attrs, args, \"nn.avg_pool3d\"):\n return False\n if attrs.layout != \"NCDHW\":\n logger.info(\"nn.avg_pool3d: layout is %s but must be NCDHW.\", attrs.layout)\n return False\n return True\n\n\n@_register_external_dynamic_check_func(\"nn.conv3d_transpose\")\ndef conv3d_transpose_annotate_fn(expr): # pylint: disable=unused-variable\n \"\"\"Check if nn.conv3d_transpose is supported by TensorRT.\"\"\"\n\n attrs, args = expr.attrs, expr.args\n if any([x.checked_type.dtype != \"float32\" for x in args]):\n logger.info(\"Only float32 inputs are supported for TensorRT.\")\n return False\n if not 
trt_version_annotate_fn((6, 0, 1))(attrs, args, \"nn.conv3d_transpose\"):\n return False\n if attrs.data_layout != \"NCDHW\":\n logger.info(\"nn.conv3d_transpose: data_layout is %s but must be NCDHW.\", attrs.data_layout)\n return False\n if attrs.kernel_layout != \"OIDHW\":\n logger.info(\n \"nn.conv3d_transpose: kernel_layout is %s but must be OIDHW.\", attrs.kernel_layout\n )\n return False\n if attrs.out_layout and attrs.out_layout != \"NCDHW\":\n logger.info(\"nn.conv3d_transpose: out_layout is %s but must be NCDHW.\", attrs.out_layout)\n return False\n if attrs.dilation and any([rate != 1 for rate in map(int, attrs.dilation)]):\n logger.info(\"nn.conv3d_transpose: dilation rate must be 1.\")\n return False\n if attrs.output_padding and any([x != 0 for x in map(int, attrs.output_padding)]):\n logger.info(\"nn.conv3d_transpose: output padding is not supported.\")\n return False\n return True\n\n\nclass IsComputeIntensiveGraph(ExprVisitor):\n \"\"\"\n Visits the Graph recursively and checks if it contains compute heavy ops like convolutions and\n its transpose, dense and batch mat-mul.\n \"\"\"\n\n def __init__(self):\n ExprVisitor.__init__(self)\n self.is_compute_intensive = False\n\n def visit_call(self, call):\n compute_intensive_ops = set(\n [\n \"nn.conv2d\",\n \"nn.conv2d_transpose\",\n \"nn.conv3d\",\n \"nn.conv3d_transpose\",\n \"nn.dense\",\n \"nn.batch_matmul\",\n ]\n )\n if isinstance(call.op, tvm.tir.op.Op):\n if str(call.op) in compute_intensive_ops:\n self.is_compute_intensive = True\n\n return super().visit_call(call)\n\n def is_graph_compute_intensive(self, subgraph) -> bool:\n \"\"\"\n This function recursively visits the graph and checks if it's compute intensive\"\n \"\"\"\n self.visit(subgraph)\n return self.is_compute_intensive\n\n\ndef is_valid_subgraph(params, body):\n \"\"\"Final check on whether the subgraph is valid and should be offloaded to TensorRT.\"\"\"\n # Remove invalid subgraphs for implicit batch mode.\n if get_tensorrt_use_implicit_batch_mode():\n input_batch_sizes = []\n for var in params:\n # In implicit batch mode, all inputs must have same batch size\n # TODO: (codeislife99) : Fix different dynamic batch size inputs\n\n if isinstance(var.checked_type, relay.TupleType):\n for tupe_type in var.checked_type.fields:\n # Scalar inputs not allowed\n if len(tupe_type.shape) == 0:\n logger.info(\"tensorrt: scalar inputs not supported\")\n return False\n\n if not isinstance(tupe_type.shape[0], tvm.tir.expr.Any):\n input_batch_sizes.append(int(tupe_type.shape[0]))\n else:\n # Scalar inputs not allowed\n if len(var.checked_type.shape) == 0:\n logger.info(\"tensorrt: scalar inputs not supported\")\n return False\n if not isinstance(var.checked_type.shape[0], tvm.tir.expr.Any):\n input_batch_sizes.append(int(var.checked_type.shape[0]))\n if len(input_batch_sizes) > 1 and len(set(input_batch_sizes)) != 1:\n logger.info(\"tensorrt: inputs have different batch sizes\")\n return False\n if (\n get_tensorrt_remove_no_mac_subgraphs()\n and not IsComputeIntensiveGraph().is_graph_compute_intensive(body)\n ):\n return False\n return True\n\n\ndef prune_tensorrt_subgraphs(mod):\n \"\"\"\n Removes invalid subgraphs and those with no multiply-accumulates (if remove_no_max_subgraphs\n is set).\n \"\"\"\n\n class SubgraphRemover(ExprMutator):\n \"\"\"\n Reverts subgraphs in subgraphs_to_remove back to TVM instead of using an external codegen.\n \"\"\"\n\n def __init__(self, subgraphs_to_remove, mod, new_mod):\n ExprMutator.__init__(self)\n self.subgraphs_to_remove = 
subgraphs_to_remove\n self.mod = mod\n self.new_mod = new_mod\n\n def visit_call(self, call):\n if isinstance(call.op, GlobalVar):\n name = call.op.name_hint\n if name in self.subgraphs_to_remove:\n # \"Inline\" the subgraph back into new main function.\n func = self.mod[name]\n var_map = {}\n for arg, param in zip(call.args, func.params):\n var_map[param] = super().visit(arg)\n new_body = relay.bind(func.body, var_map)\n return new_body\n if name != \"main\":\n args = []\n for arg in call.args:\n args.append(super().visit(arg))\n return call.op(*args)\n return super().visit_call(call)\n\n subgraphs_to_remove = []\n # Remove invalid subgraphs\n for subgraph in mod.get_global_vars():\n name = subgraph.name_hint\n if not mod[name].attrs or mod[name].attrs[\"Compiler\"] != \"tensorrt\":\n continue\n if not is_valid_subgraph(mod[name].params, mod[name].body):\n subgraphs_to_remove.append(name)\n # Create new pruned module\n new_mod = tvm.IRModule(mod.functions, mod.type_definitions)\n new_mod[\"main\"] = SubgraphRemover(subgraphs_to_remove, mod, new_mod).visit(mod[\"main\"])\n return new_mod\n\n\nclass RemoveDropout(ExprMutator):\n \"\"\"\n Removes all nn.dropout from an expr.\n \"\"\"\n\n def visit_tuple_getitem(self, op):\n visit = super().visit_tuple_getitem(op)\n if visit.index != 0:\n return visit\n if (\n isinstance(visit.tuple_value, Call)\n and visit.tuple_value.op.name == \"nn.dropout\"\n and visit.index == 0\n ):\n return visit.tuple_value.args[0]\n return visit\n\n\n@transform.function_pass(opt_level=0)\nclass RemoveDropoutPass:\n def transform_function(self, func, mod, _):\n return RemoveDropout().visit(func)\n"
] |
[
[
"numpy.prod"
]
] |
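A sketch of the intended entry point, following the partition_for_tensorrt docstring above; it assumes a TVM build with the TensorRT codegen enabled and uses a stock testing model purely for illustration:

import tvm
from tvm import relay
from tvm.relay.testing import resnet
from tvm.relay.op.contrib.tensorrt import partition_for_tensorrt

mod, params = resnet.get_workload(num_layers=18, batch_size=1)
mod, config = partition_for_tensorrt(mod, params, version=(6, 0, 1))

# Build with the returned config so the TensorRT options reach the codegen.
with tvm.transform.PassContext(opt_level=3,
                               config={"relay.ext.tensorrt.options": config}):
    lib = relay.build(mod, target="cuda", params=params)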
pureexe/point-cloud-projection
|
[
"10d492d8caa7de081a884fe4ae49b1d7efda4b62"
] |
[
"projection1.py"
] |
[
"from scipy.io import loadmat\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfocal = 30\nmat = loadmat('data/0001_mesh_rightfar.mat')\ncolor = mat['colors']\nvertex = mat['vertices']\ncamera_matrix = np.loadtxt('data/0001_camera_matrix_rightfar.txt')\n\n# Extensic\nextrinsic = camera_matrix\n\n#Intrinsic\nintrinsic = np.array([\n [focal, 0, 128],\n [0, focal, 128],\n [0, 0, 1]\n])\n\n#Add 1 into vextex last channel\nposition = np.ones([vertex.shape[0], 4])\nposition[:,:3] = vertex\n\n# APPLY CAMERA MODEL\n#projected = np.matmul(intrinsic,np.matmul(extrinsic,position.transpose()))\nprojected = np.matmul(intrinsic,position.transpose()[:3])\n\n\n#NORMALLIZE\nprojected = projected / projected[2,:]\nprojected = projected.transpose()\nprojected[:,0] = np.round(projected[:,0])\nprojected[:,1] = np.round(projected[:,1])\nprojected = projected.astype(np.int32)\nimage = np.zeros((256,256,3))\nfor i in range(len(color)):\n try:\n u,v,_ = projected[i]\n image[v,u,:] = color[i]\n except:\n pass\nplt.imsave(\"50_50.png\",image)\nplt.imshow(image)\nplt.show()\n\n"
] |
[
[
"matplotlib.pyplot.imsave",
"matplotlib.pyplot.imshow",
"scipy.io.loadmat",
"matplotlib.pyplot.show",
"numpy.ones",
"numpy.round",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt"
]
] |
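
projection1.py applies the classic pinhole model: stack the points as columns, multiply by the 3x3 intrinsic matrix, and divide by the third (depth) row; the extrinsic multiply is commented out, so projection happens in camera coordinates. A self-contained sketch of that step (the focal length and 128-pixel principal point mirror the script; the points are made up):

```python
import numpy as np

focal, cx, cy = 30.0, 128.0, 128.0
K = np.array([[focal, 0.0, cx],
              [0.0, focal, cy],
              [0.0, 0.0, 1.0]])  # intrinsic matrix

# Synthetic camera-space points (x, y, z), one per row, positive depth.
pts = np.array([[0.5, -0.2, 2.0],
                [1.0,  0.4, 4.0]])

proj = K @ pts.T            # 3 x N homogeneous pixel coordinates
proj = proj / proj[2, :]    # perspective divide by depth
uv = np.round(proj[:2, :].T).astype(np.int32)
print(uv)                   # integer (u, v) pixel coordinates per point
```
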
BodenmillerGroup/spherpro
|
[
"0bcbc76942c7bae82deddcdc4fa5239e04442553"
] |
[
"spherpro/bromodules/filter_objectfilters.py"
] |
[
"\"\"\"\nA class to generate and add filters to the filter table.\n\"\"\"\nimport pandas as pd\nimport sqlalchemy as sa\n\nimport spherpro.bromodules.filter_base as filter_base\nimport spherpro.db as db\n\n# TODO: move to default configuration?\nFILTERSTACKNAME = \"FilterStack\"\nFILTERTYPENAME = \"filter\"\n\n\nclass ObjectFilterLib(filter_base.BaseFilter):\n def __init__(self, bro):\n super().__init__(bro)\n\n def add_filtername(self, filtername):\n \"\"\"\n Adds a filtername to the objectfilter\n Args:\n filtername: a string\n \"\"\"\n fil_id = (\n self.data.main_session.query(db.object_filter_names.object_filter_id)\n .filter(db.object_filter_names.object_filter_name == filtername)\n .scalar()\n )\n if fil_id is None:\n new_id = self.data._query_new_ids(\n db.object_filter_names.object_filter_id, 1\n )\n new_id = list(new_id)\n fil_id = new_id[0]\n dat = pd.DataFrame(\n {\n db.object_filter_names.object_filter_name.key: [filtername],\n db.object_filter_names.object_filter_id.key: new_id,\n }\n )\n self.data._add_generic_tuple(dat, db.object_filter_names)\n\n fil_id = int(fil_id)\n\n return fil_id\n\n def write_filter_to_db(self, filterdata, filtername, drop=True, replace=True):\n \"\"\"\n Writes a dataframe containing Filterdata to the DB.\n Args:\n filterdata: DataFrame containing the filterdata. Needs to contain a column filter_value\n and object_id\n filtername: String stating the Filtername\n \"\"\"\n filterdata = filterdata.loc[\n :, [db.objects.object_id.key, db.object_filters.filter_value.key]\n ]\n filterdata.loc[\n :, db.object_filter_names.object_filter_id.key\n ] = self.add_filtername(filtername)\n\n if drop:\n self.delete_filter_by_name(filtername)\n filterdata = filterdata.dropna()\n filterdata[db.object_filters.filter_value.key] = filterdata[\n db.object_filters.filter_value.key\n ].astype(int)\n self.data._bulkinsert(filterdata, db.object_filters)\n\n def delete_filter_by_name(self, filtername):\n fid = (\n self.session.query(db.object_filter_names.object_filter_id).filter(\n db.object_filter_names.object_filter_name == filtername\n )\n ).one()[0]\n (\n self.session.query(db.object_filters).filter(\n db.object_filters.object_filter_id == fid\n )\n ).delete()\n self.session.commit()\n\n def get_combined_filterquery(self, object_filters):\n \"\"\"\n Get a filter query for the requested filters:\n Args:\n object_filters: list of format [(filtername1, filtervalue1),\n (filtername2, filtervalue2), ... ]\n image_filters: list of same format as object_filters\n returns: a subquery that can be joined to another query\n \"\"\"\n\n subquerys = [\n self.data.main_session.query(db.object_filters.object_id)\n .join(db.object_filter_names)\n .filter(db.object_filter_names.object_filter_name == filname)\n .filter(db.object_filters.filter_value == int(filval))\n .subquery(filname + str(filval))\n for filname, filval in object_filters\n ]\n query = self.data.main_session.query(db.objects.object_id)\n for sq in subquerys:\n query = query.join(sq, db.objects.object_id == sq.c.object_id)\n return query.subquery()\n\n def get_combined_filterstatement(self, object_filters):\n \"\"\"\n Get a filter statement for the requested filters:\n Args:\n object_filters: list of format [(filtername1, filtervalue1),\n (filtername2, filtervalue2), ... 
]\n returns: a subquery that can be joined to another query\n \"\"\"\n subquery = self.get_combined_filterquery(object_filters)\n fil = sa.and_(db.objects.object_id == subquery.c.object_id)\n return fil\n\n def delete_by_filter(self, filter_statement):\n \"\"\"\n Deletes objects from the 'valid_objects' list based\n on an object filter statement.\n\n Args:\n filter_statement: an object filter statement\n\n Returns:\n\n \"\"\"\n stmt = self.session.query(db.valid_objects).filter(\n db.valid_objects.object_id.in_(filter_statement)\n )\n stmt.delete(synchronize_session=\"fetch\")\n self.session.commit()\n"
] |
[
[
"pandas.DataFrame"
]
] |
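
`write_filter_to_db` in the file above prepares its DataFrame the same way every time: keep only the object-id and filter-value columns, attach the numeric filter id, drop NaN rows, and cast the values to int before the bulk insert. A pandas-only sketch of that preparation (column names and the filter id are illustrative, not the spherpro schema):

```python
import numpy as np
import pandas as pd

filterdata = pd.DataFrame({
    "object_id": [1, 2, 3, 4],
    "filter_value": [1.0, 0.0, np.nan, 1.0],  # NaN = object not measured
    "extra": ["a", "b", "c", "d"],            # discarded by the column select
})
filter_id = 7  # illustrative, as add_filtername() would return

prepared = filterdata.loc[:, ["object_id", "filter_value"]].copy()
prepared["object_filter_id"] = filter_id
prepared = prepared.dropna()  # unmeasured objects are not inserted
prepared["filter_value"] = prepared["filter_value"].astype(int)
print(prepared)
```
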
YanhuiJoe/SCAN-BGLL-community-detection
|
[
"699b8af9bc496a9afbfee57b4ce750a386896726"
] |
[
"tst.py"
] |
[
"import networkx as nx\nimport pandas as pd\nimport math\nfrom sklearn import metrics\nfrom BGLL import PyLouvain\nimport numpy as np\n\n#\n# G = nx.read_gml('data/lesmis.gml', label='label')\n# print(G)\n# G = nx.Graph()\n# f = pd.read_csv('t.txt', sep=',', header=None)\n# edge_list = []\n# for i, j in zip(f[0], f[1]):\n# \tedge_list.append((i, j))\n# G.add_edges_from(edge_list)\n#\n# b = nx.betweenness_centrality(G)\n# m = max(b.values())\n# print(m)\n\n# pyl = PyLouvain.from_file(\"data/karate.txt\")\n# partition, q = pyl.apply_method()\n# print(partition)\n# print(q)\n\nlabels_pre = [[1, 4, 10, 12, 13, 15], [17, 18, 19, 21, 22, 26, 0, 7, 8, 11, 14, 16, 20, 24, 25, 27, 33],\n [2, 5, 6, 23, 28, 32, 3, 29], [9, 30, 31]]\nlabels_pre = np.array(labels_pre)\n\nm = max(max(labels_pre[i]) for i in range(labels_pre.size))\n# print(m)\nlabels_pre_ = [-1 for i in range(m + 1)]\nlabels_true = [1 for i in range(m + 1)]\nfor i in range(labels_pre.size):\n for j in labels_pre[i]:\n labels_pre_[j] = i\n\nprint(labels_pre_)\nprint(labels_true)\nNMI = metrics.normalized_mutual_info_score(labels_true, labels_pre_)\nprint(NMI)"
] |
[
[
"numpy.array",
"sklearn.metrics.normalized_mutual_info_score"
]
] |
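
tst.py converts community membership lists into a flat per-node label vector before calling scikit-learn's NMI; note that its `labels_true` ground truth is constant, which makes the mutual information (and hence the score) zero regardless of the prediction. A compact version of the conversion with a non-degenerate synthetic ground truth:

```python
from sklearn import metrics

communities = [[0, 1, 2], [3, 4], [5, 6, 7]]  # predicted partition

n_nodes = max(node for comm in communities for node in comm) + 1
labels_pred = [-1] * n_nodes
for comm_id, comm in enumerate(communities):
    for node in comm:
        labels_pred[node] = comm_id  # flatten membership into labels

labels_true = [0, 0, 0, 1, 1, 2, 2, 2]  # synthetic ground truth
print(metrics.normalized_mutual_info_score(labels_true, labels_pred))  # 1.0
```
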
lte2000/cwfx
|
[
"dc8daee44cea4b7c0286a7676e4a2829744fee64"
] |
[
"result/each_hangye.py"
] |
[
"# coding: utf-8\r\n\"\"\"\r\n\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport csv\r\nimport io\r\nimport time\r\nimport traceback\r\nimport logging\r\n\r\n\r\nif __name__ == '__main__':\r\n filtered_df = pd.read_csv(r\"filtered.csv\", encoding='gbk', sep='\\t', index_col=None, dtype={u'代码': str})\r\n each_hangye = {}\r\n for row in filtered_df.itertuples(index=False):\r\n hangye = row[2]\r\n code = row[0]\r\n name = row[1]\r\n if each_hangye.has_key(hangye):\r\n if len(each_hangye[hangye]) < 2:\r\n each_hangye[hangye].append((code, name))\r\n else:\r\n each_hangye[hangye] = [(code, name)]\r\n\r\n with io.open(\"hangye_stock.csv\", mode=\"w\", encoding=\"gbk\") as f:\r\n f.write(u\"代码,名称,行业\\n\")\r\n for k,v in each_hangye.items():\r\n for c in v:\r\n f.write(u\"{},{},{}\\n\".format(k, c[0], c[1]))"
] |
[
[
"pandas.read_csv"
]
] |
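
each_hangye.py keeps at most two (code, name) pairs per industry with a hand-rolled dict; on Python 3 the same selection is a one-liner with pandas' `groupby(...).head(2)`. A sketch with synthetic rows (English column names stand in for 代码/名称/行业):

```python
import pandas as pd

df = pd.DataFrame({
    "code": ["600000", "600001", "600002", "600003"],
    "name": ["A", "B", "C", "D"],
    "industry": ["bank", "bank", "bank", "steel"],
})

# At most two stocks per industry, preserving first-seen order.
picked = df.groupby("industry", sort=False).head(2)
picked.to_csv("hangye_stock.csv", index=False, encoding="utf-8")
print(picked)
```
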
cruzdanilo/dask
|
[
"965c9e401801689a6a68cec5c0529f912a459960"
] |
[
"dask/utils.py"
] |
[
"from __future__ import absolute_import, division, print_function\n\nimport codecs\nimport functools\nimport inspect\nimport io\nimport math\nimport os\nimport re\nimport shutil\nimport struct\nimport sys\nimport tempfile\nfrom errno import ENOENT\nfrom collections import Iterator\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom threading import Lock\nimport multiprocessing as mp\nimport uuid\nfrom weakref import WeakValueDictionary\n\nfrom .compatibility import (long, getargspec, BZ2File, GzipFile, LZMAFile, PY3,\n urlsplit, unicode)\nfrom .core import get_deps\nfrom .context import _globals\nfrom .optimize import key_split # noqa: F401\n\n\nsystem_encoding = sys.getdefaultencoding()\nif system_encoding == 'ascii':\n system_encoding = 'utf-8'\n\n\ndef deepmap(func, *seqs):\n \"\"\" Apply function inside nested lists\n\n >>> inc = lambda x: x + 1\n >>> deepmap(inc, [[1, 2], [3, 4]])\n [[2, 3], [4, 5]]\n\n >>> add = lambda x, y: x + y\n >>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])\n [[11, 22], [33, 44]]\n \"\"\"\n if isinstance(seqs[0], (list, Iterator)):\n return [deepmap(func, *items) for items in zip(*seqs)]\n else:\n return func(*seqs)\n\n\ndef homogeneous_deepmap(func, seq):\n if not seq:\n return seq\n n = 0\n tmp = seq\n while isinstance(tmp, list):\n n += 1\n tmp = tmp[0]\n\n return ndeepmap(n, func, seq)\n\n\ndef ndeepmap(n, func, seq):\n \"\"\" Call a function on every element within a nested container\n\n >>> def inc(x):\n ... return x + 1\n >>> L = [[1, 2], [3, 4, 5]]\n >>> ndeepmap(2, inc, L)\n [[2, 3], [4, 5, 6]]\n \"\"\"\n if n == 1:\n return [func(item) for item in seq]\n elif n > 1:\n return [ndeepmap(n - 1, func, item) for item in seq]\n elif isinstance(seq, list):\n return func(seq[0])\n else:\n return func(seq)\n\n\n@contextmanager\ndef ignoring(*exceptions):\n try:\n yield\n except exceptions:\n pass\n\n\ndef import_required(mod_name, error_msg):\n \"\"\"Attempt to import a required dependency.\n\n Raises a RuntimeError if the requested module is not available.\n \"\"\"\n try:\n return import_module(mod_name)\n except ImportError:\n raise RuntimeError(error_msg)\n\n\n@contextmanager\ndef tmpfile(extension='', dir=None):\n extension = '.' 
+ extension.lstrip('.')\n handle, filename = tempfile.mkstemp(extension, dir=dir)\n os.close(handle)\n os.remove(filename)\n\n try:\n yield filename\n finally:\n if os.path.exists(filename):\n if os.path.isdir(filename):\n shutil.rmtree(filename)\n else:\n with ignoring(OSError):\n os.remove(filename)\n\n\n@contextmanager\ndef tmpdir(dir=None):\n dirname = tempfile.mkdtemp(dir=dir)\n\n try:\n yield dirname\n finally:\n if os.path.exists(dirname):\n if os.path.isdir(dirname):\n with ignoring(OSError):\n shutil.rmtree(dirname)\n else:\n with ignoring(OSError):\n os.remove(dirname)\n\n\n@contextmanager\ndef filetext(text, extension='', open=open, mode='w'):\n with tmpfile(extension=extension) as filename:\n f = open(filename, mode=mode)\n try:\n f.write(text)\n finally:\n try:\n f.close()\n except AttributeError:\n pass\n\n yield filename\n\n\n@contextmanager\ndef changed_cwd(new_cwd):\n old_cwd = os.getcwd()\n os.chdir(new_cwd)\n try:\n yield\n finally:\n os.chdir(old_cwd)\n\n\n@contextmanager\ndef tmp_cwd(dir=None):\n with tmpdir(dir) as dirname:\n with changed_cwd(dirname):\n yield dirname\n\n\n@contextmanager\ndef noop_context():\n yield\n\n\ndef repr_long_list(seq):\n \"\"\"\n\n >>> repr_long_list(list(range(100)))\n '[0, 1, 2, ..., 98, 99]'\n \"\"\"\n if len(seq) < 8:\n return repr(seq)\n else:\n return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]\n\n\nclass IndexCallable(object):\n \"\"\" Provide getitem syntax for functions\n\n >>> def inc(x):\n ... return x + 1\n\n >>> I = IndexCallable(inc)\n >>> I[3]\n 4\n \"\"\"\n __slots__ = 'fn',\n\n def __init__(self, fn):\n self.fn = fn\n\n def __getitem__(self, key):\n return self.fn(key)\n\n\n@contextmanager\ndef filetexts(d, open=open, mode='t', use_tmpdir=True):\n \"\"\" Dumps a number of textfiles to disk\n\n d - dict\n a mapping from filename to text like {'a.csv': '1,1\\n2,2'}\n\n Since this is meant for use in tests, this context manager will\n automatically switch to a temporary current directory, to avoid\n race conditions when running tests in parallel.\n \"\"\"\n with (tmp_cwd() if use_tmpdir else noop_context()):\n for filename, text in d.items():\n f = open(filename, 'w' + mode)\n try:\n f.write(text)\n finally:\n try:\n f.close()\n except AttributeError:\n pass\n\n yield list(d)\n\n for filename in d:\n if os.path.exists(filename):\n with ignoring(OSError):\n os.remove(filename)\n\n\ncompressions = {'gz': 'gzip', 'bz2': 'bz2', 'xz': 'xz'}\n\n\ndef infer_compression(filename):\n extension = os.path.splitext(filename)[-1].strip('.')\n return compressions.get(extension, None)\n\n\nopens = {'gzip': GzipFile, 'bz2': BZ2File, 'xz': LZMAFile}\n\n\ndef open(filename, mode='rb', compression=None, **kwargs):\n if compression == 'infer':\n compression = infer_compression(filename)\n return opens.get(compression, io.open)(filename, mode, **kwargs)\n\n\ndef get_bom(fn, compression=None):\n \"\"\"\n Get the Byte Order Mark (BOM) if it exists.\n \"\"\"\n boms = set((codecs.BOM_UTF16, codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE))\n with open(fn, mode='rb', compression=compression) as f:\n f.seek(0)\n bom = f.read(2)\n f.seek(0)\n if bom in boms:\n return bom\n else:\n return b''\n\n\ndef get_bin_linesep(encoding, linesep):\n \"\"\"\n Simply doing `linesep.encode(encoding)` does not always give you\n *just* the linesep bytes, for some encodings this prefix's the\n linesep bytes with the BOM. 
This function ensures we just get the\n linesep bytes.\n \"\"\"\n if encoding == 'utf-16':\n return linesep.encode('utf-16')[2:] # [2:] strips bom\n else:\n return linesep.encode(encoding)\n\n\ndef textblock(filename, start, end, compression=None, encoding=system_encoding,\n linesep=os.linesep, buffersize=4096):\n \"\"\"Pull out a block of text from a file given start and stop bytes.\n\n This gets data starting/ending from the next linesep delimiter. Each block\n consists of bytes in the range [start,end[, i.e. the stop byte is excluded.\n If `start` is 0, then `start` corresponds to the true start byte. If\n `start` is greater than 0 and does not point to the beginning of a new\n line, then `start` is incremented until it corresponds to the start byte of\n the next line. If `end` does not point to the beginning of a new line, then\n the line that begins before `end` is included in the block although its\n last byte exceeds `end`.\n\n Examples\n --------\n >> with open('myfile.txt', 'wb') as f:\n .. f.write('123\\n456\\n789\\nabc')\n\n In the example below, 1 and 10 don't line up with endlines.\n\n >> u''.join(textblock('myfile.txt', 1, 10))\n '456\\n789\\n'\n \"\"\"\n # Make sure `linesep` is not a byte string because\n # `io.TextIOWrapper` in Python versions other than 2.7 dislike byte\n # strings for the `newline` argument.\n linesep = str(linesep)\n\n # Get byte representation of the line separator.\n bin_linesep = get_bin_linesep(encoding, linesep)\n bin_linesep_len = len(bin_linesep)\n\n if buffersize < bin_linesep_len:\n error = ('`buffersize` ({0:d}) must be at least as large as the '\n 'number of line separator bytes ({1:d}).')\n raise ValueError(error.format(buffersize, bin_linesep_len))\n\n chunksize = end - start\n\n with open(filename, 'rb', compression) as f:\n with io.BufferedReader(f) as fb:\n # If `start` does not correspond to the beginning of the file, we\n # need to move the file pointer to `start - len(bin_linesep)`,\n # search for the position of the next a line separator, and set\n # `start` to the position after that line separator.\n if start > 0:\n # `start` is decremented by `len(bin_linesep)` to detect the\n # case where the original `start` value corresponds to the\n # beginning of a line.\n start = max(0, start - bin_linesep_len)\n # Set the file pointer to `start`.\n fb.seek(start)\n # Number of bytes to shift the file pointer before reading a\n # new chunk to make sure that a multi-byte line separator, that\n # is split by the chunk reader, is still detected.\n shift = 1 - bin_linesep_len\n while True:\n buf = f.read(buffersize)\n if len(buf) < bin_linesep_len:\n raise StopIteration\n try:\n # Find the position of the next line separator and add\n # `len(bin_linesep)` which yields the position of the\n # first byte of the next line.\n start += buf.index(bin_linesep)\n start += bin_linesep_len\n except ValueError:\n # No line separator was found in the current chunk.\n # Before reading the next chunk, we move the file\n # pointer back `len(bin_linesep) - 1` bytes to make\n # sure that a multi-byte line separator, that may have\n # been split by the chunk reader, is still detected.\n start += len(buf)\n start += shift\n fb.seek(shift, os.SEEK_CUR)\n else:\n # We have found the next line separator, so we need to\n # set the file pointer to the first byte of the next\n # line.\n fb.seek(start)\n break\n\n with io.TextIOWrapper(fb, encoding, newline=linesep) as fbw:\n # Retrieve and yield lines until the file pointer reaches\n # `end`.\n while start < end:\n line 
= next(fbw)\n # We need to encode the line again to get the byte length\n # in order to correctly update `start`.\n bin_line_len = len(line.encode(encoding))\n if chunksize < bin_line_len:\n error = ('`chunksize` ({0:d}) is less than the line '\n 'length ({1:d}). This may cause duplicate '\n 'processing of this line. It is advised to '\n 'increase `chunksize`.')\n raise IOError(error.format(chunksize, bin_line_len))\n\n yield line\n start += bin_line_len\n\n\ndef concrete(seq):\n \"\"\" Make nested iterators concrete lists\n\n >>> data = [[1, 2], [3, 4]]\n >>> seq = iter(map(iter, data))\n >>> concrete(seq)\n [[1, 2], [3, 4]]\n \"\"\"\n if isinstance(seq, Iterator):\n seq = list(seq)\n if isinstance(seq, (tuple, list)):\n seq = list(map(concrete, seq))\n return seq\n\n\ndef skip(func):\n pass\n\n\ndef pseudorandom(n, p, random_state=None):\n \"\"\" Pseudorandom array of integer indexes\n\n >>> pseudorandom(5, [0.5, 0.5], random_state=123)\n array([1, 0, 0, 1, 1], dtype=int8)\n\n >>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)\n array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)\n \"\"\"\n import numpy as np\n p = list(p)\n cp = np.cumsum([0] + p)\n assert np.allclose(1, cp[-1])\n assert len(p) < 256\n\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n\n x = random_state.random_sample(n)\n out = np.empty(n, dtype='i1')\n\n for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):\n out[(x >= low) & (x < high)] = i\n return out\n\n\ndef random_state_data(n, random_state=None):\n \"\"\"Return a list of arrays that can initialize\n ``np.random.RandomState``.\n\n Parameters\n ----------\n n : int\n Number of tuples to return.\n random_state : int or np.random.RandomState, optional\n If an int, is used to seed a new ``RandomState``.\n \"\"\"\n import numpy as np\n\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n\n maxuint32 = np.iinfo(np.uint32).max\n return [(random_state.rand(624) * maxuint32).astype('uint32')\n for i in range(n)]\n\n\ndef is_integer(i):\n \"\"\"\n >>> is_integer(6)\n True\n >>> is_integer(42.0)\n True\n >>> is_integer('abc')\n False\n \"\"\"\n import numpy as np\n if isinstance(i, (int, long)):\n return True\n if isinstance(i, float):\n return (i).is_integer()\n if issubclass(type(i), np.integer):\n return i\n else:\n return False\n\n\ndef file_size(fn, compression=None):\n \"\"\" Size of a file on disk\n\n If compressed then return the uncompressed file size\n \"\"\"\n if compression == 'gzip':\n with open(fn, 'rb') as f:\n f.seek(-4, 2)\n result = struct.unpack('I', f.read(4))[0]\n elif compression:\n # depending on the implementation, this may be inefficient\n with open(fn, 'rb', compression) as f:\n result = f.seek(0, 2)\n else:\n result = os.stat(fn).st_size\n return result\n\n\nONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,\n classmethod, complex, dict, dir, enumerate, eval,\n float, format, frozenset, hash, hex, id, int, iter,\n len, list, max, min, next, oct, open, ord, range,\n repr, reversed, round, set, slice, sorted,\n staticmethod, str, sum, tuple,\n type, vars, zip, memoryview])\nif PY3:\n ONE_ARITY_BUILTINS.add(ascii) # noqa: F821\nMULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,\n isinstance, issubclass, map, pow, setattr])\n\n\ndef takes_multiple_arguments(func):\n \"\"\" Does this function take multiple arguments?\n\n >>> def f(x, y): pass\n >>> 
takes_multiple_arguments(f)\n True\n\n >>> def f(x): pass\n >>> takes_multiple_arguments(f)\n False\n\n >>> def f(x, y=None): pass\n >>> takes_multiple_arguments(f)\n False\n\n >>> def f(*args): pass\n >>> takes_multiple_arguments(f)\n True\n\n >>> class Thing(object):\n ... def __init__(self, a): pass\n >>> takes_multiple_arguments(Thing)\n False\n\n \"\"\"\n if func in ONE_ARITY_BUILTINS:\n return False\n elif func in MULTI_ARITY_BUILTINS:\n return True\n\n try:\n spec = getargspec(func)\n except:\n return False\n\n try:\n is_constructor = spec.args[0] == 'self' and isinstance(func, type)\n except:\n is_constructor = False\n\n if spec.varargs:\n return True\n\n if spec.defaults is None:\n return len(spec.args) - is_constructor != 1\n return len(spec.args) - len(spec.defaults) - is_constructor > 1\n\n\nclass Dispatch(object):\n \"\"\"Simple single dispatch.\"\"\"\n def __init__(self, name=None):\n self._lookup = {}\n self._lazy = {}\n if name:\n self.__name__ = name\n\n def register(self, type, func=None):\n \"\"\"Register dispatch of `func` on arguments of type `type`\"\"\"\n def wrapper(func):\n if isinstance(type, tuple):\n for t in type:\n self.register(t, func)\n else:\n self._lookup[type] = func\n return func\n\n return wrapper(func) if func is not None else wrapper\n\n def register_lazy(self, toplevel, func=None):\n \"\"\"\n Register a registration function which will be called if the\n *toplevel* module (e.g. 'pandas') is ever loaded.\n \"\"\"\n def wrapper(func):\n self._lazy[toplevel] = func\n return func\n\n return wrapper(func) if func is not None else wrapper\n\n def __call__(self, arg):\n # Fast path with direct lookup on type\n lk = self._lookup\n typ = type(arg)\n try:\n impl = lk[typ]\n except KeyError:\n pass\n else:\n return impl(arg)\n # Is a lazy registration function present?\n toplevel, _, _ = typ.__module__.partition('.')\n try:\n register = self._lazy.pop(toplevel)\n except KeyError:\n pass\n else:\n register()\n return self(arg) # recurse\n # Walk the MRO and cache the lookup result\n for cls in inspect.getmro(typ)[1:]:\n if cls in lk:\n lk[typ] = lk[cls]\n return lk[cls](arg)\n raise TypeError(\"No dispatch for {0} type\".format(typ))\n\n\ndef ensure_not_exists(filename):\n \"\"\"\n Ensure that a file does not exist.\n \"\"\"\n try:\n os.unlink(filename)\n except OSError as e:\n if e.errno != ENOENT:\n raise\n\n\ndef _skip_doctest(line):\n # NumPy docstring contains cursor and comment only example\n stripped = line.strip()\n if stripped == '>>>' or stripped.startswith('>>> #'):\n return stripped\n elif '>>>' in stripped:\n return line + ' # doctest: +SKIP'\n else:\n return line\n\n\ndef skip_doctest(doc):\n if doc is None:\n return ''\n return '\\n'.join([_skip_doctest(line) for line in doc.split('\\n')])\n\n\ndef derived_from(original_klass, version=None, ua_args=[]):\n \"\"\"Decorator to attach original class's docstring to the wrapped method.\n\n Parameters\n ----------\n original_klass: type\n Original class which the method is derived from\n version : str\n Original package version which supports the wrapped method\n ua_args : list\n List of keywords which Dask doesn't support. 
Keywords existing in\n original but not in Dask will automatically be added.\n \"\"\"\n def wrapper(method):\n method_name = method.__name__\n\n try:\n # do not use wraps here, as it hides keyword arguments displayed\n # in the doc\n original_method = getattr(original_klass, method_name)\n doc = original_method.__doc__\n if doc is None:\n doc = ''\n\n try:\n method_args = getargspec(method).args\n original_args = getargspec(original_method).args\n not_supported = [m for m in original_args if m not in method_args]\n except TypeError:\n not_supported = []\n\n if len(ua_args) > 0:\n not_supported.extend(ua_args)\n\n if len(not_supported) > 0:\n note = (\"\\n Notes\\n -----\\n\"\n \" Dask doesn't supports following argument(s).\\n\\n\")\n args = ''.join([' * {0}\\n'.format(a) for a in not_supported])\n doc = doc + note + args\n doc = skip_doctest(doc)\n method.__doc__ = doc\n return method\n\n except AttributeError:\n module_name = original_klass.__module__.split('.')[0]\n\n @functools.wraps(method)\n def wrapped(*args, **kwargs):\n msg = \"Base package doesn't support '{0}'.\".format(method_name)\n if version is not None:\n msg2 = \" Use {0} {1} or later to use this method.\"\n msg += msg2.format(module_name, version)\n raise NotImplementedError(msg)\n return wrapped\n return wrapper\n\n\ndef funcname(func):\n \"\"\"Get the name of a function.\"\"\"\n # functools.partial\n if isinstance(func, functools.partial):\n return funcname(func.func)\n # methodcaller\n if isinstance(func, methodcaller):\n return func.method\n\n module_name = getattr(func, '__module__', None) or ''\n type_name = getattr(type(func), '__name__', None) or ''\n\n # toolz.curry\n if 'toolz' in module_name and 'curry' == type_name:\n return func.func_name\n # multipledispatch objects\n if 'multipledispatch' in module_name and 'Dispatcher' == type_name:\n return func.name\n\n # All other callables\n try:\n name = func.__name__\n if name == '<lambda>':\n return 'lambda'\n return name\n except:\n return str(func)\n\n\ndef ensure_bytes(s):\n \"\"\" Turn string or bytes to bytes\n\n >>> ensure_bytes(u'123')\n '123'\n >>> ensure_bytes('123')\n '123'\n >>> ensure_bytes(b'123')\n '123'\n \"\"\"\n if isinstance(s, bytes):\n return s\n if hasattr(s, 'encode'):\n return s.encode()\n msg = \"Object %s is neither a bytes object nor has an encode method\"\n raise TypeError(msg % s)\n\n\ndef ensure_unicode(s):\n \"\"\" Turn string or bytes to bytes\n\n >>> ensure_unicode(u'123')\n u'123'\n >>> ensure_unicode('123')\n u'123'\n >>> ensure_unicode(b'123')\n u'123'\n \"\"\"\n if isinstance(s, unicode):\n return s\n if hasattr(s, 'decode'):\n return s.decode()\n msg = \"Object %s is neither a bytes object nor has an encode method\"\n raise TypeError(msg % s)\n\n\ndef digit(n, k, base):\n \"\"\"\n\n >>> digit(1234, 0, 10)\n 4\n >>> digit(1234, 1, 10)\n 3\n >>> digit(1234, 2, 10)\n 2\n >>> digit(1234, 3, 10)\n 1\n \"\"\"\n return n // base**k % base\n\n\ndef insert(tup, loc, val):\n \"\"\"\n\n >>> insert(('a', 'b', 'c'), 0, 'x')\n ('x', 'b', 'c')\n \"\"\"\n L = list(tup)\n L[loc] = val\n return tuple(L)\n\n\ndef build_name_function(max_int):\n \"\"\" Returns a function that receives a single integer\n and returns it as a string padded by enough zero characters\n to align with maximum possible integer\n\n >>> name_f = build_name_function(57)\n\n >>> name_f(7)\n '07'\n >>> name_f(31)\n '31'\n >>> build_name_function(1000)(42)\n '0042'\n >>> build_name_function(999)(42)\n '042'\n >>> build_name_function(0)(0)\n '0'\n \"\"\"\n # handle corner cases 
max_int is 0 or exact power of 10\n max_int += 1e-8\n\n pad_length = int(math.ceil(math.log10(max_int)))\n\n def name_function(i):\n return str(i).zfill(pad_length)\n\n return name_function\n\n\ndef infer_storage_options(urlpath, inherit_storage_options=None):\n \"\"\" Infer storage options from URL path and merge it with existing storage\n options.\n\n Parameters\n ----------\n urlpath: str or unicode\n Either local absolute file path or URL (hdfs://namenode:8020/file.csv)\n storage_options: dict (optional)\n Its contents will get merged with the inferred information from the\n given path\n\n Returns\n -------\n Storage options dict.\n\n Examples\n --------\n >>> infer_storage_options('/mnt/datasets/test.csv') # doctest: +SKIP\n {\"protocol\": \"file\", \"path\", \"/mnt/datasets/test.csv\"}\n >>> infer_storage_options(\n ... 'hdfs://username:pwd@node:123/mnt/datasets/test.csv?q=1',\n ... inherit_storage_options={'extra': 'value'}) # doctest: +SKIP\n {\"protocol\": \"hdfs\", \"username\": \"username\", \"password\": \"pwd\",\n \"host\": \"node\", \"port\": 123, \"path\": \"/mnt/datasets/test.csv\",\n \"url_query\": \"q=1\", \"extra\": \"value\"}\n \"\"\"\n # Handle Windows paths including disk name in this special case\n if re.match(r'^[a-zA-Z]:[\\\\/]', urlpath):\n return {'protocol': 'file',\n 'path': urlpath}\n\n parsed_path = urlsplit(urlpath)\n protocol = parsed_path.scheme or 'file'\n path = parsed_path.path\n if protocol == 'file':\n # Special case parsing file protocol URL on Windows according to:\n # https://msdn.microsoft.com/en-us/library/jj710207.aspx\n windows_path = re.match(r'^/([a-zA-Z])[:|]([\\\\/].*)$', path)\n if windows_path:\n path = '%s:%s' % windows_path.groups()\n\n inferred_storage_options = {\n 'protocol': protocol,\n 'path': path,\n }\n\n if parsed_path.netloc:\n # Parse `hostname` from netloc manually because `parsed_path.hostname`\n # lowercases the hostname which is not always desirable (e.g. 
in S3):\n # https://github.com/dask/dask/issues/1417\n inferred_storage_options['host'] = parsed_path.netloc.rsplit('@', 1)[-1].rsplit(':', 1)[0]\n if parsed_path.port:\n inferred_storage_options['port'] = parsed_path.port\n if parsed_path.username:\n inferred_storage_options['username'] = parsed_path.username\n if parsed_path.password:\n inferred_storage_options['password'] = parsed_path.password\n\n if parsed_path.query:\n inferred_storage_options['url_query'] = parsed_path.query\n if parsed_path.fragment:\n inferred_storage_options['url_fragment'] = parsed_path.fragment\n\n if inherit_storage_options:\n if set(inherit_storage_options) & set(inferred_storage_options):\n raise KeyError(\"storage options (%r) and path url options (%r) \"\n \"collision is detected\"\n % (inherit_storage_options, inferred_storage_options))\n inferred_storage_options.update(inherit_storage_options)\n\n return inferred_storage_options\n\n\ndef dependency_depth(dsk):\n import toolz\n\n deps, _ = get_deps(dsk)\n\n @toolz.memoize\n def max_depth_by_deps(key):\n if not deps[key]:\n return 1\n\n d = 1 + max(max_depth_by_deps(dep_key) for dep_key in deps[key])\n return d\n\n return max(max_depth_by_deps(dep_key) for dep_key in deps.keys())\n\n\ndef eq_strict(a, b):\n \"\"\"Returns True if both values have the same type and are equal.\"\"\"\n if type(a) is type(b):\n return a == b\n return False\n\n\ndef memory_repr(num):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0\n\n\ndef put_lines(buf, lines):\n if any(not isinstance(x, unicode) for x in lines):\n lines = [unicode(x) for x in lines]\n buf.write('\\n'.join(lines))\n\n\n_method_cache = {}\n\n\nclass methodcaller(object):\n \"\"\"Return a callable object that calls the given method on its operand.\n\n Unlike the builtin `methodcaller`, this class is serializable\"\"\"\n\n __slots__ = ('method',)\n func = property(lambda self: self.method) # For `funcname` to work\n\n def __new__(cls, method):\n if method in _method_cache:\n return _method_cache[method]\n self = object.__new__(cls)\n self.method = method\n _method_cache[method] = self\n return self\n\n def __call__(self, obj, *args, **kwargs):\n return getattr(obj, self.method)(*args, **kwargs)\n\n def __reduce__(self):\n return (methodcaller, (self.method,))\n\n def __str__(self):\n return \"<%s: %s>\" % (self.__class__.__name__, self.method)\n\n __repr__ = __str__\n\n\nclass MethodCache(object):\n \"\"\"Attribute access on this object returns a methodcaller for that\n attribute.\n\n Examples\n --------\n >>> a = [1, 3, 3]\n >>> M.count(a, 3) == a.count(3)\n True\n \"\"\"\n __getattr__ = staticmethod(methodcaller)\n __dir__ = lambda self: list(_method_cache)\n\n\nM = MethodCache()\n\n\nclass SerializableLock(object):\n _locks = WeakValueDictionary()\n \"\"\" A Serializable per-process Lock\n\n This wraps a normal ``threading.Lock`` object and satisfies the same\n interface. However, this lock can also be serialized and sent to different\n processes. 
It will not block concurrent operations between processes (for\n this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``\n but will consistently deserialize into the same lock.\n\n So if we make a lock in one process::\n\n lock = SerializableLock()\n\n And then send it over to another process multiple times::\n\n bytes = pickle.dumps(lock)\n a = pickle.loads(bytes)\n b = pickle.loads(bytes)\n\n Then the deserialized objects will operate as though they were the same\n lock, and collide as appropriate.\n\n This is useful for consistently protecting resources on a per-process\n level.\n\n The creation of locks is itself not threadsafe.\n \"\"\"\n def __init__(self, token=None):\n self.token = token or str(uuid.uuid4())\n if self.token in SerializableLock._locks:\n self.lock = SerializableLock._locks[self.token]\n else:\n self.lock = Lock()\n SerializableLock._locks[self.token] = self.lock\n\n def acquire(self, *args):\n return self.lock.acquire(*args)\n\n def release(self, *args):\n return self.lock.release(*args)\n\n def __enter__(self):\n self.lock.__enter__()\n\n def __exit__(self, *args):\n self.lock.__exit__(*args)\n\n @property\n def locked(self):\n return self.locked\n\n def __getstate__(self):\n return self.token\n\n def __setstate__(self, token):\n self.__init__(token)\n\n def __str__(self):\n return \"<%s: %s>\" % (self.__class__.__name__, self.token)\n\n __repr__ = __str__\n\n\ndef effective_get(get=None, collection=None):\n \"\"\"Get the effective get method used in a given situation\"\"\"\n collection_get = collection._default_get if collection is not None else None\n return get or _globals.get('get') or collection_get\n\n\ndef get_scheduler_lock(get=None, collection=None):\n \"\"\"Get an instance of the appropriate lock for a certain situation based on\n scheduler used.\"\"\"\n from . import multiprocessing\n actual_get = effective_get(get, collection)\n\n if actual_get == multiprocessing.get:\n return mp.Manager().Lock()\n return SerializableLock()\n\n\ndef ensure_dict(d):\n if type(d) is dict:\n return d\n elif hasattr(d, 'dicts'):\n result = {}\n for dd in d.dicts.values():\n result.update(dd)\n return result\n return dict(d)\n\n\n_packages = {}\n\n\ndef package_of(typ):\n \"\"\" Return package containing type's definition\n\n Or return None if not found\n \"\"\"\n try:\n return _packages[typ]\n except KeyError:\n # http://stackoverflow.com/questions/43462701/get-package-of-python-object/43462865#43462865\n mod = inspect.getmodule(typ)\n if not mod:\n result = None\n else:\n base, _sep, _stem = mod.__name__.partition('.')\n result = sys.modules[base]\n _packages[typ] = result\n return result\n"
] |
[
[
"numpy.allclose",
"numpy.cumsum",
"numpy.iinfo",
"numpy.random.RandomState",
"numpy.empty"
]
] |
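
Among the dask utilities above, `pseudorandom` is the one that exercises the listed NumPy calls: it cumulative-sums the probability vector into bin edges and buckets uniform samples into integer categories. The same bucketing in isolation (seed and probabilities are arbitrary):

```python
import numpy as np

p = [0.5, 0.3, 0.2]               # category probabilities
rng = np.random.RandomState(123)  # seeded RandomState, as in the utils

cp = np.cumsum([0] + p)           # bin edges: [0.0, 0.5, 0.8, 1.0]
assert np.allclose(cp[-1], 1)

x = rng.random_sample(10)         # uniform draws in [0, 1)
out = np.empty(10, dtype="i1")
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
    out[(x >= low) & (x < high)] = i  # assign each draw to its bin
print(out)                        # array of 0/1/2 category indices
```
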
f-chenyi/Chlamydomonas_CCM
|
[
"3c7bf8ea178193b468217c55a770e1b83280c9a8"
] |
[
"ThylakoidStacks/EffectiveDiffusion_mesh.py"
] |
[
"# =============================================== #\n# ============ Import head packages ============ #\nfrom fenics import *\nimport pygmsh as pg\nimport os\nimport meshio\nimport numpy as np\nimport csv\n# =============================================== #\n# =============================================== #\n\n\n\n\ndef EqualSpacingIndex(m,n):\n return [i*n//m + n//(2*m) for i in range(m)]\n\n\n\n\ndef createMeshFull(N_STACK, N_nrrw, d_t, d_h, d_s, d_l, Delta_n, Delta_w, lmesh1, lmesh2, geoname, mshname, xmlname):\n \n '''\n This function outputs the mesh file for certain geometry of the thylakoid stacks,\n which is parameterized by the input variables:\n \n > N_STACKS = number of thylakoid stacks in the simulation domain\n \n > N_nrrw = number of layers with narrow gaps (see our paper for details)\n \n > d_t = thickness of the thylakoid membranes\n \n > d_h = height of the thylakoid lumen\n \n > d_s = smaller spacing between the thylakoid stacks\n \n > d_l = larger spacing between the thylakoid stacks\n \n > Delta_n = width of narrow gaps\n \n > Delta_w = width of wide gaps\n \n See Fig. S4 of our paper for more details.\n \n \n Other inputs are:\n \n > lmesh1, lmesh2: mesh size of the thylakoid membranes and the space of lumen/stroma\n \n > geoname, mshname, xmlname: directories for output .geo .msh and .xml files\n '''\n \n # y-coordinates of the thylakoid stacks\n ynull = np.linspace( 2*d_t + d_h + d_l/2 + d_s/2, 1 - (2*d_t + d_h + d_l/2 + d_s/2), N_STACK)\n yminus = ynull - d_t - d_s/2 - d_h/2\n yplus = ynull + d_t + d_s/2 + d_h/2\n yc = list(yminus) + list(yplus)\n yc.sort()\n yc_STACK = np.array(yc)\n \n # x-coordinates of the center of the gaps. Note that we model a geometry where all the gaps are aligned.\n xc_OPEN = 0.5*np.ones(len(yc_STACK))\n \n \n # gap size\n id_nrrw = EqualSpacingIndex(N_nrrw,N_STACK)\n Delta_STACK = Delta_w * np.ones(len(yc_STACK))\n Delta_STACK[np.array(id_nrrw)*2] = Delta_n\n Delta_STACK[np.array(id_nrrw)*2+1] = Delta_n\n \n \n geom = pg.built_in.Geometry()\n \n # counters of geometric objects\n domain_count = 1\n point_count = 1\n line_count = 1\n surf_count = 1\n \n # draw the bottom line\n p_init = geom.add_point([1., 0., 0.],lcar = lmesh1)\n p_now = p_init\n p_next = geom.add_point([0., 0., 0.],lcar = lmesh1)\n line_now = geom.add_line(p_now,p_next)\n line_sum = [line_now]\n p_now = p_next\n \n # draw the left part of the domain\n for i in np.arange(len(yc_STACK)):\n p1 = geom.add_point([ 0. 
, yc_STACK[i]-d_h/2, 0.0], lcar=lmesh2)\n p2 = geom.add_point([xc_OPEN[i] - Delta_STACK[i]/2 - d_t, yc_STACK[i]-d_h/2, 0.0], lcar=lmesh2)\n p3 = geom.add_point([xc_OPEN[i] - Delta_STACK[i]/2 - d_t, yc_STACK[i]+d_h/2, 0.0], lcar=lmesh2)\n p4 = geom.add_point([ 0., yc_STACK[i]+d_h/2, 0.0], lcar=lmesh2)\n p5 = geom.add_point([ 0., yc_STACK[i]+d_h/2+d_t, 0.0], lcar=lmesh2)\n p6 = geom.add_point([ xc_OPEN[i] - Delta_STACK[i]/2, yc_STACK[i]+d_h/2+d_t, 0.0], lcar=lmesh2)\n p7 = geom.add_point([ xc_OPEN[i] - Delta_STACK[i]/2, yc_STACK[i]-d_h/2-d_t, 0.0], lcar=lmesh2)\n p8 = geom.add_point([ 0., yc_STACK[i]-d_h/2-d_t, 0.0], lcar=lmesh2)\n\n line0 = geom.add_line(p_now,p8)\n line1 = geom.add_line(p8,p7)\n line2 = geom.add_line(p7,p6)\n line3 = geom.add_line(p6,p5)\n line4 = geom.add_line(p5,p4)\n line5 = geom.add_line(p4,p3)\n line6 = geom.add_line(p3,p2)\n line7 = geom.add_line(p2,p1)\n line8 = geom.add_line(p1,p8)\n line9 = geom.add_line(p1,p4)\n\n lineloop1 = geom.add_line_loop([line1,line2,line3,line4,\\\n line5,line6,line7,line8])\n lineloop2 = geom.add_line_loop([line5,line6,line7,line9])\n\n line_sum = line_sum + [line0]\n line_sum = line_sum + [line1]\n line_sum = line_sum + [line2]\n line_sum = line_sum + [line3]\n\n surf1 = geom.add_plane_surface(lineloop2)\n psurf1 = geom.add_physical(surf1,label=domain_count)\n domain_count += 1\n surf2 = geom.add_plane_surface(lineloop1)\n psurf2 = geom.add_physical(surf2,label=domain_count)\n domain_count += 1\n\n p_now = p5\n \n # draw the top lines\n p_next = geom.add_point([0., 1., 0.],lcar = lmesh1)\n line_sum = line_sum + [geom.add_line(p_now,p_next)]\n p_now = p_next\n\n p_next = geom.add_point([1., 1., 0.],lcar = lmesh1)\n line_sum = line_sum + [geom.add_line(p_now,p_next)]\n p_now = p_next\n \n # draw the right part\n i = 0\n for i in np.arange(len(yc_STACK))[::-1]:\n\n p1 = geom.add_point([ 1. 
, yc_STACK[i]-d_h/2, 0.0], lcar=lmesh2)\n p2 = geom.add_point([xc_OPEN[i] + Delta_STACK[i]/2 + d_t, yc_STACK[i]-d_h/2, 0.0], lcar=lmesh2)\n p3 = geom.add_point([xc_OPEN[i] + Delta_STACK[i]/2 + d_t, yc_STACK[i]+d_h/2, 0.0], lcar=lmesh2)\n p4 = geom.add_point([ 1., yc_STACK[i]+d_h/2, 0.0], lcar=lmesh2)\n p5 = geom.add_point([ 1., yc_STACK[i]+d_h/2+d_t, 0.0], lcar=lmesh2)\n p6 = geom.add_point([ xc_OPEN[i] + Delta_STACK[i]/2, yc_STACK[i]+d_h/2+d_t, 0.0], lcar=lmesh2)\n p7 = geom.add_point([ xc_OPEN[i] + Delta_STACK[i]/2, yc_STACK[i]-d_h/2-d_t, 0.0], lcar=lmesh2)\n p8 = geom.add_point([ 1., yc_STACK[i]-d_h/2-d_t, 0.0], lcar=lmesh2)\n\n line0 = geom.add_line(p_now,p5)\n line1 = geom.add_line(p5,p6)\n line2 = geom.add_line(p6,p7)\n line3 = geom.add_line(p7,p8)\n line4 = geom.add_line(p8,p1)\n line5 = geom.add_line(p1,p2)\n line6 = geom.add_line(p2,p3)\n line7 = geom.add_line(p3,p4)\n line8 = geom.add_line(p4,p5)\n line9 = geom.add_line(p4,p1)\n\n lineloop1 = geom.add_line_loop([line1,line2,line3,line4,\\\n line5,line6,line7,line8])\n lineloop2 = geom.add_line_loop([line5,line6,line7,line9])\n\n line_sum = line_sum + [line0]\n line_sum = line_sum + [line1]\n line_sum = line_sum + [line2]\n line_sum = line_sum + [line3]\n\n surf1 = geom.add_plane_surface(lineloop2)\n psurf1 = geom.add_physical(surf1,label=domain_count)\n domain_count += 1\n surf2 = geom.add_plane_surface(lineloop1)\n psurf2 = geom.add_physical(surf2,label=domain_count)\n domain_count += 1\n\n p_now = p8\n \n # add the stroma domain\n line_sum = line_sum + [geom.add_line(p_now,p_init)] # line contour of all the outer boundaries of thyalkoid membranes\n line_stroma = geom.add_line_loop(line_sum)\n surf_stroma = geom.add_plane_surface(line_stroma)\n psurf_stroma= geom.add_physical(surf_stroma,label=domain_count)\n\n mesh = pg.helpers.generate_mesh(geom,geo_filename=geoname)\n \n # Make sure that Gmsh is installed under the following directory\n os.system('/Applications/Gmsh.app/Contents/MacOS/gmsh %s'%geoname\\\n + ' -2 -o %s'%mshname)\n os.system('dolfin-convert %s'%mshname + ' %s'%xmlname)\n \n# =============================================== #\n# =============================================== #\n\n\n\n"
] |
[
[
"numpy.array",
"numpy.linspace"
]
] |
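
`createMeshFull` starts from pure bookkeeping: `np.linspace` spreads the N_STACK centers across the unit cell with a margin of 2*d_t + d_h + d_l/2 + d_s/2, and offsets of d_t + d_s/2 + d_h/2 then give the two lumen centerlines per stack. That layout can be checked without pygmsh or Gmsh installed; a sketch with toy parameters (values are illustrative, not the paper's):

```python
import numpy as np

# Toy geometry parameters (illustrative only).
N_STACK, d_t, d_h, d_s, d_l = 3, 0.01, 0.04, 0.02, 0.06

margin = 2 * d_t + d_h + d_l / 2 + d_s / 2
ynull = np.linspace(margin, 1 - margin, N_STACK)  # stack centers
yminus = ynull - d_t - d_s / 2 - d_h / 2          # lower lumen centerlines
yplus = ynull + d_t + d_s / 2 + d_h / 2           # upper lumen centerlines

yc_STACK = np.array(sorted(list(yminus) + list(yplus)))
print(yc_STACK)  # 2 * N_STACK centerlines, bottom to top
```
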
kevinbro96/perceptual-advex
|
[
"e40ee996ab8c4ae4575004bf7b6b8ab5757ed6bb"
] |
[
"perceptual_advex/vae.py"
] |
[
"from __future__ import print_function\nimport abc\nimport os\nimport math\n\nimport numpy as np\nimport logging\nimport torch\nimport torch.utils.data\nfrom torch import nn\nfrom torch.nn import init\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nimport pdb\n\nCIFAR_MEAN = [0.4914, 0.4822, 0.4465]\nCIFAR_STD = [0.2470, 0.2435, 0.2616]\n\ndef get_eps_params(base_eps, resol):\n eps_list = []\n max_list = []\n min_list = []\n for i in range(3):\n eps_list.append(torch.full((resol, resol), base_eps, device='cuda'))\n min_list.append(torch.full((resol, resol), 0., device='cuda'))\n max_list.append(torch.full((resol, resol), 255., device='cuda'))\n\n eps_t = torch.unsqueeze(torch.stack(eps_list), 0)\n max_t = torch.unsqueeze(torch.stack(max_list), 0)\n min_t = torch.unsqueeze(torch.stack(min_list), 0)\n return eps_t, max_t, min_t\n\ndef get_cifar_params(resol):\n mean_list = []\n std_list = []\n for i in range(3):\n mean_list.append(torch.full((resol, resol), CIFAR_MEAN[i], device='cuda'))\n std_list.append(torch.full((resol, resol), CIFAR_STD[i], device='cuda'))\n return torch.unsqueeze(torch.stack(mean_list), 0), torch.unsqueeze(torch.stack(std_list), 0)\n\nclass CIFARNORMALIZE(nn.Module):\n def __init__(self, resol):\n super().__init__()\n self.mean, self.std = get_cifar_params(resol)\n\n def forward(self, x):\n '''\n Parameters:\n x: input image with pixels normalized to ([0, 1] - IMAGENET_MEAN) / IMAGENET_STD\n '''\n x = x.sub(self.mean)\n x = x.div(self.std)\n return x\n\nclass CIFARINNORMALIZE(nn.Module):\n def __init__(self, resol):\n super().__init__()\n self.mean, self.std = get_cifar_params(resol)\n\n def forward(self, x):\n '''\n Parameters:\n x: input image with pixels normalized to ([0, 1] - IMAGENET_MEAN) / IMAGENET_STD\n '''\n x = x.mul(self.std)\n x = x.add(*self.mean)\n return x\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)\n\ndef conv_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.xavier_uniform_(m.weight, gain=np.sqrt(2))\n init.constant_(m.bias, 0)\n elif classname.find('BatchNorm') != -1:\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n\nclass ResBlock(nn.Module):\n def __init__(self, in_channels, out_channels, mid_channels=None, bn=False):\n super(ResBlock, self).__init__()\n\n if mid_channels is None:\n mid_channels = out_channels\n\n layers = [\n nn.LeakyReLU(),\n nn.Conv2d(in_channels, mid_channels, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(mid_channels, out_channels, kernel_size=1, stride=1, padding=0)]\n if bn:\n layers.insert(2, nn.BatchNorm2d(out_channels))\n self.convs = nn.Sequential(*layers)\n\n def forward(self, x):\n return x + self.convs(x)\n\nclass AbstractAutoEncoder(nn.Module):\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def encode(self, x):\n return\n\n @abc.abstractmethod\n def decode(self, z):\n return\n\n @abc.abstractmethod\n def forward(self, x):\n \"\"\"model return (reconstructed_x, *)\"\"\"\n return\n\n @abc.abstractmethod\n def sample(self, size):\n \"\"\"sample new images from model\"\"\"\n return\n\n @abc.abstractmethod\n def loss_function(self, **kwargs):\n \"\"\"accepts (original images, *) where * is the same as returned from forward()\"\"\"\n return\n\n @abc.abstractmethod\n def latest_losses(self):\n \"\"\"returns the latest losses in a dictionary. 
Useful for logging.\"\"\"\n return\n\nclass CVAE_s1_n(AbstractAutoEncoder):\n def __init__(self, d, z, **kwargs):\n super(CVAE_s1_n, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv2d(3, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(d // 2, d, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d),\n nn.ReLU(inplace=True),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=True),\n )\n\n self.decoder = nn.Sequential(\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n\n nn.ConvTranspose2d(d, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.LeakyReLU(inplace=True),\n nn.ConvTranspose2d(d // 2, 3, kernel_size=4, stride=2, padding=1, bias=False),\n )\n\n self.xi_bn = nn.BatchNorm2d(3)\n\n self.f = 8\n self.d = d\n self.z = z\n self.fc11 = nn.Linear(d * self.f ** 2, self.z)\n self.fc12 = nn.Linear(d * self.f ** 2, self.z)\n self.fc21 = nn.Linear(self.z, d * self.f ** 2)\n self.classifier = Wide_ResNet(28, 10, 0.3, 10)\n\n def encode(self, x):\n h = self.encoder(x)\n h1 = h.view(-1, self.d * self.f ** 2)\n return h, self.fc11(h1), self.fc12(h1)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = logvar.mul(0.5).exp_()\n eps = std.new(std.size()).normal_()\n return eps.mul(std).add_(mu)\n else:\n return mu\n\n def decode(self, z):\n z = z.view(-1, self.d, self.f, self.f)\n h3 = self.decoder(z)\n return torch.tanh(h3)\n\n def forward(self, x):\n _, mu, logvar = self.encode(x)\n hi = self.reparameterize(mu, logvar)\n hi_projected = self.fc21(hi)\n xi = self.decode(hi_projected)\n xi = self.xi_bn(xi)\n\n with torch.no_grad():\n out = self.classifier(x)\n out1 = self.classifier(xi)\n out2 = self.classifier(x-xi)\n return out, out1, out2, hi, xi, mu, logvar\n\nclass CVAE_s2(AbstractAutoEncoder):\n def __init__(self, d, z, **kwargs):\n super(CVAE_s2, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv2d(3, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(d // 2, d, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d),\n nn.ReLU(inplace=True),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=True),\n )\n\n self.decoder = nn.Sequential(\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n\n nn.ConvTranspose2d(d, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.LeakyReLU(inplace=True),\n nn.ConvTranspose2d(d // 2, 3, kernel_size=4, stride=2, padding=1, bias=False),\n )\n self.xi_bn = nn.BatchNorm2d(3)\n\n self.f = 8\n self.d = d\n self.z = z\n self.fc11 = nn.Linear(d * self.f ** 2, self.z)\n self.fc12 = nn.Linear(d * self.f ** 2, self.z)\n self.fc21 = nn.Linear(self.z, d * self.f ** 2)\n def encode(self, x):\n h = self.encoder(x)\n h1 = h.view(-1, self.d * self.f ** 2)\n return h, self.fc11(h1), self.fc12(h1)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = logvar.mul(0.5).exp_()\n eps = std.new(std.size()).normal_()\n return eps.mul(std).add_(mu)\n else:\n return mu\n\n def decode(self, z):\n z = z.view(-1, self.d, self.f, self.f)\n h3 = self.decoder(z)\n return torch.tanh(h3)\n\n def forward(self, x, mode):\n if mode == \"x-xi\":\n _, mu, logvar = self.encode(x)\n hi = self.reparameterize(mu, logvar)\n hi_projected = self.fc21(hi)\n xi = self.decode(hi_projected)\n xi = 
self.xi_bn(xi)\n return xi\n elif mode == \"x-hi\":\n _, mu, logvar = self.encode(x)\n hi = self.reparameterize(mu, logvar)\n return hi\n elif mode == \"hi-xi\":\n hi_projected = self.fc21(x)\n xi = self.decode(hi_projected)\n xi = self.xi_bn(xi)\n return xi\n\nclass CVAE_s3(AbstractAutoEncoder):\n def __init__(self, d, z, **kwargs):\n super(CVAE_s3, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv2d(3, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(d // 2, d, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d),\n nn.ReLU(inplace=True),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=False),\n )\n\n self.decoder = nn.Sequential(\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n\n nn.ConvTranspose2d(d, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.LeakyReLU(inplace=True),\n nn.ConvTranspose2d(d // 2, 3, kernel_size=4, stride=2, padding=1, bias=False),\n nn.Sigmoid()\n )\n\n self.f = 8\n self.d = d\n self.z = z\n self.fc11 = nn.Linear(d * self.f ** 2, self.z)\n self.fc12 = nn.Linear(d * self.f ** 2, self.z)\n self.fc21 = nn.Linear(self.z, d * self.f ** 2)\n def encode(self, x):\n h = self.encoder(x)\n h1 = h.view(-1, self.d * self.f ** 2)\n return h, self.fc11(h1), self.fc12(h1)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = logvar.mul(0.5).exp_()\n eps = std.new(std.size()).normal_()\n return eps.mul(std).add_(mu)\n else:\n return mu\n\n def decode(self, z):\n z = z.view(-1, self.d, self.f, self.f)\n h3 = self.decoder(z)\n return h3\n\n def forward(self, x):\n\n _, mu, logvar = self.encode(x)\n hi = self.reparameterize(mu, logvar)\n hi_projected = self.fc21(hi)\n xi = self.decode(hi_projected)\n\n return mu, logvar, xi\n\nclass CVAE_Normalize(AbstractAutoEncoder):\n def __init__(self, d, z, **kwargs):\n super(CVAE_Normalize, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv2d(3, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(d // 2, d, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d),\n nn.ReLU(inplace=True),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=True),\n )\n\n self.decoder = nn.Sequential(\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n\n nn.ConvTranspose2d(d, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.LeakyReLU(inplace=True),\n nn.ConvTranspose2d(d // 2, 3, kernel_size=4, stride=2, padding=1, bias=False),\n )\n self.xi_bn = nn.BatchNorm2d(3)\n\n self.f = 8\n self.d = d\n self.z = z\n self.fc11 = nn.Linear(d * self.f ** 2, self.z)\n self.fc12 = nn.Linear(d * self.f ** 2, self.z)\n self.fc21 = nn.Linear(self.z, d * self.f ** 2)\n self.normalize = CIFARNORMALIZE(32)\n self.innormalize = CIFARINNORMALIZE(32)\n\n def encode(self, x):\n h = self.encoder(x)\n h1 = h.view(-1, self.d * self.f ** 2)\n return h, self.fc11(h1), self.fc12(h1)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = logvar.mul(0.5).exp_()\n eps = std.new(std.size()).normal_()\n return eps.mul(std).add_(mu)\n else:\n return mu\n\n def decode(self, z):\n z = z.view(-1, self.d, self.f, self.f)\n h3 = self.decoder(z)\n return torch.tanh(h3)\n\n def forward(self, x):\n x = self.normalize(x)\n _, mu, logvar = self.encode(x)\n hi = self.reparameterize(mu, 
logvar)\n hi_projected = self.fc21(hi)\n xi = self.decode(hi_projected)\n xi = self.xi_bn(xi)\n xi = self.innormalize(xi)\n return mu, logvar, xi\n\nclass CVAE_Normalize_Rand(AbstractAutoEncoder):\n def __init__(self, d, z, **kwargs):\n super(CVAE_Normalize_Rand, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv2d(3, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(d // 2, d, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d),\n nn.ReLU(inplace=True),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=True),\n )\n\n self.decoder = nn.Sequential(\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n\n nn.ConvTranspose2d(d, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.LeakyReLU(inplace=True),\n nn.ConvTranspose2d(d // 2, 3, kernel_size=4, stride=2, padding=1, bias=False),\n )\n self.xi_bn = nn.BatchNorm2d(3)\n\n self.f = 8\n self.d = d\n self.z = z\n self.fc11 = nn.Linear(d * self.f ** 2, self.z)\n self.fc12 = nn.Linear(d * self.f ** 2, self.z)\n self.fc21 = nn.Linear(self.z, d * self.f ** 2)\n self.normalize = CIFARNORMALIZE(32)\n self.innormalize = CIFARINNORMALIZE(32)\n\n def encode(self, x):\n h = self.encoder(x)\n h1 = h.view(-1, self.d * self.f ** 2)\n return h, self.fc11(h1), self.fc12(h1)\n\n def reparameterize(self, mu, logvar):\n std = logvar.mul(0.5).exp_()\n eps = std.new(std.size()).normal_()\n return eps.mul(std).add_(mu)\n\n\n def decode(self, z):\n z = z.view(-1, self.d, self.f, self.f)\n h3 = self.decoder(z)\n return torch.tanh(h3)\n\n def forward(self, x):\n x = self.normalize(x)\n _, mu, logvar = self.encode(x)\n hi = self.reparameterize(mu, logvar)\n hi_projected = self.fc21(hi)\n xi = self.decode(hi_projected)\n xi = self.xi_bn(xi)\n xi = self.innormalize(xi)\n return mu, logvar, xi\n\nclass CAE(AbstractAutoEncoder):\n def __init__(self, d, z, **kwargs):\n super(CAE, self).__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv2d(3, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.ReLU(inplace=True),\n nn.Conv2d(d // 2, d, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d),\n nn.ReLU(inplace=True),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=True),\n )\n\n self.decoder = nn.Sequential(\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n ResBlock(d, d, bn=True),\n nn.BatchNorm2d(d),\n\n nn.ConvTranspose2d(d, d // 2, kernel_size=4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(d // 2),\n nn.LeakyReLU(inplace=True),\n nn.ConvTranspose2d(d // 2, 3, kernel_size=4, stride=2, padding=1, bias=False),\n )\n self.xi_bn = nn.BatchNorm2d(3)\n\n self.f = 8\n self.d = d\n self.z = z\n self.fc21 = nn.Linear(self.z, d * self.f ** 2)\n self.normalize = CIFARNORMALIZE(32)\n self.innormalize = CIFARINNORMALIZE(32)\n\n def encode(self, x):\n h = self.encoder(x)\n h1 = h.view(-1, self.d * self.f ** 2)\n return h1\n\n def decode(self, z):\n z = z.view(-1, self.d, self.f, self.f)\n h3 = self.decoder(z)\n return torch.tanh(h3)\n\n def forward(self, x):\n x = self.normalize(x)\n hi = self.encode(x)\n hi_projected = self.fc21(hi)\n xi = self.decode(hi_projected)\n xi = self.xi_bn(xi)\n xi = self.innormalize(xi)\n return 1, 1, xi\n\n\n"
] |
[
[
"torch.nn.Sequential",
"numpy.sqrt",
"torch.full",
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.tanh",
"torch.nn.Linear",
"torch.no_grad",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.stack",
"torch.nn.ReLU"
]
] |
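
Every VAE variant in vae.py samples its latent with the same reparameterization trick: draw eps from N(0, 1), scale by std = exp(logvar / 2), and shift by mu, so the sampling stays differentiable with respect to the encoder outputs. A minimal standalone version (shapes are arbitrary):

```python
import torch

def reparameterize(mu, logvar):
    """z = mu + std * eps with std = exp(logvar / 2), as in vae.py."""
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std

mu = torch.zeros(4, 8, requires_grad=True)      # toy latent means
logvar = torch.zeros(4, 8, requires_grad=True)  # logvar 0 -> std 1
z = reparameterize(mu, logvar)
z.sum().backward()                   # gradients flow back to mu and logvar
print(z.shape, mu.grad is not None)  # torch.Size([4, 8]) True
```
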
danholdaway/eva
|
[
"a5a784953479132080fa3a2ea5b9e9d3dc08cd68"
] |
[
"src/eva/data/data_collections.py"
] |
[
"# (C) Copyright 2021-2022 NOAA/NWS/EMC\n#\n# (C) Copyright 2021-2022 United States Government as represented by the Administrator of the\n# National Aeronautics and Space Administration. All Rights Reserved.\n#\n# This software is licensed under the terms of the Apache Licence Version 2.0\n# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.\n\n\n# --------------------------------------------------------------------------------------------------\n\n\nimport numpy as np\nimport xarray as xr\n\nfrom eva.utilities.logger import Logger\nfrom eva.utilities.utils import fontColors as fcol, string_does_not_contain\n\n\n# --------------------------------------------------------------------------------------------------\n\n\n# Characters that are not permitted in collection, group and variable names.\n# Math chars not allowed in order to allow evaluation of the variables in the transforms\ndisallowed_chars = '-+*/()'\n\n\n# --------------------------------------------------------------------------------------------------\n\n\nclass DataCollections:\n\n def __init__(self):\n\n # Dictionary to map between collection name and collection itself\n self._collections = {}\n\n # Create a logger\n self.logger = Logger('DataCollections')\n\n # ----------------------------------------------------------------------------------------------\n\n def create_or_add_to_collection(self, collection_name, collection, concat_dimension=None):\n\n # Collections should only be xarray datasets\n if not isinstance(collection, xr.Dataset):\n self.logger.abort('In add_collection: collection must be an xarray.Dataset')\n\n # Check that there is not an existing collection that is empty\n if collection_name in self._collections:\n if not list(self._collections[collection_name].keys()):\n self.logger.abort('In create_or_add_to_collection the collection \\'' +\n collection_name + '\\' is already in existence but appears to ' +\n 'be empty.')\n\n # Create the collection or concatenate with existing collection\n # If the collection does not already exist within the dictionary then the incoming\n # collection is used to initialize that collection. If the collection already exists the\n # below will abort, unless a concatenation dimension is offered and it is a valid dimension\n # in the existing collection.\n if collection_name not in self._collections:\n self._collections[collection_name] = collection.copy(deep=False)\n else:\n if concat_dimension is None:\n self.logger.abort('In create_or_add_to_collection the collection \\'' +\n collection_name + '\\' being added already exists. Either ' +\n 'remove collection or provide a dimension along which to ' +\n 'concatenate.')\n dims = list(self._collections[collection_name].dims)\n if concat_dimension not in dims:\n self.logger.abort('In create_or_add_to_collection the collection \\'' +\n collection_name + '\\' does not have the dimension \\'' +\n concat_dimension + '\\' that is requested as the dimension ' +\n 'along which to concatenate. 
Valid dimensions are ' +\n f'{dims}')\n self._collections[collection_name] = xr.concat([self._collections[collection_name],\n collection], dim=concat_dimension)\n\n # Check that nothing violates the naming conventions\n self.validate_names()\n\n # ----------------------------------------------------------------------------------------------\n\n def add_variable_to_collection(self, collection_name, group_name, variable_name, variable):\n\n # Assert that new variable is an xarray Dataarray\n if not isinstance(variable, xr.DataArray):\n self.logger.abort('In add_variable_to_collection: variable must be xarray.DataArray')\n\n # Check that there is not an existing collection that is empty\n if collection_name not in self._collections:\n # Create a new collection to hold the variable\n self._collections[collection_name] = xr.Dataset()\n\n # Combine the group and variable name\n group_variable_name = group_name + '::' + variable_name\n\n # Add the variable to the collection\n self._collections[collection_name][group_variable_name] = variable\n\n # Check that nothing violates the naming conventions\n self.validate_names()\n\n # ----------------------------------------------------------------------------------------------\n\n def get_variable_data_array(self, collection_name, group_name, variable_name, channels=None):\n\n group_variable_name = group_name + '::' + variable_name\n\n data_array = self._collections[collection_name][group_variable_name]\n\n if channels is None:\n return data_array\n elif isinstance(channels, int) or not any(not isinstance(c, int) for c in channels):\n # nchans must be a dimension if it will be used for selection\n if 'nchans' not in list(self._collections[collection_name].dims):\n self.logger.abort('In get_variable_data_array channels is provided but nchans ' +\n 'is not a dimension of the Dataset')\n # Make sure it is a list\n channels_sel = []\n channels_sel.append(channels)\n\n # Create a new DataArray with the requested channels\n data_array_channels = data_array.sel(nchans=channels_sel)\n return data_array_channels\n\n else:\n self.logger.abort('In get_variable_data_array channels is neither none or list of ' +\n 'integers')\n\n # ----------------------------------------------------------------------------------------------\n\n def get_variable_data(self, collection_name, group_name, variable_name, channels=None):\n\n variable_array = self.get_variable_data_array(collection_name, group_name, variable_name,\n channels)\n\n # Extract the actual data array\n variable_data = variable_array.data\n\n # Squeeze in case of dimension of 1 (e.g. 
when 1 channel is needed)\n variable_data = np.squeeze(variable_data)\n\n return variable_data\n\n # ----------------------------------------------------------------------------------------------\n\n def validate_names(self):\n\n # This code checks that the naming conventions are compliant with what is expected\n\n for collection_key in self._collections.keys():\n\n # Assert that the collection name does not contain disallowed characters\n if not string_does_not_contain(disallowed_chars, collection_key):\n self.logger.abort(f'Collection contains the key \\'{collection_key}\\', which ' +\n f'contains a character that is not permitted ' +\n f'({disallowed_chars})')\n\n # Loop over the data variables\n for data_var in list(self._collections[collection_key].data_vars):\n\n # Assert that the datavar contains '::' identifier, splitting group and variable\n if '::' not in data_var:\n self.logger.abort(f'Collection \\'{collection_key}\\' contains the following ' +\n f'data variable \\'{data_var}\\', which does not contain ' +\n f'\\'::\\' splitting the group and variable.')\n [group, variable] = data_var.split('::')\n # Assert that the group name does not contain disallowed characters\n if not string_does_not_contain(disallowed_chars, group):\n self.logger.abort(f'Collection \\'{collection_key}\\' contains the following ' +\n f'element \\'{data_var}\\'. The group \\'{group}\\'' +\n f'contains a character that is not permitted ' +\n f'({disallowed_chars}).')\n # Assert that the variable name does not contain disallowed characters\n if not string_does_not_contain(disallowed_chars, variable):\n self.logger.abort(f'Collection \\'{collection_key}\\' contains the following ' +\n f'element \\'{data_var}\\'. The variable \\'{variable}\\'' +\n f'contains a character that is not permitted ' +\n f'({disallowed_chars}).')\n\n # ----------------------------------------------------------------------------------------------\n\n def nan_float_values_outside_threshold(self, threshold, cgv_to_screen=None):\n\n # Set the collection, group and variables\n # ---------------------------------------\n if cgv_to_screen is None:\n collections = self._collections.keys()\n else:\n cgv = cgv_to_screen.split('::')\n collections = [cgv[0]]\n groups_variables = [cgv[1]+'::'+cgv[2]]\n\n # Loop over the collections\n # ------------------------------\n for collection in collections:\n\n # Set the variables to screen\n # ---------------------------\n if cgv_to_screen is None:\n groups_variables = list(self._collections[collection].data_vars)\n\n # Loop over the variables and set to nan outside of threshold\n # -----------------------------------------------------------\n for group_variable in groups_variables:\n\n # Split name into group and variable\n [group, variable] = group_variable.split('::')\n\n # Get the data\n data_var_value = self.get_variable_data(collection, group, variable)\n\n # For float data sceen outside threshold\n if 'float' in str(data_var_value.dtype):\n data_var_value[np.abs(data_var_value) > threshold] = np.nan\n\n # ----------------------------------------------------------------------------------------------\n\n def display_collections(self):\n\n minmaxrms_format_dict = {\n 'float32': '{:+.4e}',\n 'int32': '{:+11d}',\n }\n\n # Display a list of variables that are available in the collection\n self.logger.info('-'*80)\n self.logger.info(fcol.bold + 'Collections available: ' + fcol.end)\n for collection in self._collections.keys():\n self.logger.info('')\n self.logger.info('Collection name: ' + fcol.underline 
+ collection + fcol.end)\n self.logger.info('\\n Dimensions:')\n for dim in list(self._collections[collection].dims):\n dim_value = self._collections[collection].dims[dim]\n self.logger.info(f' {dim}: {dim_value}')\n self.logger.info('\\n Coordinates:')\n for coord in list(self._collections[collection].coords):\n self.logger.info(f' {coord}')\n self.logger.info('\\n Data (group::variable):')\n data_vars = list(self._collections[collection].data_vars)\n max_name_len = len(max(data_vars, key=len))\n for data_var in data_vars:\n group_var = data_var.split('::')\n data_var_value = self.get_variable_data(collection, group_var[0], group_var[1])\n minmaxrms = ''\n if str(data_var_value.dtype) in minmaxrms_format_dict:\n minmaxrms_format = minmaxrms_format_dict[str(data_var_value.dtype)]\n min_string = 'Min=' + minmaxrms_format.format(np.nanmin(data_var_value))\n max_string = 'Max=' + minmaxrms_format.format(np.nanmax(data_var_value))\n rms_string = ''\n if str(data_var_value.dtype) == 'float32':\n rms = np.sqrt(np.nanmean(data_var_value**2))\n rms_string = 'RMS=' + minmaxrms_format.format(rms)\n minmaxrms_string = ' | ' + min_string + ', ' + max_string + ', ' + rms_string\n self.logger.info(' ' + data_var.ljust(max_name_len) + ' (' +\n str(data_var_value.dtype).ljust(7) + ')' + minmaxrms_string)\n self.logger.info('-'*80)\n\n # ----------------------------------------------------------------------------------------------\n"
] |
[
[
"numpy.nanmax",
"numpy.abs",
"numpy.squeeze",
"numpy.nanmin",
"numpy.nanmean"
]
] |
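A hypothetical usage sketch of the `DataCollections` class from the eva entry above, following its own docstrings and `group::variable` naming convention. It assumes the package is installed so the module path shown in the entry is importable; the collection, group, and variable names here are invented.

```python
import numpy as np
import xarray as xr
from eva.data.data_collections import DataCollections  # module path per the entry

dc = DataCollections()

# Start a collection from an xarray Dataset; data variables inside a
# collection are keyed as 'group::variable'.
ds = xr.Dataset({'ObsValue::temperature': xr.DataArray(np.random.rand(10), dims=['nlocs'])})
dc.create_or_add_to_collection('experiment', ds)

# Add a second variable to the same collection as a DataArray.
pressure = xr.DataArray(np.random.rand(10), dims=['nlocs'])
dc.add_variable_to_collection('experiment', 'ObsValue', 'pressure', pressure)

# Retrieval returns the raw values, squeezing singleton dimensions.
values = dc.get_variable_data('experiment', 'ObsValue', 'temperature')
print(values.shape)  # (10,)

dc.display_collections()
```

Note that every add path ends in `validate_names()`, so names containing any of `-+*/()` (or variables missing the `::` separator) would abort here by design.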
kchen0x/emotion-detection
|
[
"08402e724d218feb5d3df1fa31f9631cf97d3629"
] |
[
"src/camera.py"
] |
[
"from statistics import mode\n\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\n\nfrom utils.datasets import get_labels\nfrom utils.inference import detect_faces\nfrom utils.inference import draw_text\nfrom utils.inference import draw_bounding_box\nfrom utils.inference import apply_offsets\nfrom utils.inference import load_detection_model\nfrom utils.preprocessor import preprocess_input\n\n\nclass VideoCamera(object):\n def __init__(self):\n # Using OpenCV to capture from device 0. If you have trouble capturing\n # from a webcam, comment the line below out and use a video file\n # instead.\n self.video = cv2.VideoCapture(0)\n self.video.set(3, 640)\n self.video.set(4, 480)\n # If you decide to use video.mp4, you must have this file in the folder\n # as the main.py.\n # self.video = cv2.VideoCapture('video.mp4')\n\n # parameters for loading data and images\n self.detection_model_path = os.path.dirname(__file__) + '/../trained_models/detection_models/haarcascade_frontalface_default.xml'\n self.emotion_model_path = os.path.dirname(__file__) + '/../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'\n self.emotion_labels = get_labels('fer2013')\n\n # hyper-parameters for bounding boxes shape\n self.frame_window = 10\n self.emotion_offsets = (20, 40)\n\n # loading models\n self.face_detection = load_detection_model(self.detection_model_path)\n self.emotion_classifier = load_model(self.emotion_model_path, compile=False)\n\n # getting input model shapes for inference\n self.emotion_target_size = self.emotion_classifier.input_shape[1:3]\n\n # starting lists for calculating modes\n self.emotion_window = []\n\n def __del__(self):\n self.video.release()\n\n def get_frame(self):\n success, image = self.video.read()\n # We are using Motion JPEG, but OpenCV defaults to capture raw images,\n # so we must encode it into JPEG in order to correctly display the\n # video stream.\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()\n\n def face_camera(self):\n bgr_image = self.video.read()[1]\n gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)\n rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)\n faces = detect_faces(self.face_detection, gray_image)\n\n for face_coordinates in faces:\n\n x1, x2, y1, y2 = apply_offsets(face_coordinates,\n self.emotion_offsets)\n gray_face = gray_image[y1:y2, x1:x2]\n try:\n gray_face = cv2.resize(gray_face, (self.emotion_target_size))\n except:\n continue\n\n gray_face = preprocess_input(gray_face, True)\n gray_face = np.expand_dims(gray_face, 0)\n gray_face = np.expand_dims(gray_face, -1)\n emotion_prediction = self.emotion_classifier.predict(gray_face)\n emotion_probability = np.max(emotion_prediction)\n emotion_label_arg = np.argmax(emotion_prediction)\n emotion_text = self.emotion_labels[emotion_label_arg]\n self.emotion_window.append(emotion_text)\n\n if len(self.emotion_window) > self.frame_window:\n self.emotion_window.pop(0)\n try:\n emotion_mode = mode(self.emotion_window)\n except:\n continue\n\n if emotion_text == 'angry':\n color = emotion_probability * np.asarray((255, 0, 0))\n elif emotion_text == 'sad':\n color = emotion_probability * np.asarray((0, 0, 255))\n elif emotion_text == 'happy':\n color = emotion_probability * np.asarray((255, 255, 0))\n elif emotion_text == 'surprise':\n color = emotion_probability * np.asarray((0, 255, 255))\n else:\n color = emotion_probability * np.asarray((0, 255, 0))\n\n color = color.astype(int)\n color = color.tolist()\n\n draw_bounding_box(face_coordinates, 
rgb_image, color)\n draw_text(face_coordinates, rgb_image, emotion_mode, color, 0,\n -45, 1, 1)\n\n bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)\n jpeg = cv2.imencode('.jpg', bgr_image)[1]\n return jpeg.tobytes()\n"
] |
[
[
"numpy.asarray",
"numpy.max",
"numpy.expand_dims",
"numpy.argmax"
]
] |
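`VideoCamera.get_frame()` and `face_camera()` both return JPEG bytes, and the comments in the entry mention Motion JPEG, which suggests the class is meant to feed an MJPEG HTTP stream. Below is a hypothetical Flask route following the common `multipart/x-mixed-replace` pattern; the route name, port, and app wiring are assumptions, not taken from this repository.

```python
from flask import Flask, Response
from camera import VideoCamera  # assumes src/ is on sys.path

app = Flask(__name__)

def gen(camera):
    # Emit one JPEG per multipart chunk; the browser replaces each frame
    # in place, producing a live video stream.
    while True:
        frame = camera.face_camera()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')  # hypothetical endpoint
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)  # port is an assumption
```

Pointing an `<img src="/video_feed">` tag at this endpoint would then display the annotated camera feed.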
GuoQiang-Fu/UQpy
|
[
"3a4ddb152c4b04f82dbd515c1677a92a92e6ba4f"
] |
[
"src/UQpy/Surrogates.py"
] |
[
"# UQpy is distributed under the MIT license.\n#\n# Copyright (C) 2018 -- Michael D. Shields\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"This module contains functionality for all the surrogate methods supported in UQpy.\n\nThe module currently contains the following classes:\n\n- ``SROM``: Class to estimate a discrete approximation for a continuous random variable using Stochastic Reduced Order\n Model.\n- ``Kriging``: Class to generate an approximate surrogate model using Kriging.\n\n- ``PCE``: Class to generate an approximate surrogate model using Polynomial Chaos Expansion.\n\"\"\"\n\nimport numpy as np\nimport scipy.stats as stats\nfrom UQpy.Distributions import Normal, Uniform, Lognormal, Rayleigh, JointInd\nimport scipy.integrate as integrate\nimport scipy.special as special\nimport itertools, math\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom UQpy.Distributions import DistributionContinuous1D\n\n\n########################################################################################################################\n########################################################################################################################\n# Stochastic Reduced Order Model (SROM) #\n########################################################################################################################\n########################################################################################################################\n\n\nclass SROM:\n\n \"\"\"\n Stochastic Reduced Order Model(SROM) provide a low-dimensional, discrete approximation of a given random\n quantity.\n\n **Inputs:**\n\n * **samples** (`ndarray`):\n An array/list of samples corresponding to the points at which the SROM is defined.\n\n * **target_dist_object** ((list of) ``Distribution`` object(s)):\n A list of distribution objects for each random variable.\n\n * **moments** (`list` of `float`):\n A list containing first and second order moment about origin of all random variables.\n\n * **weights_errors** (`list` of `float`):\n A list of weights associated with the error in distribution, moments and correlation.\n\n This corresponds to a list of the values :math:`a_{u}` in the objective function above.\n\n Default: weights_errors = [1, 0.2, 0]\n\n * **properties** (`list` of `booleans`):\n A list of booleans declaring the properties to be matched in the reduced order model.\n\n `properties[0] = True` matches the marginal distributions\n\n `properties[1] = True` matches the mean values\n\n 
`properties[2] = True` matches the mean square\n\n `properties[3] = True` matches the correlation\n\n * **weights_distribution** (`ndarray` or `list` of `float`):\n A list or array containing weights associated with matching the distribution at each sample value.\n\n `weights_distribution` is an array or list of shape `(m, d)` where each weight corresponds to the weight\n :math:`w_F(x_{k,i}; i)` assigned for matching the distribution of component `i` at sample point\n :math:`x_{k,i}`.\n\n If `weights_distribution` is `(1, d)`, it is assumed that each sample sample is equally weighted according\n to the corresponding weight for its distribution.\n\n Default: `weights_distribution` = An array of shape `(m, d)` with all elements equal to 1.\n\n * **weights_moments** (`ndarray` or `list` of `float`):\n An list or array containing weights associated with matching the moments about the origin for each\n component.\n\n `weights_moments` is a list or array of shape `(2, d), where each weight corresponds to the weight\n :math:`w_{\\mu}(r; i)` assigned for matching the moment of order :math:`r = 1, 2` for component `i`.\n\n If `weights_moments` is `(1, d)`, it is assumed that moments of all order are equally weighted.\n\n Default: `weights_moments` = [[1/(moment[0][i]^2)], [1/(moment[1][i]^2)]] for i = 1, 2, ..., d.\n\n * **weights_correlation** (`ndarray` or `list` of `float`):\n A list or array containing weights associated with matching the correlation of the random variables.\n\n `weights_correlation` is a list or array of shape `(d, d)` where each weight corresponds to the weight\n :math:`w_R(i, j)` assigned for matching the correlation between component `i` and component `j`\n\n Default: `weights_correlation` = `(d, d)` array with all elements equal to 1.\n\n * **correlation** (`ndarray` or `list of floats`):\n Correlation matrix between random variables.\n\n **Attribute:**\n\n * **sample_weights** (`ndarray`):\n The probability weights defining discrete approximation of continuous random variables.\n\n **Methods:**\n\n \"\"\"\n\n def __init__(self, samples, target_dist_object, moments=None, weights_errors=None, weights_distribution=None,\n weights_moments=None, weights_correlation=None, properties=None, correlation=None, verbose=False):\n\n self.target_dist_object = target_dist_object\n self.correlation = correlation\n self.moments = moments\n\n self.weights_distribution = weights_distribution\n self.weights_moments = weights_moments\n self.weights_correlation = weights_correlation\n self.weights_errors = weights_errors\n\n self.properties = properties\n self.verbose = verbose\n self.sample_weights = None\n\n if isinstance(samples, list):\n self.samples = np.array(samples)\n self.nsamples = self.samples.shape[0]\n self.dimension = self.samples.shape[1]\n elif isinstance(samples, np.ndarray):\n self.dimension = samples.shape[1]\n self.samples = samples\n self.nsamples = samples.shape[0]\n else:\n raise NotImplementedError(\"UQpy: 'samples' sholud be a list or numpy array\")\n\n if self.target_dist_object is None:\n raise NotImplementedError(\"UQpy: Target Distribution is not defined.\")\n\n if isinstance(self.target_dist_object, list):\n for i in range(len(self.target_dist_object)):\n if not isinstance(self.target_dist_object[i], DistributionContinuous1D):\n raise TypeError('UQpy: A DistributionContinuous1D object must be provided.')\n\n if self.properties is not None:\n self.run()\n else:\n print('UQpy: No properties list provided, execute the SROM by calling run method and specifying a '\n 
'properties list')\n\n def run(self, weights_errors=None, weights_distribution=None, weights_moments=None, weights_correlation=None,\n properties=None):\n \"\"\"\n Execute the stochastic reduced order model in the ``SROM`` class.\n\n The ``run`` method is the function that computes the probability weights corresponding to the sample. If\n `properties` is provided, the ``run`` method is automatically called when the ``SROM`` object is defined. The\n user may also call the ``run`` method directly to generate samples. The ``run`` method of the ``SROM`` class can\n be invoked many times with different weights parameters and each time computed probability weights are\n overwritten.\n\n **Inputs:**\n\n * **weights_errors** (`list` of `float`):\n A list of weights associated with the error in distribution, moments and correlation.\n\n This corresponds to a list of the values :math:`a_{u}` in the objective function above.\n\n Default: weights_errors = [1, 0.2, 0]\n\n * **properties** (`list` of `booleans`):\n A list of booleans declaring the properties to be matched in the reduced order model.\n\n `properties[0] = True` matches the marginal distributions\n\n `properties[1] = True` matches the mean values\n\n `properties[2] = True` matches the mean square\n\n `properties[3] = True` matches the correlation\n\n * **weights_distribution** (`ndarray` or `list` of `float`):\n A list or array containing weights associated with matching the distribution at each sample value.\n\n `weights_distribution` is an array or list of shape `(m, d)` where each weight corresponds to the weight\n :math:`w_F(x_{k,i}; i)` assigned for matching the distribution of component `i` at sample point\n :math:`x_{k,i}`.\n\n If `weights_distribution` is `(1, d)`, it is assumed that each sample sample is equally weighted according\n to the corresponding weight for its distribution.\n\n Default: `weights_distribution` = An array of shape `(m, d)` with all elements equal to 1.\n\n * **weights_moments** (`ndarray` or `list` of `float`):\n An list or array containing weights associated with matching the moments about the origin for each\n component.\n\n `weights_moments` is a list or array of shape `(2, d), where each weight corresponds to the weight\n :math:`w_{\\mu}(r; i)` assigned for matching the moment of order :math:`r = 1, 2` for component `i`.\n\n If `weights_moments` is `(1, d)`, it is assumed that moments of all order are equally weighted.\n\n Default: `weights_moments` = [[1/(moment[0][i]^2)], [1/(moment[1][i]^2)]] for i = 1, 2, ..., d.\n\n * **weights_correlation** (`ndarray` or `list` of `float`):\n A list or array containing weights associated with matching the correlation of the random variables.\n\n `weights_correlation` is a list or array of shape `(d, d)` where each weight corresponds to the weight\n :math:`w_R(i, j)` assigned for matching the correlation between component `i` and component `j`\n\n Default: `weights_correlation` = `(d, d)` array with all elements equal to 1.\n\n \"\"\"\n from scipy import optimize\n self.weights_distribution = weights_distribution\n self.weights_moments = weights_moments\n self.weights_correlation = weights_correlation\n self.weights_errors = weights_errors\n self.properties = properties\n\n # Check properties to match\n if self.properties is None:\n self.properties = [True, True, True, False]\n\n self._init_srom()\n\n if self.verbose:\n print('UQpy: Performing SROM...')\n\n def f(p0, samples, wd, wm, wc, mar, n, d, m, alpha, prop, correlation):\n e1 = 0.\n e2 = 0.\n e22 = 0.\n e3 = 
0.\n com = np.append(samples, np.atleast_2d(p0).T, 1)\n for j in range(d):\n srt = com[np.argsort(com[:, j].flatten())]\n s = srt[:, j]\n a = srt[:, d]\n a0 = np.cumsum(a)\n marginal = mar[j].cdf\n\n if prop[0] is True:\n for i in range(n):\n e1 += wd[i, j] * (a0[i] - marginal(s[i])) ** 2\n\n if prop[1] is True:\n e2 += wm[0, j] * (np.sum(p0 * samples[:, j]) - m[0, j]) ** 2\n\n if prop[2] is True:\n e22 += wm[1, j] * (\n np.sum(np.array(p0) * (samples[:, j] * samples[:, j])) - m[1, j]) ** 2\n\n if prop[3] is True:\n for k in range(d):\n if k > j:\n r = correlation[j, k] * np.sqrt((m[1, j] - m[0, j] ** 2) * (m[1, k] - m[0, k] ** 2)) + \\\n m[0, j] * m[0, k]\n e3 += wc[k, j] * (np.sum(p0 * (samples[:, j] * samples[:, k])) - r) ** 2\n\n return alpha[0] * e1 + alpha[1] * (e2 + e22) + alpha[2] * e3\n\n def constraint(x):\n return np.sum(x) - 1\n\n cons = {'type': 'eq', 'fun': constraint}\n\n p_ = optimize.minimize(f, np.zeros(self.nsamples),\n args=(self.samples, self.weights_distribution, self.weights_moments,\n self.weights_correlation, self.target_dist_object, self.nsamples, self.dimension,\n self.moments, self.weights_errors, self.properties, self.correlation),\n constraints=cons, method='SLSQP', bounds=[[0, 1]]*self.nsamples)\n\n self.sample_weights = p_.x\n if self.verbose:\n print('UQpy: SROM completed!')\n\n def _init_srom(self):\n \"\"\"\n Initialization and preliminary error checks.\n \"\"\"\n if isinstance(self.moments, list):\n self.moments = np.array(self.moments)\n\n if isinstance(self.correlation, list):\n self.correlation = np.array(self.correlation)\n\n # Check moments and correlation\n if self.properties[1] is True or self.properties[2] is True or self.properties[3] is True:\n if self.moments is None:\n raise NotImplementedError(\"UQpy: 'moments' are required\")\n # Both moments are required, if correlation property is required to be match\n if self.properties[3] is True:\n if self.moments.shape != (2, self.dimension):\n raise NotImplementedError(\"UQpy: Shape of 'moments' is not correct\")\n if self.correlation is None:\n self.correlation = np.identity(self.dimension)\n # moments.shape[0] should be 1 or 2\n if self.moments.shape != (1, self.dimension) and self.moments.shape != (2, self.dimension):\n raise NotImplementedError(\"UQpy: Shape of 'moments' is not correct\")\n # If both the moments are to be included in objective function, then moments.shape[0] should be 2\n if self.properties[1] is True and self.properties[2] is True:\n if self.moments.shape != (2, self.dimension):\n raise NotImplementedError(\"UQpy: Shape of 'moments' is not correct\")\n # If only second order moment is to be included in objective function and moments.shape[0] is 1. 
Then\n # self.moments is converted shape = (2, self.dimension) where is second row contain second order moments.\n if self.properties[1] is False and self.properties[2] is True:\n if self.moments.shape == (1, self.dimension):\n temp = np.ones(shape=(1, self.dimension))\n self.moments = np.concatenate((temp, self.moments))\n\n # Check weights corresponding to errors\n if self.weights_errors is None:\n self.weights_errors = [1, 0.2, 0]\n elif isinstance(self.weights_errors, list):\n self.weights_errors = np.array(self.weights_errors).astype(np.float64)\n elif not isinstance(self.weights_errors, np.ndarray):\n raise NotImplementedError(\"UQpy: weights_errors attribute should be a list or numpy array\")\n\n # Check weights corresponding to distribution\n if self.weights_distribution is None:\n self.weights_distribution = np.ones(shape=(self.samples.shape[0], self.dimension))\n elif isinstance(self.weights_distribution, list):\n self.weights_distribution = np.array(self.weights_distribution)\n elif not isinstance(self.weights_distribution, np.ndarray):\n raise NotImplementedError(\"UQpy: weights_distribution attribute should be a list or numpy array\")\n\n if self.weights_distribution.shape == (1, self.dimension):\n self.weights_distribution = self.weights_distribution * np.ones(shape=(self.samples.shape[0],\n self.dimension))\n elif self.weights_distribution.shape != (self.samples.shape[0], self.dimension):\n raise NotImplementedError(\"UQpy: Size of 'weights for distribution' is not correct\")\n\n # Check weights corresponding to moments and it's default list\n if self.weights_moments is None:\n self.weights_moments = np.reciprocal(np.square(self.moments))\n elif isinstance(self.weights_moments, list):\n self.weights_moments = np.array(self.weights_moments)\n elif not isinstance(self.weights_moments, np.ndarray):\n raise NotImplementedError(\"UQpy: weights_moments attribute should be a list or numpy array\")\n\n if self.weights_moments.shape == (1, self.dimension):\n self.weights_moments = self.weights_moments * np.ones(shape=(2, self.dimension))\n elif self.weights_moments.shape != (2, self.dimension):\n raise NotImplementedError(\"UQpy: Size of 'weights for moments' is not correct\")\n\n # Check weights corresponding to correlation and it's default list\n if self.weights_correlation is None:\n self.weights_correlation = np.ones(shape=(self.dimension, self.dimension))\n elif isinstance(self.weights_correlation, list):\n self.weights_correlation = np.array(self.weights_correlation)\n elif not isinstance(self.weights_correlation, np.ndarray):\n raise NotImplementedError(\"UQpy: weights_correlation attribute should be a list or numpy array\")\n\n if self.weights_correlation.shape != (self.dimension, self.dimension):\n raise NotImplementedError(\"UQpy: Size of 'weights for correlation' is not correct\")\n\n\n########################################################################################################################\n########################################################################################################################\n# Kriging Interpolation (Kriging) #\n########################################################################################################################\n########################################################################################################################\n\nclass Kriging:\n \"\"\"\n Kriging generates an Gaussian process regression-based surrogate model to predict the model output at new sample\n points.\n\n **Inputs:**\n\n * 
**reg_model** (`str` or `function`):\n `reg_model` specifies and evaluates the basis functions and their coefficients, which defines the trend of\n the model.\n\n Built-in options (string input): 'Constant', 'Linear', 'Quadratic'\n\n The user may also pass a callable function as defined in `User-Defined Regression Model` above.\n\n * **corr_model** (`str` or `function`):\n `corr_model` specifies and evaluates the correlation function.\n\n Built-in options (string input): 'Exponential', 'Gaussian', 'Linear', 'Spherical', 'Cubic', 'Spline'\n\n The user may also pass a callable function as defined in `User-Defined Correlation` above.\n\n * **corr_model_params** (`ndarray` or `list of floats`):\n List or array of initial values for the correlation model hyperparameters/scale parameters.\n\n * **bounds** (`list` of `float`):\n Bounds on the hyperparameters used to solve optimization problem to estimate maximum likelihood estimator.\n This should be a closed bound.\n\n Default: [0.001, 10**7] for each hyperparameter.\n\n * **op** (`boolean`):\n Indicator to solve MLE problem or not. If 'True' corr_model_params will be used as initial solution for\n optimization problem. Otherwise, corr_model_params will be directly use as the hyperparamters.\n\n Default: True.\n\n * **nopt** (`int`):\n Number of times MLE optimization problem is to be solved with a random starting point.\n\n Default: 1.\n\n * **verbose** (`Boolean`):\n A boolean declaring whether to write text to the terminal.\n\n Default value: False\n\n **Attributes:**\n\n * **beta** (`ndarray`):\n Regression coefficients.\n\n * **err_var** (`ndarray`):\n Variance of the Gaussian random process.\n\n * **C_inv** (`ndarray`):\n Inverse Cholesky decomposition of the correlation matrix.\n\n **Methods:**\n\n \"\"\"\n\n def __init__(self, reg_model='Linear', corr_model='Exponential', bounds=None, op=True, nopt=1, normalize=True,\n verbose=False, corr_model_params=None, optimizer=None, random_state=None, **kwargs_optimizer):\n\n self.reg_model = reg_model\n self.corr_model = corr_model\n self.corr_model_params = np.array(corr_model_params)\n self.bounds = bounds\n self.optimizer = optimizer\n self.nopt = nopt\n self.op = op\n self.normalize = normalize\n self.verbose = verbose\n self.random_state = random_state\n self.kwargs_optimizer = kwargs_optimizer\n\n # Variables are used outside the __init__\n self.samples = None\n self.values = None\n self.sample_mean, self.sample_std = None, None\n self.value_mean, self.value_std = None, None\n self.rmodel, self.cmodel = None, None\n self.beta, self.gamma, self.err_var = None, None, None\n self.F_dash, self.C_inv, self.G = None, None, None\n self.F, self.R = None, None\n\n # Initialize and run preliminary error checks.\n if self.reg_model is None:\n raise NotImplementedError(\"UQpy: Regression model is not defined.\")\n\n if self.corr_model is None:\n raise NotImplementedError(\"Uqpy: Correlation model is not defined.\")\n\n if self.corr_model_params is None:\n raise NotImplementedError(\"UQpy: corr_model_params is not defined.\")\n\n if self.bounds is None:\n self.bounds = [[0.001, 10**7]]*self.corr_model_params.shape[0]\n\n if self.optimizer is None:\n from scipy.optimize import fmin_l_bfgs_b\n self.optimizer = fmin_l_bfgs_b\n self.kwargs_optimizer = {'bounds': self.bounds}\n elif callable(self.optimizer):\n self.optimizer = self.optimizer\n else:\n raise TypeError('UQpy: Input optimizer should be None (set to scipy.optimize.minimize) or a callable.')\n\n if type(self.reg_model).__name__ == 'function':\n 
self.rmodel = 'User defined'\n self.reg_model = self.reg_model\n elif self.reg_model in ['Constant', 'Linear', 'Quadratic']:\n self.rmodel = self.reg_model\n self.reg_model = self._regress()\n else:\n raise NotImplementedError(\"UQpy: Doesn't recognize the Regression model.\")\n\n if type(self.corr_model).__name__ == 'function':\n self.cmodel = 'User defined'\n self.corr_model = self.corr_model\n elif self.corr_model in ['Exponential', 'Gaussian', 'Linear', 'Spherical', 'Cubic', 'Spline', 'Other']:\n self.cmodel = self.corr_model\n self.corr_model: callable = self._corr()\n else:\n raise NotImplementedError(\"UQpy: Doesn't recognize the Correlation model.\")\n\n if isinstance(self.random_state, int):\n self.random_state = np.random.RandomState(self.random_state)\n elif not isinstance(self.random_state, (type(None), np.random.RandomState)):\n raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.')\n\n def fit(self, samples, values, nopt=None, corr_model_params=None):\n \"\"\"\n Fit the surrogate model using the training samples and the corresponding model values.\n\n The user can run this method multiple time after initiating the ``Kriging`` class object.\n\n This method updates the samples and parameters of the ``Kriging`` object. This method uses `corr_model_params`\n from previous run as the starting point for MLE problem unless user provides a new starting point.\n\n **Inputs:**\n\n * **samples** (`ndarray`):\n `ndarray` containing the training points.\n\n * **values** (`ndarray`):\n `ndarray` containing the model evaluations at the training points.\n\n **Output/Return:**\n\n The ``fit`` method has no returns, although it creates the `beta`, `err_var` and `C_inv` attributes of the\n ``Kriging`` class.\n\n \"\"\"\n from scipy.linalg import cholesky\n\n if self.verbose:\n print('UQpy: Running Kriging.fit')\n\n def log_likelihood(p0, cm, s, f, y):\n # Return the log-likelihood function and it's gradient. Gradient is calculate using Central Difference\n m = s.shape[0]\n n = s.shape[1]\n r__, dr_ = cm(x=s, s=s, params=p0, dt=True)\n try:\n cc = cholesky(r__ + 2**(-52) * np.eye(m), lower=True)\n except np.linalg.LinAlgError:\n return np.inf, np.zeros(n)\n\n # Product of diagonal terms is negligible sometimes, even when cc exists.\n if np.prod(np.diagonal(cc)) == 0:\n return np.inf, np.zeros(n)\n\n cc_inv = np.linalg.inv(cc)\n r_inv = np.matmul(cc_inv.T, cc_inv)\n f__ = cc_inv.dot(f)\n y__ = cc_inv.dot(y)\n\n q__, g__ = np.linalg.qr(f__) # Eq: 3.11, DACE\n\n # Check if F is a full rank matrix\n if np.linalg.matrix_rank(g__) != min(np.size(f__, 0), np.size(f__, 1)):\n raise NotImplementedError(\"Chosen regression functions are not sufficiently linearly independent\")\n\n # Design parameters\n beta_ = np.linalg.solve(g__, np.matmul(np.transpose(q__), y__))\n\n # Computing the process variance (Eq: 3.13, DACE)\n sigma_ = np.zeros(y.shape[1])\n\n ll = 0\n for out_dim in range(y.shape[1]):\n sigma_[out_dim] = (1 / m) * (np.linalg.norm(y__[:, out_dim] - np.matmul(f__, beta_[:, out_dim])) ** 2)\n # Objective function:= log(det(sigma**2 * R)) + constant\n ll = ll + (np.log(np.linalg.det(sigma_[out_dim] * r__)) + m * (np.log(2 * np.pi) + 1))/2\n\n # Gradient of loglikelihood\n # Reference: C. E. Rasmussen & C. K. I. Williams, Gaussian Processes for Machine Learning, the MIT Press,\n # 2006, ISBN 026218253X. 
(Page 114, Eq.(5.9))\n residual = y - np.matmul(f, beta_)\n gamma = np.matmul(r_inv, residual)\n grad_mle = np.zeros(n)\n for in_dim in range(n):\n r_inv_derivative = np.matmul(r_inv, np.matmul(dr_[:, :, in_dim], r_inv))\n tmp = np.matmul(residual.T, np.matmul(r_inv_derivative, residual))\n for out_dim in range(y.shape[1]):\n alpha = gamma / sigma_[out_dim]\n tmp1 = np.matmul(alpha, alpha.T) - r_inv / sigma_[out_dim]\n cov_der = sigma_[out_dim] * dr_[:, :, in_dim] + tmp * r__ / m\n grad_mle[in_dim] = grad_mle[in_dim] - 0.5 * np.trace(np.matmul(tmp1, cov_der))\n\n return ll, grad_mle\n\n if nopt is not None:\n self.nopt = nopt\n if corr_model_params is not None:\n self.corr_model_params = corr_model_params\n self.samples = np.array(samples)\n\n # Number of samples and dimensions of samples and values\n nsamples, input_dim = self.samples.shape\n output_dim = int(np.size(values) / nsamples)\n\n self.values = np.array(values).reshape(nsamples, output_dim)\n\n # Normalizing the data\n if self.normalize:\n self.sample_mean, self.sample_std = np.mean(self.samples, 0), np.std(self.samples, 0)\n self.value_mean, self.value_std = np.mean(self.values, 0), np.std(self.values, 0)\n s_ = (self.samples - self.sample_mean)/self.sample_std\n y_ = (self.values - self.value_mean)/self.value_std\n else:\n s_ = self.samples\n y_ = self.values\n\n self.F, jf_ = self.reg_model(s_)\n\n # Maximum Likelihood Estimation : Solving optimization problem to calculate hyperparameters\n if self.op:\n starting_point = self.corr_model_params\n minimizer, fun_value = np.zeros([self.nopt, input_dim]), np.zeros([self.nopt, 1])\n for i__ in range(self.nopt):\n p_ = self.optimizer(log_likelihood, starting_point, args=(self.corr_model, s_, self.F, y_),\n **self.kwargs_optimizer)\n minimizer[i__, :] = p_[0]\n fun_value[i__, 0] = p_[1]\n # Generating new starting points using log-uniform distribution\n if i__ != self.nopt - 1:\n starting_point = stats.reciprocal.rvs([j[0] for j in self.bounds], [j[1] for j in self.bounds], 1,\n random_state=self.random_state)\n if min(fun_value) == np.inf:\n raise NotImplementedError(\"Maximum likelihood estimator failed: Choose different starting point or \"\n \"increase nopt\")\n t = np.argmin(fun_value)\n self.corr_model_params = minimizer[t, :]\n\n # Updated Correlation matrix corresponding to MLE estimates of hyperparameters\n self.R = self.corr_model(x=s_, s=s_, params=self.corr_model_params)\n # Compute the regression coefficient (solving this linear equation: F * beta = Y)\n c = np.linalg.cholesky(self.R) # Eq: 3.8, DACE\n c_inv = np.linalg.inv(c)\n f_dash = np.linalg.solve(c, self.F)\n y_dash = np.linalg.solve(c, y_)\n q_, g_ = np.linalg.qr(f_dash) # Eq: 3.11, DACE\n # Check if F is a full rank matrix\n if np.linalg.matrix_rank(g_) != min(np.size(self.F, 0), np.size(self.F, 1)):\n raise NotImplementedError(\"Chosen regression functions are not sufficiently linearly independent\")\n # Design parameters (beta: regression coefficient)\n self.beta = np.linalg.solve(g_, np.matmul(np.transpose(q_), y_dash))\n\n # Design parameter (R * gamma = Y - F * beta = residual)\n self.gamma = np.linalg.solve(c.T, (y_dash - np.matmul(f_dash, self.beta)))\n\n # Computing the process variance (Eq: 3.13, DACE)\n self.err_var = np.zeros(output_dim)\n for i in range(output_dim):\n self.err_var[i] = (1 / nsamples) * (np.linalg.norm(y_dash[:, i] - np.matmul(f_dash, self.beta[:, i])) ** 2)\n\n self.F_dash, self.C_inv, self.G = f_dash, c_inv, g_\n\n if self.verbose:\n print('UQpy: Kriging fit complete.')\n\n def 
predict(self, x, return_std=False):\n \"\"\"\n Predict the model response at new points.\n\n This method evaluates the regression and correlation model at new sample points. Then, it predicts the function\n value and standard deviation.\n\n **Inputs:**\n\n * **x** (`list` or `numpy array`):\n Points at which to predict the model response.\n\n * **return_std** (`Boolean`):\n Indicator to estimate standard deviation.\n\n **Outputs:**\n\n * **f_x** (`numpy array`):\n Predicted values at the new points.\n\n * **std_f_x** (`numpy array`):\n Standard deviation of predicted values at the new points.\n\n \"\"\"\n x_ = np.atleast_2d(x)\n if self.normalize:\n x_ = (x_ - self.sample_mean)/self.sample_std\n s_ = (self.samples - self.sample_mean) / self.sample_std\n else:\n s_ = self.samples\n fx, jf = self.reg_model(x_)\n rx = self.corr_model(x=x_, s=s_, params=self.corr_model_params)\n y = np.einsum('ij,jk->ik', fx, self.beta) + np.einsum('ij,jk->ik', rx, self.gamma)\n if self.normalize:\n y = self.value_mean + y * self.value_std\n if x_.shape[1] == 1:\n y = y.flatten()\n if return_std:\n r_dash = np.matmul(self.C_inv, rx.T)\n u = np.matmul(self.F_dash.T, r_dash) - fx.T\n norm1 = np.linalg.norm(r_dash, 2, 0)\n norm2 = np.linalg.norm(np.linalg.solve(self.G, u), 2, 0)\n mse = self.err_var * np.atleast_2d(1 + norm2 - norm1).T\n if self.normalize:\n mse = self.value_std * np.sqrt(mse)\n if x_.shape[1] == 1:\n mse = mse.flatten()\n return y, mse\n else:\n return y\n\n def jacobian(self, x):\n \"\"\"\n Predict the gradient of the model at new points.\n\n This method evaluates the regression and correlation model at new sample point. Then, it predicts the gradient\n using the regression coefficients and the training data.\n\n **Input:**\n\n * **x** (`list` or `numpy array`):\n Points at which to evaluate the gradient.\n\n **Output:**\n\n * **grad_x** (`list` or `numpy array`):\n Gradient of the surrogate model evaluated at the new points.\n\n \"\"\"\n x_ = np.atleast_2d(x)\n if self.normalize:\n x_ = (x_ - self.sample_mean) / self.sample_std\n s_ = (self.samples - self.sample_mean) / self.sample_std\n else:\n s_ = self.samples\n\n fx, jf = self.reg_model(x_)\n rx, drdx = self.corr_model(x=x_, s=s_, params=self.corr_model_params, dx=True)\n y_grad = np.einsum('ikj,jm->ik', jf, self.beta) + np.einsum('ijk,jm->ki', drdx.T, self.gamma)\n if self.normalize:\n y_grad = y_grad * self.value_std/self.sample_std\n if x_.shape[1] == 1:\n y_grad = y_grad.flatten()\n return y_grad\n\n # Defining Regression model (Linear)\n def _regress(self):\n if self.reg_model == 'Constant':\n def r(s):\n s = np.atleast_2d(s)\n fx = np.ones([np.size(s, 0), 1])\n jf = np.zeros([np.size(s, 0), np.size(s, 1), 1])\n return fx, jf\n elif self.reg_model == 'Linear':\n def r(s):\n s = np.atleast_2d(s)\n fx = np.concatenate((np.ones([np.size(s, 0), 1]), s), 1)\n jf_b = np.zeros([np.size(s, 0), np.size(s, 1), np.size(s, 1)])\n np.einsum('jii->ji', jf_b)[:] = 1\n jf = np.concatenate((np.zeros([np.size(s, 0), np.size(s, 1), 1]), jf_b), 2)\n return fx, jf\n else:\n def r(s):\n s = np.atleast_2d(s)\n fx = np.zeros([np.size(s, 0), int((np.size(s, 1) + 1) * (np.size(s, 1) + 2) / 2)])\n jf = np.zeros(\n [np.size(s, 0), np.size(s, 1), int((np.size(s, 1) + 1) * (np.size(s, 1) + 2) / 2)])\n for i in range(np.size(s, 0)):\n temp = np.hstack((1, s[i, :]))\n for j in range(np.size(s, 1)):\n temp = np.hstack((temp, s[i, j] * s[i, j::]))\n fx[i, :] = temp\n # definie H matrix\n h_ = 0\n for j in range(np.size(s, 1)):\n tmp_ = s[i, j] * np.eye(np.size(s, 
1))\n t1 = np.zeros([np.size(s, 1), np.size(s, 1)])\n t1[j, :] = s[i, :]\n tmp = tmp_ + t1\n if j == 0:\n h_ = tmp[:, j::]\n else:\n h_ = np.hstack((h_, tmp[:, j::]))\n jf[i, :, :] = np.hstack((np.zeros([np.size(s, 1), 1]), np.eye(np.size(s, 1)), h_))\n return fx, jf\n\n return r\n\n # Defining Correlation model (Gaussian Process)\n def _corr(self):\n def check_samples_and_return_stack(x, s):\n x_, s_ = np.atleast_2d(x), np.atleast_2d(s)\n # Create stack matrix, where each block is x_i with all s\n stack = np.tile(np.swapaxes(np.atleast_3d(x_), 1, 2), (1, np.size(s_, 0), 1)) - np.tile(s_, (\n np.size(x_, 0),\n 1, 1))\n return stack\n\n def derivatives(x_, s_, params):\n stack = check_samples_and_return_stack(x_, s_)\n # Taking stack and creating array of all thetaj*dij\n after_parameters = params * abs(stack)\n # Create matrix of all ones to compare\n comp_ones = np.ones((np.size(x_, 0), np.size(s_, 0), np.size(s_, 1)))\n # zeta_matrix has all values min{1,theta*dij}\n zeta_matrix_ = np.minimum(after_parameters, comp_ones)\n # Copy zeta_matrix to another matrix that will used to find where derivative should be zero\n indices = zeta_matrix_.copy()\n # If value of min{1,theta*dij} is 1, the derivative should be 0.\n # So, replace all values of 1 with 0, then perform the .astype(bool).astype(int)\n # operation like in the linear example, so you end up with an array of 1's where\n # the derivative should be caluclated and 0 where it should be zero\n indices[indices == 1] = 0\n # Create matrix of all |dij| (where non zero) to be used in calculation of dR/dtheta\n dtheta_derivs_ = indices.astype(bool).astype(int) * abs(stack)\n # Same as above, but for matrix of all thetaj where non-zero\n dx_derivs_ = indices.astype(bool).astype(int) * params * np.sign(stack)\n return zeta_matrix_, dtheta_derivs_, dx_derivs_\n\n if self.corr_model == 'Exponential':\n def c(x, s, params, dt=False, dx=False):\n stack = check_samples_and_return_stack(x, s)\n rx = np.exp(np.sum(-params * abs(stack), axis=2))\n if dt:\n drdt = - abs(stack) * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0))\n return rx, drdt\n if dx:\n drdx = - params * np.sign(stack) * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0))\n return rx, drdx\n return rx\n elif self.corr_model == 'Gaussian':\n def c(x, s, params, dt=False, dx=False):\n stack = check_samples_and_return_stack(x, s)\n rx = np.exp(np.sum(-params * (stack ** 2), axis=2))\n if dt:\n drdt = -(stack ** 2) * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0))\n return rx, drdt\n if dx:\n drdx = - 2 * params * stack * np.transpose(np.tile(rx, (np.size(x, 1), 1, 1)), (1, 2, 0))\n return rx, drdx\n return rx\n elif self.corr_model == 'Linear':\n def c(x, s, params, dt=False, dx=False):\n stack = check_samples_and_return_stack(x, s)\n # Taking stack and turning each d value into 1-theta*dij\n after_parameters = 1 - params * abs(stack)\n # Define matrix of zeros to compare against (not necessary to be defined separately,\n # but the line is bulky if this isn't defined first, and it is used more than once)\n comp_zero = np.zeros((np.size(x, 0), np.size(s, 0), np.size(s, 1)))\n # Compute matrix of max{0,1-theta*d}\n max_matrix = np.maximum(after_parameters, comp_zero)\n rx = np.prod(max_matrix, 2)\n # Create matrix that has 1s where max_matrix is nonzero\n # -Essentially, this acts as a way to store the indices of where the values are nonzero\n ones_and_zeros = max_matrix.astype(bool).astype(int)\n # Set initial derivatives as if all were positive\n 
first_dtheta = -abs(stack)\n first_dx = np.negative(params) * np.sign(stack)\n # Multiply derivs by ones_and_zeros...this will set the values where the\n # derivative should be zero to zero, and keep all other values the same\n drdt = np.multiply(first_dtheta, ones_and_zeros)\n drdx = np.multiply(first_dx, ones_and_zeros)\n if dt:\n # Loop over parameters, shifting max_matrix and multiplying over derivative matrix with each iter\n for i in range(len(params) - 1):\n drdt = drdt * np.roll(max_matrix, i + 1, axis=2)\n return rx, drdt\n if dx:\n # Loop over parameters, shifting max_matrix and multiplying over derivative matrix with each iter\n for i in range(len(params) - 1):\n drdx = drdx * np.roll(max_matrix, i + 1, axis=2)\n return rx, drdx\n return rx\n elif self.corr_model == 'Spherical':\n def c(x, s, params, dt=False, dx=False):\n zeta_matrix, dtheta_derivs, dx_derivs = derivatives(x_=x, s_=s, params=params)\n # Initial matrices containing derivates for all values in array. Note since\n # dtheta_s and dx_s already accounted for where derivative should be zero, all\n # that must be done is multiplying the |dij| or thetaj matrix on top of a\n # matrix of derivates w.r.t zeta (in this case, dzeta = -1.5+1.5zeta**2)\n drdt = (-1.5 + 1.5 * zeta_matrix ** 2) * dtheta_derivs\n drdx = (-1.5 + 1.5 * zeta_matrix ** 2) * dx_derivs\n # Also, create matrix for values of equation, 1 - 1.5zeta + 0.5zeta**3, for loop\n zeta_function = 1 - 1.5 * zeta_matrix + 0.5 * zeta_matrix ** 3\n rx = np.prod(zeta_function, 2)\n if dt:\n # Same as previous example, loop over zeta matrix by shifting index\n for i in range(len(params) - 1):\n drdt = drdt * np.roll(zeta_function, i + 1, axis=2)\n return rx, drdt\n if dx:\n # Same as previous example, loop over zeta matrix by shifting index\n for i in range(len(params) - 1):\n drdx = drdx * np.roll(zeta_function, i + 1, axis=2)\n return rx, drdx\n return rx\n elif self.corr_model == 'Cubic':\n def c(x, s, params, dt=False, dx=False):\n zeta_matrix, dtheta_derivs, dx_derivs = derivatives(x_=x, s_=s, params=params)\n # Initial matrices containing derivates for all values in array. 
Note since\n # dtheta_s and dx_s already accounted for where derivative should be zero, all\n # that must be done is multiplying the |dij| or thetaj matrix on top of a\n # matrix of derivates w.r.t zeta (in this case, dzeta = -6zeta+6zeta**2)\n drdt = (-6 * zeta_matrix + 6 * zeta_matrix ** 2) * dtheta_derivs\n drdx = (-6 * zeta_matrix + 6 * zeta_matrix ** 2) * dx_derivs\n # Also, create matrix for values of equation, 1 - 3zeta**2 + 2zeta**3, for loop\n zeta_function_cubic = 1 - 3 * zeta_matrix ** 2 + 2 * zeta_matrix ** 3\n rx = np.prod(zeta_function_cubic, 2)\n if dt:\n # Same as previous example, loop over zeta matrix by shifting index\n for i in range(len(params) - 1):\n drdt = drdt * np.roll(zeta_function_cubic, i + 1, axis=2)\n return rx, drdt\n if dx:\n # Same as previous example, loop over zeta matrix by shifting index\n for i in range(len(params) - 1):\n drdx = drdx * np.roll(zeta_function_cubic, i + 1, axis=2)\n return rx, drdx\n return rx\n else:\n def c(x, s, params, dt=False, dx=False):\n # x_, s_ = np.atleast_2d(x_), np.atleast_2d(s_)\n # # Create stack matrix, where each block is x_i with all s\n # stack = np.tile(np.swapaxes(np.atleast_3d(x_), 1, 2), (1, np.size(s_, 0), 1)) - np.tile(s_, (\n # np.size(x_, 0),\n # 1, 1))\n stack = check_samples_and_return_stack(x, s)\n # In this case, the zeta value is just abs(stack)*parameters, no comparison\n zeta_matrix = abs(stack) * params\n # So, dtheta and dx are just |dj| and theta*sgn(dj), respectively\n dtheta_derivs = abs(stack)\n # dx_derivs = np.ones((np.size(x,0),np.size(s,0),np.size(s,1)))*parameters\n dx_derivs = np.sign(stack) * params\n\n # Initialize empty sigma and dsigma matrices\n sigma = np.ones((zeta_matrix.shape[0], zeta_matrix.shape[1], zeta_matrix.shape[2]))\n dsigma = np.ones((zeta_matrix.shape[0], zeta_matrix.shape[1], zeta_matrix.shape[2]))\n\n # Loop over cases to create zeta_matrix and subsequent dR matrices\n for i in range(zeta_matrix.shape[0]):\n for j in range(zeta_matrix.shape[1]):\n for k in range(zeta_matrix.shape[2]):\n y = zeta_matrix[i, j, k]\n if 0 <= y <= 0.2:\n sigma[i, j, k] = 1 - 15 * y ** 2 + 30 * y ** 3\n dsigma[i, j, k] = -30 * y + 90 * y ** 2\n elif 0.2 < y < 1.0:\n sigma[i, j, k] = 1.25 * (1 - y) ** 3\n dsigma[i, j, k] = 3.75 * (1 - y) ** 2 * -1\n elif y >= 1:\n sigma[i, j, k] = 0\n dsigma[i, j, k] = 0\n\n rx = np.prod(sigma, 2)\n\n if dt:\n # Initialize derivative matrices incorporating chain rule\n drdt = dsigma * dtheta_derivs\n # Loop over to create proper matrices\n for i in range(len(params) - 1):\n drdt = drdt * np.roll(sigma, i + 1, axis=2)\n return rx, drdt\n if dx:\n # Initialize derivative matrices incorporating chain rule\n drdx = dsigma * dx_derivs\n # Loop over to create proper matrices\n for i in range(len(params) - 1):\n drdx = drdx * np.roll(sigma, i + 1, axis=2)\n return rx, drdx\n return rx\n return c\n\n\n##############################################################################\n##############################################################################\n# Polynomial Chaos Expansion (PCE) #\n##############################################################################\n##############################################################################\n\n\nclass PCE:\n \"\"\"\n Constructs a surrogate model based on the Polynomial Chaos Expansion (PCE)\n method.\n\n **Inputs:**\n\n * **method** (class):\n object for the method used for the calculation of the PCE coefficients.\n\n **Methods:**\n\n \"\"\"\n\n def __init__(self, method, verbose=False):\n self.method = 
method\n self.verbose = verbose\n self.C = None\n self.b = None\n\n def fit(self, x, y):\n \"\"\"\n Fit the surrogate model using the training samples and the\n corresponding model values. This method calls the 'run' method of the\n input method class.\n\n **Inputs:**\n\n * **x** (`ndarray`):\n `ndarray` containing the training points.\n\n * **y** (`ndarray`):\n `ndarray` containing the model evaluations at the training points.\n\n **Output/Return:**\n\n The ``fit`` method has no returns and it creates an `ndarray` with the\n PCE coefficients.\n \"\"\"\n\n if self.verbose:\n print('UQpy: Running PCE.fit')\n\n if type(self.method) == PolyChaosLstsq:\n self.C = self.method.run(x, y)\n\n elif type(self.method) == PolyChaosLasso or \\\n type(self.method) == PolyChaosRidge:\n self.C, self.b = self.method.run(x, y)\n\n if self.verbose:\n print('UQpy: PCE fit complete.')\n\n def predict(self, x_test):\n\n \"\"\"\n Predict the model response at new points.\n This method evaluates the PCE model at new sample points.\n\n **Inputs:**\n\n * **x_test** (`ndarray`):\n Points at which to predict the model response.\n\n **Outputs:**\n\n * **y** (`ndarray`):\n Predicted values at the new points.\n\n \"\"\"\n\n a = self.method.poly_object.evaluate(x_test)\n\n if type(self.method) == PolyChaosLstsq:\n y = a.dot(self.C)\n\n elif type(self.method) == PolyChaosLasso or \\\n type(self.method) == PolyChaosRidge:\n y = a.dot(self.C) + self.b\n\n return y\n\n\nclass PolyChaosLstsq:\n \"\"\"\n Class to calculate the PCE coefficients via the least-squares solution to\n the linear matrix equation. The equation may be under-, well-, or\n over-determined.\n\n **Inputs:**\n\n * **poly_object** ('class'):\n Object from the 'Polynomial' class\n\n **Methods:**\n\n \"\"\"\n\n def __init__(self, poly_object, verbose=False):\n self.poly_object = poly_object\n self.verbose = verbose\n\n def run(self, x, y):\n \"\"\"\n Least squares solution to compute the PCE coefficients.\n\n **Inputs:**\n\n * **x** (`ndarray`):\n `ndarray` containing the training points (samples).\n\n * **y** (`ndarray`):\n `ndarray` containing the model evaluations (labels) at the\n training points.\n\n **Outputs:**\n\n * **c_** (`ndarray`):\n Returns the PCE coefficients.\n\n \"\"\"\n a = self.poly_object.evaluate(x)\n c_, res, rank, sing = np.linalg.lstsq(a, y)\n if c_.ndim == 1:\n c_ = c_.reshape(-1, 1)\n\n return c_\n\n\nclass PolyChaosLasso:\n \"\"\"\n Class to calculate the PCE coefficients with the Least Absolute Shrinkage\n and Selection Operator (LASSO) method.\n\n **Inputs:**\n\n * **poly_object** ('class'):\n Object from the 'Polynomial' class\n\n **Methods:**\n\n \"\"\"\n\n def __init__(self, poly_object, learning_rate=0.01, iterations=1000,\n penalty=1, verbose=False):\n self.poly_object = poly_object\n self.learning_rate = learning_rate\n self.iterations = iterations\n self.penalty = penalty\n self.verbose = verbose\n\n def run(self, x, y):\n \"\"\"\n Implements the LASSO method to compute the PCE coefficients.\n\n **Inputs:**\n\n * **poly_object** (`object`):\n Polynomial object.\n\n * **learning_rate** (`float`):\n Size of steps for the gradient descent.\n\n * **iterations** (`int`):\n Number of iterations of the optimization algorithm.\n\n * **penalty** (`float`):\n Penalty parameter controls the strength of regularization. 
When it\n is close to zero, then the Lasso regression converges to the linear\n regression, while when it goes to infinity, PCE coefficients\n converge to zero.\n\n **Outputs:**\n\n * **w** (`ndarray`):\n Returns the weights (PCE coefficients) of the regressor.\n\n * **b** (`float`):\n Returns the bias of the regressor.\n \"\"\"\n\n xx = self.poly_object.evaluate(x)\n m, n = xx.shape\n\n if y.ndim == 1 or y.shape[1] == 1:\n y = y.reshape(-1, 1)\n w = np.zeros(n).reshape(-1, 1)\n dw = np.zeros(n).reshape(-1, 1)\n b = 0\n\n for _ in range(self.iterations):\n y_pred = (xx.dot(w) + b)\n\n for i in range(n):\n if w[i] > 0:\n dw[i] = (-(2 * (xx.T[i, :]).dot(y - y_pred)) + self.penalty) / m\n else:\n dw[i] = (-(2 * (xx.T[i, :]).dot(y - y_pred)) - self.penalty) / m\n\n db = - 2 * np.sum(y - y_pred) / m\n\n w = w - self.learning_rate * dw\n b = b - self.learning_rate * db\n\n else:\n n_out_dim = y.shape[1]\n w = np.zeros((n, n_out_dim))\n b = np.zeros(n_out_dim).reshape(1, -1)\n\n for _ in range(self.iterations):\n y_pred = (xx.dot(w) + b)\n\n dw = (-(2 * xx.T.dot(y - y_pred)) - self.penalty) / m\n db = - 2 * np.sum((y - y_pred), axis=0).reshape(1, -1) / m\n\n w = w - self.learning_rate * dw\n b = b - self.learning_rate * db\n\n return w, b\n\n\nclass PolyChaosRidge:\n \"\"\"\n Class to calculate the PCE coefficients with the Ridge regression method.\n\n **Inputs:**\n\n * **poly_object** ('class'):\n Object from the 'Polynomial' class\n\n **Methods:**\n \"\"\"\n\n def __init__(self, poly_object, learning_rate=0.01, iterations=1000,\n penalty=1, verbose=False):\n self.poly_object = poly_object\n self.learning_rate = learning_rate\n self.iterations = iterations\n self.penalty = penalty\n self.verbose = verbose\n\n def run(self, x, y):\n \"\"\"\n Implements the LASSO method to compute the PCE coefficients.\n\n **Inputs:**\n\n * **poly_object** (`object`):\n Polynomial object.\n\n * **learning_rate** (`float`):\n Size of steps for the gradient descent.\n\n * **iterations** (`int`):\n Number of iterations of the optimization algorithm.\n\n * **penalty** (`float`):\n Penalty parameter controls the strength of regularization. 
When it\n is close to zero, then the ridge regression converges to the linear\n regression, while when it goes to infinity, PCE coefficients\n converge to zero.\n\n **Outputs:**\n\n * **w** (`ndarray`):\n Returns the weights (PCE coefficients) of the regressor.\n\n * **b** (`float`):\n Returns the bias of the regressor.\n\n \"\"\"\n\n xx = self.poly_object.evaluate(x)\n m, n = xx.shape\n\n if y.ndim == 1 or y.shape[1] == 1:\n y = y.reshape(-1, 1)\n w = np.zeros(n).reshape(-1, 1)\n b = 0\n\n for _ in range(self.iterations):\n y_pred = (xx.dot(w) + b).reshape(-1, 1)\n\n dw = (-(2 * xx.T.dot(y - y_pred)) + (2 * self.penalty * w)) / m\n db = - 2 * np.sum(y - y_pred) / m\n\n w = w - self.learning_rate * dw\n b = b - self.learning_rate * db\n\n else:\n n_out_dim = y.shape[1]\n w = np.zeros((n, n_out_dim))\n b = np.zeros(n_out_dim).reshape(1, -1)\n\n for _ in range(self.iterations):\n y_pred = (xx.dot(w) + b)\n\n dw = (-(2 * xx.T.dot(y - y_pred)) + (2 * self.penalty * w)) / m\n db = - 2 * np.sum((y - y_pred), axis=0).reshape(1, -1) / m\n\n w = w - self.learning_rate * dw\n b = b - self.learning_rate * db\n\n return w, b\n\n\nclass Polynomials:\n \"\"\"\n Class for polynomials used for the PCE method.\n\n **Inputs:**\n\n * **dist_object** ('class'):\n Object from a distribution class.\n\n * **degree** ('int'):\n Maximum degree of the polynomials.\n\n **Methods:**\n \"\"\"\n\n def __init__(self, dist_object, degree):\n self.dist_object = dist_object\n self.degree = degree + 1\n\n @staticmethod\n def standardize_normal(x, mean, std):\n \"\"\"\n Static method: Standardize data based on the standard normal\n distribution N(0,1).\n\n **Input:**\n\n * **x** (`ndarray`)\n Input data generated from a normal distribution.\n\n * **mean** (`list`)\n Mean value of the original normal distribution.\n\n * **std** (`list`)\n Standard deviation of the original normal distribution.\n\n **Output/Returns:**\n\n `ndarray`\n Standardized data.\n\n \"\"\"\n return (x - mean) / std\n\n @staticmethod\n def standardize_uniform(x, m, scale):\n \"\"\"\n Static method: Standardize data based on the uniform distribution\n U(-1,1).\n\n **Input:**\n\n * **x** (`ndarray`)\n Input data generated from a normal distribution.\n\n * **m** (`float`)\n Mean value of the original uniform distribution.\n\n * **b** (`list`)\n Scale of the original uniform distribution.\n\n **Output/Returns:**\n\n `ndarray`\n Standardized data.\n\n \"\"\"\n return (x - m) / (scale / 2)\n\n @staticmethod\n def normalized(degree, x, a, b, pdf_st, p):\n \"\"\"\n Static method: Calculates design matrix and normalized polynomials.\n\n **Input:**\n\n * **x** (`ndarray`)\n Input samples.\n\n * **a** (`float`)\n Left bound of the support the distribution.\n\n * **b** (`floar`)\n Right bound of the support of the distribution.\n\n * **pdf_st** (`function`)\n Pdf function generated from UQpy distribution object.\n\n * **p** (`list`)\n List containing the orthogonal polynomials generated with scipy.\n\n **Output/Returns:**\n\n * **a** (`ndarray`)\n Returns the design matrix\n\n * **pol_normed** (`ndarray`)\n Returns the normalized polynomials.\n\n \"\"\"\n\n pol_normed = []\n m = np.zeros((degree, degree))\n for i in range(degree):\n for j in range(degree):\n int_res = integrate.quad(lambda k: p[i](k) * p[j](k) * pdf_st(k),\n a, b, epsabs=1e-15, epsrel=1e-15)\n m[i, j] = int_res[0]\n pol_normed.append(p[i] / np.sqrt(m[i, i]))\n\n a = np.zeros((x.shape[0], degree))\n for i in range(x.shape[0]):\n for j in range(degree):\n a[i, j] = pol_normed[j](x[i])\n\n return a, 
pol_normed\n\n def get_mean(self):\n \"\"\"\n Returns a `float` with the mean of the UQpy distribution object.\n \"\"\"\n m = self.dist_object.moments(moments2return='m')\n return m\n\n def get_std(self):\n \"\"\"\n Returns a `float` with the variance of the UQpy distribution object.\n \"\"\"\n s = np.sqrt(self.dist_object.moments(moments2return='v'))\n return s\n\n def location(self):\n \"\"\"\n Returns a `float` with the location of the UQpy distribution object.\n \"\"\"\n m = self.dist_object.__dict__['params']['loc']\n return m\n\n def scale(self):\n \"\"\"\n Returns a `float` with the scale of the UQpy distribution object.\n \"\"\"\n s = self.dist_object.__dict__['params']['scale']\n return s\n\n def evaluate(self, x):\n \"\"\"\n Calculates the design matrix. Rows represent the input samples and\n columns the multiplied polynomials whose degree must not exceed the\n maximum degree of polynomials.\n\n **Inputs:**\n\n * **x** (`ndarray`):\n `ndarray` containing the samples.\n\n **Outputs:**\n\n * **design** (`ndarray`):\n Returns an array with the design matrix.\n \"\"\"\n\n if not type(self.dist_object) == JointInd:\n if type(self.dist_object) == Normal:\n return Hermite(self.degree, self.dist_object).get_polys(x)[0]\n # design matrix (data x polynomials)\n\n if type(self.dist_object) == Uniform:\n return Legendre(self.degree, self.dist_object).get_polys(x)[0]\n\n else:\n raise TypeError('Warning: This distribution is not supported.')\n\n else:\n\n a = []\n for i in range(len(self.dist_object.marginals)):\n\n if isinstance(self.dist_object.marginals[i], Normal):\n a.append(Hermite(self.degree,\n self.dist_object.marginals[i]).get_polys(x[:, i])[0])\n\n elif isinstance(self.dist_object.marginals[i], Uniform):\n a.append(Legendre(self.degree,\n self.dist_object.marginals[i]).get_polys(x[:, i])[0])\n\n else:\n raise TypeError('Warning: This distribution is not supported.')\n\n # Compute all possible valid combinations\n m = len(a) # number of variables\n p = self.degree # maximum polynomial order\n\n p_ = np.arange(0, p, 1).tolist()\n res = list(itertools.product(p_, repeat=m))\n # sum of poly orders\n sum_ = [int(math.fsum(res[i])) for i in range(len(res))]\n indices = sorted(range(len(sum_)), key=lambda k: sum_[k])\n res_new = [res[indices[i]] for i in range(len(res))]\n comb = [(0,) * m]\n\n for i in range(m):\n t = [0] * m\n t[i] = 1\n comb.append(tuple(t))\n\n for i in range(len(res_new)):\n if 1 < int(math.fsum(res_new[i])) <= p - 1:\n rev = res_new[i][::-1]\n comb.append(rev)\n\n design = np.ones((x.shape[0], len(comb)))\n for i in range(len(comb)):\n for j in range(m):\n h = [a[j][k][comb[i][j]] for k in range(x.shape[0])]\n design[:, i] *= h\n\n return design\n\n\nclass Hermite(Polynomials):\n \"\"\"\n Class of univariate polynomials appropriate for data generated from a\n normal distribution.\n\n **Inputs:**\n\n * **degree** ('int'):\n Maximum degree of the polynomials.\n\n * **dist_object** ('class'):\n Distribution object of the generated samples.\n\n **Methods:**\n \"\"\"\n\n def __init__(self, degree, dist_object):\n super().__init__(dist_object, degree)\n self.degree = degree\n self.pdf = self.dist_object.pdf\n\n def get_polys(self, x):\n \"\"\"\n Calculates the normalized Hermite polynomials evaluated at sample points.\n\n **Inputs:**\n\n * **x** (`ndarray`):\n `ndarray` containing the samples.\n\n **Outputs:**\n\n (`list`):\n Returns a list of 'ndarrays' with the design matrix and the\n normalized polynomials.\n \"\"\"\n a, b = -np.inf, np.inf\n mean_ = 
Polynomials.get_mean(self)\n std_ = Polynomials.get_std(self)\n x_ = Polynomials.standardize_normal(x, mean_, std_)\n\n norm = Normal(0, 1)\n pdf_st = norm.pdf\n\n p = []\n for i in range(self.degree):\n p.append(special.hermitenorm(i, monic=False))\n\n return Polynomials.normalized(self.degree, x_, a, b, pdf_st, p)\n\n\nclass Legendre(Polynomials):\n \"\"\"\n Class of univariate polynomials appropriate for data generated from a\n uniform distribution.\n\n **Inputs:**\n\n * **degree** ('int'):\n Maximum degree of the polynomials.\n\n * **dist_object** ('class'):\n Distribution object of the generated samples.\n\n **Methods:**\n \"\"\"\n\n def __init__(self, degree, dist_object):\n super().__init__(dist_object, degree)\n self.degree = degree\n self.pdf = self.dist_object.pdf\n\n def get_polys(self, x):\n \"\"\"\n Calculates the normalized Legendre polynomials evaluated at sample points.\n\n **Inputs:**\n\n * **x** (`ndarray`):\n `ndarray` containing the samples.\n\n * **y** (`ndarray`):\n `ndarray` containing the samples.\n\n **Outputs:**\n\n (`list`):\n Returns a list of 'ndarrays' with the design matrix and the\n normalized polynomials.\n\n \"\"\"\n a, b = -1, 1\n m, scale = Polynomials.get_mean(self), Polynomials.scale(self)\n x_ = Polynomials.standardize_uniform(x, m, scale)\n\n uni = Uniform(a, b - a)\n pdf_st = uni.pdf\n\n p = []\n for i in range(self.degree):\n p.append(special.legendre(i, monic=False))\n\n return Polynomials.normalized(self.degree, x_, a, b, pdf_st, p)\n\n\nclass ErrorEstimation:\n \"\"\"\n Class for estimating the error of a PCE surrogate, based on a validation\n dataset.\n\n **Inputs:**\n\n * **surr_object** ('class'):\n Object that defines the surrogate model.\n\n **Methods:**\n \"\"\"\n\n def __init__(self, surr_object):\n self.surr_object = surr_object\n\n def validation(self, x, y):\n \"\"\"\n Returns the validation error.\n\n **Inputs:**\n\n * **x** (`ndarray`):\n `ndarray` containing the samples of the validation dataset.\n\n * **y** (`ndarray`):\n `ndarray` containing model evaluations for the validation dataset.\n\n **Outputs:**\n\n * **eps_val** (`float`)\n Validation error.\n\n \"\"\"\n if y.ndim == 1 or y.shape[1] == 1:\n y = y.reshape(-1, 1)\n\n y_val = self.surr_object.predict(x)\n\n n_samples = x.shape[0]\n mu_yval = (1 / n_samples) * np.sum(y, axis=0)\n eps_val = (n_samples - 1) / n_samples * (\n (np.sum((y - y_val) ** 2, axis=0)) / (np.sum((y - mu_yval) ** 2, axis=0)))\n\n if y.ndim == 1 or y.shape[1] == 1:\n eps_val = float(eps_val)\n\n return np.round(eps_val, 7)\n\n\nclass MomentEstimation:\n \"\"\"\n Class for estimating the moments of the PCE surrogate.\n\n **Inputs:**\n\n * **surr_object** ('class'):\n Object that defines the surrogate model.\n\n **Methods:**\n \"\"\"\n\n def __init__(self, surr_object):\n self.surr_object = surr_object\n\n def get(self):\n \"\"\"\n Returns the first two moments of the PCE surrogate which are directly\n estimated from the PCE coefficients.\n\n **Outputs:**\n\n * **mean, variance** (`tuple`)\n Returns the mean and variance.\n\n \"\"\"\n if self.surr_object.b is not None:\n mean = self.surr_object.C[0, :] + np.squeeze(self.surr_object.b)\n else:\n mean = self.surr_object.C[0, :]\n\n variance = np.sum(self.surr_object.C[1:] ** 2, axis=0)\n\n if self.surr_object.C.ndim == 1 or self.surr_object.C.shape[1] == 1:\n variance = float(variance)\n mean = float(mean)\n\n return np.round(mean, 4), np.round(variance, 4)\n"
] |
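A minimal, self-contained sketch of the least-squares coefficient step that the PolyChaosLstsq.run method in the record above performs. This is not taken from the repository: the toy data, the target function, and the use of plain (non-normalized) probabilists' Hermite polynomials as the design matrix are all illustrative simplifications of the normalized construction in Polynomials.normalized.

import numpy as np
from scipy import special

# Toy 1-D training data drawn from a standard normal input (hypothetical).
rng = np.random.RandomState(0)
x = rng.standard_normal(200)
y = 1.0 + 2.0 * x + 0.5 * (x ** 2 - 1.0)  # exactly representable target

degree = 3  # polynomial orders 0, 1 and 2
# Design matrix of probabilists' Hermite polynomials He_i evaluated at x,
# standing in for poly_object.evaluate(x) in PolyChaosLstsq.run.
a = np.column_stack([special.hermitenorm(i)(x) for i in range(degree)])

# PCE coefficients via the least-squares solution, as in PolyChaosLstsq.run.
c, res, rank, sing = np.linalg.lstsq(a, y, rcond=None)
y_hat = a.dot(c)  # surrogate prediction at the training points

For this target the recovered coefficients are close to [1.0, 2.0, 0.5], since the target is an exact degree-2 Hermite expansion.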
[
[
"numpy.minimum",
"numpy.linalg.matrix_rank",
"numpy.einsum",
"numpy.sqrt",
"numpy.squeeze",
"numpy.cumsum",
"numpy.round",
"numpy.concatenate",
"numpy.argmin",
"numpy.mean",
"scipy.special.hermitenorm",
"numpy.linalg.qr",
"numpy.negative",
"numpy.roll",
"numpy.square",
"numpy.hstack",
"numpy.arange",
"numpy.eye",
"numpy.matmul",
"numpy.linalg.det",
"numpy.size",
"numpy.std",
"numpy.zeros",
"numpy.log",
"numpy.multiply",
"numpy.linalg.inv",
"numpy.atleast_2d",
"numpy.linalg.lstsq",
"numpy.identity",
"numpy.linalg.cholesky",
"numpy.transpose",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"numpy.diagonal",
"numpy.linalg.solve",
"numpy.maximum",
"scipy.stats.reciprocal.rvs",
"numpy.linalg.norm",
"scipy.special.legendre",
"numpy.ones",
"numpy.sign",
"numpy.atleast_3d",
"numpy.prod"
]
] |
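The gradient-descent update used by PolyChaosRidge.run in the record above, extracted into a standalone sketch so the single-output branch can be run on its own. The helper name ridge_gd and the random design matrix are illustrative, not part of the repository; the gradient expressions mirror the source.

import numpy as np

def ridge_gd(xx, y, learning_rate=0.01, iterations=1000, penalty=1.0):
    """Gradient-descent ridge fit following the single-output update
    rule in PolyChaosRidge.run (hypothetical standalone helper)."""
    m, n = xx.shape
    y = y.reshape(-1, 1)
    w = np.zeros((n, 1))
    b = 0.0
    for _ in range(iterations):
        y_pred = xx.dot(w) + b
        # Same gradients as in PolyChaosRidge.run above.
        dw = (-(2 * xx.T.dot(y - y_pred)) + 2 * penalty * w) / m
        db = -2 * np.sum(y - y_pred) / m
        w = w - learning_rate * dw
        b = b - learning_rate * db
    return w, b

# Illustrative usage on a random design matrix.
rng = np.random.RandomState(1)
xx = rng.standard_normal((100, 4))
y = xx @ np.array([1.0, -0.5, 0.0, 2.0]) + 0.3
w, b = ridge_gd(xx, y)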
fraimondo/mne-python
|
[
"2fe126debc27d14e5f1d92762757915bb86fcaf5"
] |
[
"mne/io/base.py"
] |
[
"# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>\n# Matti Hamalainen <msh@nmr.mgh.harvard.edu>\n# Martin Luessi <mluessi@nmr.mgh.harvard.edu>\n# Denis Engemann <denis.engemann@gmail.com>\n# Teon Brooks <teon.brooks@gmail.com>\n# Marijn van Vliet <w.m.vanvliet@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport copy\nfrom copy import deepcopy\nimport warnings\nimport os\nimport os.path as op\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom .constants import FIFF\nfrom .pick import pick_types, channel_type, pick_channels, pick_info\nfrom .meas_info import write_meas_info\nfrom .proj import setup_proj, activate_proj, _proj_equal, ProjMixin\nfrom ..channels.channels import (ContainsMixin, PickDropChannelsMixin,\n SetChannelsMixin, InterpolationMixin)\nfrom ..channels.montage import read_montage, _set_montage, Montage\nfrom .compensator import set_current_comp\nfrom .write import (start_file, end_file, start_block, end_block,\n write_dau_pack16, write_float, write_double,\n write_complex64, write_complex128, write_int,\n write_id, write_string)\n\nfrom ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,\n notch_filter, band_stop_filter, resample)\nfrom ..fixes import in1d\nfrom ..parallel import parallel_func\nfrom ..utils import (_check_fname, _check_pandas_installed,\n _check_pandas_index_arguments,\n check_fname, _get_stim_channel, object_hash,\n logger, verbose, _time_mask)\nfrom ..viz import plot_raw, plot_raw_psd\nfrom ..defaults import _handle_default\nfrom ..externals.six import string_types\nfrom ..event import concatenate_events\n\n\nclass ToDataFrameMixin(object):\n '''Class to add to_data_frame capabilities to certain classes.'''\n def _get_check_picks(self, picks, picks_check):\n if picks is None:\n picks = list(range(self.info['nchan']))\n else:\n if not in1d(picks, np.arange(len(picks_check))).all():\n raise ValueError('At least one picked channel is not present '\n 'in this object instance.')\n return picks\n\n def to_data_frame(self, picks=None, index=None, scale_time=1e3,\n scalings=None, copy=True, start=None, stop=None):\n \"\"\"Export data in tabular structure as a pandas DataFrame.\n\n Columns and indices will depend on the object being converted.\n Generally this will include as much relevant information as\n possible for the data type being converted. This makes it easy\n to convert data for use in packages that utilize dataframes,\n such as statsmodels or seaborn.\n\n Parameters\n ----------\n picks : array-like of int | None\n If None only MEG and EEG channels are kept\n otherwise the channels indices in picks are kept.\n index : tuple of str | None\n Column to be used as index for the data. Valid string options\n are 'epoch', 'time' and 'condition'. If None, all three info\n columns will be included in the table as categorial data.\n scale_time : float\n Scaling to be applied to time units.\n scalings : dict | None\n Scaling to be applied to the channels picked. If None, defaults to\n ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)``.\n copy : bool\n If true, data will be copied. Else data may be modified in place.\n start : int | None\n If it is a Raw object, this defines a starting index for creating\n the dataframe from a slice. The times will be interpolated from the\n index and the sampling rate of the signal.\n stop : int | None\n If it is a Raw object, this defines a stop index for creating\n the dataframe from a slice. 
The times will be interpolated from the\n index and the sampling rate of the signal.\n\n Returns\n -------\n df : instance of pandas.core.DataFrame\n A dataframe suitable for usage with other\n statistical/plotting/analysis packages. Column/Index values will\n depend on the object type being converted, but should be\n human-readable.\n \"\"\"\n from ..epochs import _BaseEpochs\n from ..evoked import Evoked\n from .fiff import RawFIF\n from .array import RawArray\n from ..source_estimate import _BaseSourceEstimate\n\n pd = _check_pandas_installed()\n mindex = list()\n # Treat SourceEstimates special because they don't have the same info\n if isinstance(self, _BaseSourceEstimate):\n if self.subject is None:\n default_index = ['time']\n else:\n default_index = ['subject', 'time']\n data = self.data.T\n times = self.times\n shape = data.shape\n mindex.append(('subject', np.repeat(self.subject, shape[0])))\n\n if isinstance(self.vertices, list):\n # surface source estimates\n col_names = [i for e in [\n ['{0} {1}'.format('LH' if ii < 1 else 'RH', vert)\n for vert in vertno]\n for ii, vertno in enumerate(self.vertices)]\n for i in e]\n else:\n # volume source estimates\n col_names = ['VOL {0}'.format(vert) for vert in self.vertices]\n else:\n if isinstance(self, _BaseEpochs):\n picks = self._get_check_picks(picks, self.ch_names)\n default_index = ['condition', 'epoch', 'time']\n data = self.get_data()[:, picks, :]\n times = self.times\n n_epochs, n_picks, n_times = data.shape\n data = np.hstack(data).T # (time*epochs) x signals\n\n # Multi-index creation\n times = np.tile(times, n_epochs)\n id_swapped = dict((v, k) for k, v in self.event_id.items())\n names = [id_swapped[k] for k in self.events[:, 2]]\n mindex.append(('condition', np.repeat(names, n_times)))\n mindex.append(('epoch',\n np.repeat(np.arange(n_epochs), n_times)))\n col_names = [self.ch_names[k] for k in picks]\n\n elif isinstance(self, (RawFIF, RawArray, Evoked)):\n picks = self._get_check_picks(picks, self.ch_names)\n default_index = ['time']\n if isinstance(self, (RawFIF, RawArray)):\n data, times = self[picks, start:stop]\n elif isinstance(self, Evoked):\n data = self.data[picks, :]\n times = self.times\n n_picks, n_times = data.shape\n data = data.T\n col_names = [self.ch_names[k] for k in picks]\n\n types = [channel_type(self.info, idx) for idx in picks]\n n_channel_types = 0\n ch_types_used = []\n\n scalings = _handle_default('scalings', scalings)\n for t in scalings.keys():\n if t in types:\n n_channel_types += 1\n ch_types_used.append(t)\n\n for t in ch_types_used:\n scaling = scalings[t]\n idx = [picks[i] for i in range(len(picks)) if types[i] == t]\n if len(idx) > 0:\n data[:, idx] *= scaling\n\n # Make sure that the time index is scaled correctly\n times = np.round(times * scale_time)\n mindex.append(('time', times))\n\n if index is not None:\n _check_pandas_index_arguments(index, default_index)\n else:\n index = default_index\n\n if copy is True:\n data = data.copy()\n\n assert all(len(mdx) == len(mindex[0]) for mdx in mindex)\n\n df = pd.DataFrame(data, columns=col_names)\n [df.insert(i, k, v) for i, (k, v) in enumerate(mindex)]\n if index is not None:\n if 'time' in index:\n logger.info('Converting time column to int64...')\n df['time'] = df['time'].astype(np.int64)\n df.set_index(index, inplace=True)\n if all(i in default_index for i in index):\n df.columns.name = 'signal'\n return df\n\n\ndef _check_fun(fun, d, *args, **kwargs):\n want_shape = d.shape\n d = fun(d, *args, **kwargs)\n if not isinstance(d, 
np.ndarray):\n raise TypeError('Return value must be an ndarray')\n if d.shape != want_shape:\n raise ValueError('Return data must have shape %s not %s'\n % (want_shape, d.shape))\n return d\n\n\nclass _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,\n SetChannelsMixin, InterpolationMixin, ToDataFrameMixin):\n \"\"\"Base class for Raw data\n\n Subclasses must provide the following methods:\n\n * _read_segment_file(self, data, idx, offset, fi, start, stop,\n cals, mult)\n (only needed for types that support on-demand disk reads)\n\n The `_BaseRaw._raw_extras` list can contain whatever data is necessary for\n such on-demand reads. For `RawFIF` this means a list of variables formerly\n known as ``_rawdirs``.\n \"\"\"\n @verbose\n def __init__(self, info, preload=False,\n first_samps=(0,), last_samps=None,\n filenames=(), raw_extras=(),\n comp=None, orig_comp_grade=None,\n orig_format='double', dtype=np.float64,\n verbose=None):\n # wait until the end to preload data, but triage here\n if isinstance(preload, np.ndarray):\n # some functions (e.g., filtering) only work w/64-bit data\n if preload.dtype not in (np.float64, np.complex128):\n raise RuntimeError('datatype must be float64 or complex128, '\n 'not %s' % preload.dtype)\n if preload.dtype != dtype:\n raise ValueError('preload and dtype must match')\n self._data = preload\n self.preload = True\n last_samps = [self._data.shape[1] - 1]\n load_from_disk = False\n else:\n if last_samps is None:\n raise ValueError('last_samps must be given unless preload is '\n 'an ndarray')\n if preload is False:\n self.preload = False\n load_from_disk = False\n elif preload is not True and not isinstance(preload, string_types):\n raise ValueError('bad preload: %s' % preload)\n else:\n load_from_disk = True\n self._last_samps = np.array(last_samps)\n self._first_samps = np.array(first_samps)\n self.info = info\n cals = np.empty(info['nchan'])\n for k in range(info['nchan']):\n cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']\n self.verbose = verbose\n self._cals = cals\n self._raw_extras = list(raw_extras)\n self.comp = comp\n self._orig_comp_grade = orig_comp_grade\n self._filenames = list(filenames)\n self.orig_format = orig_format\n self._projectors = list()\n self._projector = None\n self._dtype_ = dtype\n # If we have True or a string, actually do the preloading\n if load_from_disk:\n self._preload_data(preload)\n self._update_times()\n\n @property\n def _dtype(self):\n \"\"\"dtype for loading data (property so subclasses can override)\"\"\"\n # most classes only store real data, they won't need anything special\n return self._dtype_\n\n def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,\n projector=None, verbose=None):\n \"\"\"Read a chunk of raw data\n\n Parameters\n ----------\n start : int, (optional)\n first sample to include (first is 0). 
If omitted, defaults to the\n first sample in data.\n stop : int, (optional)\n First sample to not include.\n If omitted, data is included to the end.\n sel : array, optional\n Indices of channels to select.\n data_buffer : array or str, optional\n numpy array to fill with data read, must have the correct shape.\n If str, a np.memmap with the correct data type will be used\n to store the data.\n projector : array\n SSP operator to apply to the data.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n data : array, [channels x samples]\n the data matrix (channels x samples).\n times : array, [samples]\n returns the time values corresponding to the samples.\n \"\"\"\n # Initial checks\n start = int(start)\n stop = self.n_times if stop is None else min([int(stop), self.n_times])\n\n if start >= stop:\n raise ValueError('No data in this range')\n\n logger.info('Reading %d ... %d = %9.3f ... %9.3f secs...' %\n (start, stop - 1, start / float(self.info['sfreq']),\n (stop - 1) / float(self.info['sfreq'])))\n\n # Initialize the data and calibration vector\n n_sel_channels = self.info['nchan'] if sel is None else len(sel)\n # convert sel to a slice if possible for efficiency\n if sel is not None and len(sel) > 1 and np.all(np.diff(sel) == 1):\n sel = slice(sel[0], sel[-1] + 1)\n idx = slice(None, None, None) if sel is None else sel\n data_shape = (n_sel_channels, stop - start)\n dtype = self._dtype\n if isinstance(data_buffer, np.ndarray):\n if data_buffer.shape != data_shape:\n raise ValueError('data_buffer has incorrect shape')\n data = data_buffer\n elif isinstance(data_buffer, string_types):\n # use a memmap\n data = np.memmap(data_buffer, mode='w+',\n dtype=dtype, shape=data_shape)\n else:\n data = np.zeros(data_shape, dtype=dtype)\n\n # deal with having multiple files accessed by the raw object\n cumul_lens = np.concatenate(([0], np.array(self._raw_lengths,\n dtype='int')))\n cumul_lens = np.cumsum(cumul_lens)\n files_used = np.logical_and(np.less(start, cumul_lens[1:]),\n np.greater_equal(stop - 1,\n cumul_lens[:-1]))\n\n # set up cals and mult (cals, compensation, and projector)\n cals = self._cals.ravel()[np.newaxis, :]\n if self.comp is None and projector is None:\n mult = None\n else:\n mult = list()\n for ri in range(len(self._first_samps)):\n if self.comp is not None:\n if projector is not None:\n mul = self.comp * cals\n mul = np.dot(projector[idx], mul)\n else:\n mul = self.comp[idx] * cals\n elif projector is not None:\n mul = projector[idx] * cals\n else:\n mul = np.diag(self._cals.ravel())[idx]\n mult.append(mul)\n cals = cals.T[idx]\n\n # read from necessary files\n offset = 0\n for fi in np.nonzero(files_used)[0]:\n start_file = self._first_samps[fi]\n # first iteration (only) could start in the middle somewhere\n if offset == 0:\n start_file += start - cumul_lens[fi]\n stop_file = np.min([stop - 1 - cumul_lens[fi] +\n self._first_samps[fi], self._last_samps[fi]])\n if start_file < self._first_samps[fi] or \\\n stop_file > self._last_samps[fi] or \\\n stop_file < start_file or start_file > stop_file:\n raise ValueError('Bad array indexing, could be a bug')\n\n self._read_segment_file(data, idx, offset, fi,\n start_file, stop_file, cals, mult)\n offset += stop_file - start_file + 1\n\n logger.info('[done]')\n times = np.arange(start, stop) / self.info['sfreq']\n return data, times\n\n def _read_segment_file(self, data, idx, offset, fi, start, stop,\n cals, mult):\n \"\"\"Read a segment of data from a 
file\n\n Only needs to be implemented for readers that support\n ``preload=False``.\n\n Parameters\n ----------\n data : ndarray, shape (len(idx), n_samp)\n The data array. Should be modified inplace.\n idx : ndarray | slice\n The requested channel indices.\n offset : int\n Offset. Data should be stored in something like::\n\n data[:, offset:offset + (start - stop + 1)] = r[idx]\n fi : int\n The file index that must be read from.\n start : int\n The start sample in the given file.\n stop : int\n The stop sample in the given file (inclusive).\n cals : ndarray, shape (len(idx), 1)\n Channel calibrations (already sub-indexed).\n mult : ndarray, shape (len(idx), len(info['chs']) | None\n The compensation + projection + cals matrix, if applicable.\n \"\"\"\n raise NotImplementedError\n\n @verbose\n def preload_data(self, verbose=None):\n \"\"\"Preload raw data\n\n Parameters\n ----------\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Notes\n -----\n This function will preload raw data if it was not already preloaded.\n If data were already preloaded, it will do nothing.\n \"\"\"\n if not self.preload:\n self._preload_data(True)\n\n def _preload_data(self, preload):\n \"\"\"This function actually preloads the data\"\"\"\n data_buffer = preload if isinstance(preload, string_types) else None\n self._data = self._read_segment(data_buffer=data_buffer)[0]\n assert len(self._data) == self.info['nchan']\n self.preload = True\n self.close()\n\n def _update_times(self):\n \"\"\"Helper to update times\"\"\"\n self._times = np.arange(self.n_times) / float(self.info['sfreq'])\n # make it immutable\n self._times.flags.writeable = False\n\n @property\n def first_samp(self):\n return self._first_samps[0]\n\n @property\n def last_samp(self):\n return self.first_samp + sum(self._raw_lengths) - 1\n\n @property\n def _raw_lengths(self):\n return [l - f + 1 for f, l in zip(self._first_samps, self._last_samps)]\n\n def __del__(self):\n # remove file for memmap\n if hasattr(self, '_data') and hasattr(self._data, 'filename'):\n # First, close the file out; happens automatically on del\n filename = self._data.filename\n del self._data\n # Now file can be removed\n try:\n os.remove(filename)\n except OSError:\n pass # ignore file that no longer exists\n\n def __enter__(self):\n \"\"\" Entering with block \"\"\"\n return self\n\n def __exit__(self, exception_type, exception_val, trace):\n \"\"\" Exiting with block \"\"\"\n try:\n self.close()\n except:\n return exception_type, exception_val, trace\n\n def __hash__(self):\n if not self.preload:\n raise RuntimeError('Cannot hash raw unless preloaded')\n return object_hash(dict(info=self.info, data=self._data))\n\n def _parse_get_set_params(self, item):\n # make sure item is a tuple\n if not isinstance(item, tuple): # only channel selection passed\n item = (item, slice(None, None, None))\n\n if len(item) != 2: # should be channels and time instants\n raise RuntimeError(\"Unable to access raw data (need both channels \"\n \"and time)\")\n\n if isinstance(item[0], slice):\n start = item[0].start if item[0].start is not None else 0\n nchan = self.info['nchan']\n stop = item[0].stop if item[0].stop is not None else nchan\n step = item[0].step if item[0].step is not None else 1\n sel = list(range(start, stop, step))\n else:\n sel = item[0]\n\n if isinstance(item[1], slice):\n time_slice = item[1]\n start, stop, step = (time_slice.start, time_slice.stop,\n time_slice.step)\n else:\n item1 = item[1]\n # Let's do automated 
type conversion to integer here\n if np.array(item[1]).dtype.kind == 'i':\n item1 = int(item1)\n if isinstance(item1, (int, np.integer)):\n start, stop, step = item1, item1 + 1, 1\n else:\n raise ValueError('Must pass int or slice to __getitem__')\n\n if start is None:\n start = 0\n if (step is not None) and (step is not 1):\n raise ValueError('step needs to be 1 : %d given' % step)\n\n if isinstance(sel, (int, np.integer)):\n sel = np.array([sel])\n\n if sel is not None and len(sel) == 0:\n raise ValueError(\"Empty channel list\")\n\n return sel, start, stop\n\n def __getitem__(self, item):\n \"\"\"getting raw data content with python slicing\"\"\"\n sel, start, stop = self._parse_get_set_params(item)\n if self.preload:\n data, times = self._data[sel, start:stop], self.times[start:stop]\n else:\n data, times = self._read_segment(start=start, stop=stop, sel=sel,\n projector=self._projector,\n verbose=self.verbose)\n return data, times\n\n def __setitem__(self, item, value):\n \"\"\"setting raw data content with python slicing\"\"\"\n if not self.preload:\n raise RuntimeError('Modifying data of Raw is only supported '\n 'when preloading is used. Use preload=True '\n '(or string) in the constructor.')\n sel, start, stop = self._parse_get_set_params(item)\n # set the data\n self._data[sel, start:stop] = value\n\n def anonymize(self):\n \"\"\"Anonymize data\n\n This function will remove info['subject_info'] if it exists.\"\"\"\n self.info._anonymize()\n\n @verbose\n def apply_function(self, fun, picks, dtype, n_jobs, *args, **kwargs):\n \"\"\" Apply a function to a subset of channels.\n\n The function \"fun\" is applied to the channels defined in \"picks\". The\n data of the Raw object is modified inplace. If the function returns\n a different data type (e.g. numpy.complex) it must be specified using\n the dtype parameter, which causes the data type used for representing\n the raw data to change.\n\n The Raw object has to be constructed using preload=True (or string).\n\n Note: If n_jobs > 1, more memory is required as \"len(picks) * n_times\"\n additional time points need to be temporaily stored in memory.\n\n Note: If the data type changes (dtype != None), more memory is required\n since the original and the converted data needs to be stored in\n memory.\n\n Parameters\n ----------\n fun : function\n A function to be applied to the channels. The first argument of\n fun has to be a timeseries (numpy.ndarray). The function must\n return an numpy.ndarray with the same size as the input.\n picks : array-like of int | None\n Indices of channels to apply the function to. If None, all\n M-EEG channels are used.\n dtype : numpy.dtype\n Data type to use for raw data after applying the function. If None\n the data type is not modified.\n n_jobs: int\n Number of jobs to run in parallel.\n *args :\n Additional positional arguments to pass to fun (first pos. argument\n of fun is the timeseries of a channel).\n **kwargs :\n Keyword arguments to pass to fun. Note that if \"verbose\" is passed\n as a member of ``kwargs``, it will be consumed and will override\n the default mne-python verbose level (see mne.verbose).\n \"\"\"\n if not self.preload:\n raise RuntimeError('Raw data needs to be preloaded. 
Use '\n 'preload=True (or string) in the constructor.')\n if picks is None:\n picks = pick_types(self.info, meg=True, eeg=True, exclude=[])\n\n if not callable(fun):\n raise ValueError('fun needs to be a function')\n\n data_in = self._data\n if dtype is not None and dtype != self._data.dtype:\n self._data = self._data.astype(dtype)\n\n if n_jobs == 1:\n # modify data inplace to save memory\n for idx in picks:\n self._data[idx, :] = _check_fun(fun, data_in[idx, :],\n *args, **kwargs)\n else:\n # use parallel function\n parallel, p_fun, _ = parallel_func(_check_fun, n_jobs)\n data_picks_new = parallel(p_fun(fun, data_in[p], *args, **kwargs)\n for p in picks)\n for pp, p in enumerate(picks):\n self._data[p, :] = data_picks_new[pp]\n\n @verbose\n def apply_hilbert(self, picks, envelope=False, n_jobs=1, verbose=None):\n \"\"\" Compute analytic signal or envelope for a subset of channels.\n\n If envelope=False, the analytic signal for the channels defined in\n \"picks\" is computed and the data of the Raw object is converted to\n a complex representation (the analytic signal is complex valued).\n\n If envelope=True, the absolute value of the analytic signal for the\n channels defined in \"picks\" is computed, resulting in the envelope\n signal.\n\n Note: DO NOT use envelope=True if you intend to compute an inverse\n solution from the raw data. If you want to compute the\n envelope in source space, use envelope=False and compute the\n envelope after the inverse solution has been obtained.\n\n Note: If envelope=False, more memory is required since the original\n raw data as well as the analytic signal have temporarily to\n be stored in memory.\n\n Note: If n_jobs > 1 and envelope=True, more memory is required as\n \"len(picks) * n_times\" additional time points need to be\n temporaily stored in memory.\n\n Parameters\n ----------\n picks : array-like of int\n Indices of channels to apply the function to.\n envelope : bool (default: False)\n Compute the envelope signal of each channel.\n n_jobs: int\n Number of jobs to run in parallel.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n Defaults to self.verbose.\n\n Notes\n -----\n The analytic signal \"x_a(t)\" of \"x(t)\" is::\n\n x_a = F^{-1}(F(x) 2U) = x + i y\n\n where \"F\" is the Fourier transform, \"U\" the unit step function,\n and \"y\" the Hilbert transform of \"x\". One usage of the analytic\n signal is the computation of the envelope signal, which is given by\n \"e(t) = abs(x_a(t))\". Due to the linearity of Hilbert transform and the\n MNE inverse solution, the enevlope in source space can be obtained\n by computing the analytic signal in sensor space, applying the MNE\n inverse, and computing the envelope in source space.\n \"\"\"\n if envelope:\n self.apply_function(_envelope, picks, None, n_jobs)\n else:\n from scipy.signal import hilbert\n self.apply_function(hilbert, picks, np.complex64, n_jobs)\n\n @verbose\n def filter(self, l_freq, h_freq, picks=None, filter_length='10s',\n l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, n_jobs=1,\n method='fft', iir_params=None, verbose=None):\n \"\"\"Filter a subset of channels.\n\n Applies a zero-phase low-pass, high-pass, band-pass, or band-stop\n filter to the channels selected by \"picks\". The data of the Raw\n object is modified inplace.\n\n The Raw object has to be constructed using preload=True (or string).\n\n l_freq and h_freq are the frequencies below which and above which,\n respectively, to filter out of the data. 
Thus the uses are:\n\n l_freq < h_freq: band-pass filter\n l_freq > h_freq: band-stop filter\n l_freq is not None, h_freq is None: high-pass filter\n l_freq is None, h_freq is not None: low-pass filter\n\n If n_jobs > 1, more memory is required as \"len(picks) * n_times\"\n additional time points need to be temporarily stored in memory.\n\n self.info['lowpass'] and self.info['highpass'] are only updated\n with picks=None.\n\n Parameters\n ----------\n l_freq : float | None\n Low cut-off frequency in Hz. If None the data are only low-passed.\n h_freq : float | None\n High cut-off frequency in Hz. If None the data are only\n high-passed.\n picks : array-like of int | None\n Indices of channels to filter. If None only the data (MEG/EEG)\n channels will be filtered.\n filter_length : str (Default: '10s') | int | None\n Length of the filter to use. If None or \"len(x) < filter_length\",\n the filter length used is len(x). Otherwise, if int, overlap-add\n filtering with a filter of the specified length in samples) is\n used (faster for long signals). If str, a human-readable time in\n units of \"s\" or \"ms\" (e.g., \"10s\" or \"5500ms\") will be converted\n to the shortest power-of-two length at least that duration.\n Not used for 'iir' filters.\n l_trans_bandwidth : float\n Width of the transition band at the low cut-off frequency in Hz\n (high pass or cutoff 1 in bandpass). Not used if 'order' is\n specified in iir_params.\n h_trans_bandwidth : float\n Width of the transition band at the high cut-off frequency in Hz\n (low pass or cutoff 2 in bandpass). Not used if 'order' is\n specified in iir_params.\n n_jobs : int | str\n Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda\n is installed properly, CUDA is initialized, and method='fft'.\n method : str\n 'fft' will use overlap-add FIR filtering, 'iir' will use IIR\n forward-backward filtering (via filtfilt).\n iir_params : dict | None\n Dictionary of parameters to use for IIR filtering.\n See mne.filter.construct_iir_filter for details. If iir_params\n is None and method=\"iir\", 4th order Butterworth will be used.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n Defaults to self.verbose.\n \"\"\"\n if verbose is None:\n verbose = self.verbose\n fs = float(self.info['sfreq'])\n if l_freq == 0:\n l_freq = None\n if h_freq is not None and h_freq > (fs / 2.):\n h_freq = None\n if l_freq is not None and not isinstance(l_freq, float):\n l_freq = float(l_freq)\n if h_freq is not None and not isinstance(h_freq, float):\n h_freq = float(h_freq)\n\n if not self.preload:\n raise RuntimeError('Raw data needs to be preloaded to filter. Use '\n 'preload=True (or string) in the constructor.')\n if picks is None:\n if 'ICA ' in ','.join(self.ch_names):\n pick_parameters = dict(misc=True, ref_meg=False)\n else:\n pick_parameters = dict(meg=True, eeg=True, ref_meg=False)\n picks = pick_types(self.info, exclude=[], **pick_parameters)\n # let's be safe.\n if len(picks) < 1:\n raise RuntimeError('Could not find any valid channels for '\n 'your Raw object. 
Please contact the '\n 'MNE-Python developers.')\n\n # update info if filter is applied to all data channels,\n # and it's not a band-stop filter\n if h_freq is not None:\n if (l_freq is None or l_freq < h_freq) and \\\n (self.info[\"lowpass\"] is None or\n h_freq < self.info['lowpass']):\n self.info['lowpass'] = h_freq\n if l_freq is not None:\n if (h_freq is None or l_freq < h_freq) and \\\n (self.info[\"highpass\"] is None or\n l_freq > self.info['highpass']):\n self.info['highpass'] = l_freq\n if l_freq is None and h_freq is not None:\n logger.info('Low-pass filtering at %0.2g Hz' % h_freq)\n low_pass_filter(self._data, fs, h_freq,\n filter_length=filter_length,\n trans_bandwidth=h_trans_bandwidth, method=method,\n iir_params=iir_params, picks=picks, n_jobs=n_jobs,\n copy=False)\n if l_freq is not None and h_freq is None:\n logger.info('High-pass filtering at %0.2g Hz' % l_freq)\n high_pass_filter(self._data, fs, l_freq,\n filter_length=filter_length,\n trans_bandwidth=l_trans_bandwidth, method=method,\n iir_params=iir_params, picks=picks, n_jobs=n_jobs,\n copy=False)\n if l_freq is not None and h_freq is not None:\n if l_freq < h_freq:\n logger.info('Band-pass filtering from %0.2g - %0.2g Hz'\n % (l_freq, h_freq))\n self._data = band_pass_filter(\n self._data, fs, l_freq, h_freq,\n filter_length=filter_length,\n l_trans_bandwidth=l_trans_bandwidth,\n h_trans_bandwidth=h_trans_bandwidth,\n method=method, iir_params=iir_params, picks=picks,\n n_jobs=n_jobs, copy=False)\n else:\n logger.info('Band-stop filtering from %0.2g - %0.2g Hz'\n % (h_freq, l_freq))\n self._data = band_stop_filter(\n self._data, fs, h_freq, l_freq,\n filter_length=filter_length,\n l_trans_bandwidth=h_trans_bandwidth,\n h_trans_bandwidth=l_trans_bandwidth, method=method,\n iir_params=iir_params, picks=picks, n_jobs=n_jobs,\n copy=False)\n\n @verbose\n def notch_filter(self, freqs, picks=None, filter_length='10s',\n notch_widths=None, trans_bandwidth=1.0, n_jobs=1,\n method='fft', iir_params=None,\n mt_bandwidth=None, p_value=0.05, verbose=None):\n \"\"\"Notch filter a subset of channels.\n\n Applies a zero-phase notch filter to the channels selected by\n \"picks\". The data of the Raw object is modified inplace.\n\n The Raw object has to be constructed using preload=True (or string).\n\n Note: If n_jobs > 1, more memory is required as \"len(picks) * n_times\"\n additional time points need to be temporaily stored in memory.\n\n Parameters\n ----------\n freqs : float | array of float | None\n Specific frequencies to filter out from data, e.g.,\n np.arange(60, 241, 60) in the US or np.arange(50, 251, 50) in\n Europe. None can only be used with the mode 'spectrum_fit',\n where an F test is used to find sinusoidal components.\n picks : array-like of int | None\n Indices of channels to filter. If None only the data (MEG/EEG)\n channels will be filtered.\n filter_length : str (Default: '10s') | int | None\n Length of the filter to use. If None or \"len(x) < filter_length\",\n the filter length used is len(x). Otherwise, if int, overlap-add\n filtering with a filter of the specified length in samples) is\n used (faster for long signals). 
If str, a human-readable time in\n units of \"s\" or \"ms\" (e.g., \"10s\" or \"5500ms\") will be converted\n to the shortest power-of-two length at least that duration.\n Not used for 'iir' filters.\n notch_widths : float | array of float | None\n Width of each stop band (centred at each freq in freqs) in Hz.\n If None, freqs / 200 is used.\n trans_bandwidth : float\n Width of the transition band in Hz.\n n_jobs : int | str\n Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda\n is installed properly, CUDA is initialized, and method='fft'.\n method : str\n 'fft' will use overlap-add FIR filtering, 'iir' will use IIR\n forward-backward filtering (via filtfilt). 'spectrum_fit' will\n use multi-taper estimation of sinusoidal components.\n iir_params : dict | None\n Dictionary of parameters to use for IIR filtering.\n See mne.filter.construct_iir_filter for details. If iir_params\n is None and method=\"iir\", 4th order Butterworth will be used.\n mt_bandwidth : float | None\n The bandwidth of the multitaper windowing function in Hz.\n Only used in 'spectrum_fit' mode.\n p_value : float\n p-value to use in F-test thresholding to determine significant\n sinusoidal components to remove when method='spectrum_fit' and\n freqs=None. Note that this will be Bonferroni corrected for the\n number of frequencies, so large p-values may be justified.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n Defaults to self.verbose.\n\n Notes\n -----\n For details, see mne.filter.notch_filter.\n \"\"\"\n if verbose is None:\n verbose = self.verbose\n fs = float(self.info['sfreq'])\n if picks is None:\n if 'ICA ' in ','.join(self.ch_names):\n pick_parameters = dict(misc=True)\n else:\n pick_parameters = dict(meg=True, eeg=True)\n picks = pick_types(self.info, exclude=[], **pick_parameters)\n # let's be safe.\n if len(picks) < 1:\n raise RuntimeError('Could not find any valid channels for '\n 'your Raw object. Please contact the '\n 'MNE-Python developers.')\n if not self.preload:\n raise RuntimeError('Raw data needs to be preloaded to filter. Use '\n 'preload=True (or string) in the constructor.')\n\n self._data = notch_filter(self._data, fs, freqs,\n filter_length=filter_length,\n notch_widths=notch_widths,\n trans_bandwidth=trans_bandwidth,\n method=method, iir_params=iir_params,\n mt_bandwidth=mt_bandwidth, p_value=p_value,\n picks=picks, n_jobs=n_jobs, copy=False)\n\n @verbose\n def resample(self, sfreq, npad=100, window='boxcar',\n stim_picks=None, n_jobs=1, verbose=None):\n \"\"\"Resample data channels.\n\n Resamples all channels. The data of the Raw object is modified inplace.\n\n The Raw object has to be constructed using preload=True (or string).\n\n WARNING: The intended purpose of this function is primarily to speed\n up computations (e.g., projection calculation) when precise timing\n of events is not required, as downsampling raw data effectively\n jitters trigger timings. It is generally recommended not to epoch\n downsampled data, but instead epoch and then downsample, as epoching\n downsampled data jitters triggers.\n\n Parameters\n ----------\n sfreq : float\n New sample rate to use.\n npad : int\n Amount to pad the start and end of the data.\n window : string or tuple\n Window to use in resampling. See scipy.signal.resample.\n stim_picks : array of int | None\n Stim channels. These channels are simply subsampled or\n supersampled (without applying any filtering). 
This reduces\n resampling artifacts in stim channels, but may lead to missing\n triggers. If None, stim channels are automatically chosen using\n mne.pick_types(raw.info, meg=False, stim=True, exclude=[]).\n n_jobs : int | str\n Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda\n is installed properly and CUDA is initialized.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n Defaults to self.verbose.\n\n Notes\n -----\n For some data, it may be more accurate to use npad=0 to reduce\n artifacts. This is dataset dependent -- check your data!\n \"\"\"\n if not self.preload:\n raise RuntimeError('Can only resample preloaded data')\n sfreq = float(sfreq)\n o_sfreq = float(self.info['sfreq'])\n\n offsets = np.concatenate(([0], np.cumsum(self._raw_lengths)))\n new_data = list()\n # set up stim channel processing\n if stim_picks is None:\n stim_picks = pick_types(self.info, meg=False, ref_meg=False,\n stim=True, exclude=[])\n stim_picks = np.asanyarray(stim_picks)\n ratio = sfreq / o_sfreq\n for ri in range(len(self._raw_lengths)):\n data_chunk = self._data[:, offsets[ri]:offsets[ri + 1]]\n new_data.append(resample(data_chunk, sfreq, o_sfreq, npad,\n n_jobs=n_jobs))\n new_ntimes = new_data[ri].shape[1]\n\n # Now deal with the stim channels. In empirical testing, it was\n # faster to resample all channels (above) and then replace the\n # stim channels than it was to only resample the proper subset\n # of channels and then use np.insert() to restore the stims\n\n # figure out which points in old data to subsample\n # protect against out-of-bounds, which can happen (having\n # one sample more than expected) due to padding\n stim_inds = np.minimum(np.floor(np.arange(new_ntimes) /\n ratio).astype(int),\n data_chunk.shape[1] - 1)\n for sp in stim_picks:\n new_data[ri][sp] = data_chunk[[sp]][:, stim_inds]\n\n self._first_samps[ri] = int(self._first_samps[ri] * ratio)\n self._last_samps[ri] = self._first_samps[ri] + new_ntimes - 1\n self._raw_lengths[ri] = new_ntimes\n\n # adjust affected variables\n self._data = np.concatenate(new_data, axis=1)\n self.info['sfreq'] = sfreq\n self._update_times()\n\n def crop(self, tmin=0.0, tmax=None, copy=True):\n \"\"\"Crop raw data file.\n\n Limit the data from the raw file to go between specific times. Note\n that the new tmin is assumed to be t=0 for all subsequently called\n functions (e.g., time_as_index, or Epochs). New first_samp and\n last_samp are set accordingly. 
And data are modified in-place when\n called with copy=False.\n\n Parameters\n ----------\n tmin : float\n New start time in seconds (must be >= 0).\n tmax : float | None\n New end time in seconds of the data (cannot exceed data duration).\n copy : bool\n If False Raw is cropped in place.\n\n Returns\n -------\n raw : instance of Raw\n The cropped raw object.\n \"\"\"\n raw = self.copy() if copy is True else self\n max_time = (raw.n_times - 1) / raw.info['sfreq']\n if tmax is None:\n tmax = max_time\n\n if tmin > tmax:\n raise ValueError('tmin must be less than tmax')\n if tmin < 0.0:\n raise ValueError('tmin must be >= 0')\n elif tmax > max_time:\n raise ValueError('tmax must be less than or equal to the max raw '\n 'time (%0.4f sec)' % max_time)\n\n smin, smax = np.where(_time_mask(self.times, tmin, tmax))[0][[0, -1]]\n cumul_lens = np.concatenate(([0], np.array(raw._raw_lengths,\n dtype='int')))\n cumul_lens = np.cumsum(cumul_lens)\n keepers = np.logical_and(np.less(smin, cumul_lens[1:]),\n np.greater_equal(smax, cumul_lens[:-1]))\n keepers = np.where(keepers)[0]\n raw._first_samps = np.atleast_1d(raw._first_samps[keepers])\n # Adjust first_samp of first used file!\n raw._first_samps[0] += smin - cumul_lens[keepers[0]]\n raw._last_samps = np.atleast_1d(raw._last_samps[keepers])\n raw._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax\n raw._raw_extras = [r for ri, r in enumerate(raw._raw_extras)\n if ri in keepers]\n if raw.preload:\n # slice and copy to avoid the reference to large array\n raw._data = raw._data[:, smin:smax + 1].copy()\n raw._update_times()\n return raw\n\n @verbose\n def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=10,\n drop_small_buffer=False, proj=False, fmt='single',\n overwrite=False, split_size='2GB', verbose=None):\n \"\"\"Save raw data to file\n\n Parameters\n ----------\n fname : string\n File name of the new dataset. This has to be a new filename\n unless data have been preloaded. Filenames should end with\n raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif\n or raw_tsss.fif.gz.\n picks : array-like of int | None\n Indices of channels to include. If None all channels are kept.\n tmin : float | None\n Time in seconds of first sample to save. If None first sample\n is used.\n tmax : float | None\n Time in seconds of last sample to save. If None last sample\n is used.\n buffer_size_sec : float | None\n Size of data chunks in seconds. If None, the buffer size of\n the original file is used.\n drop_small_buffer : bool\n Drop or not the last buffer. It is required by maxfilter (SSS)\n that only accepts raw files with buffers of the same size.\n proj : bool\n If True the data is saved with the projections applied (active).\n Note: If apply_proj() was used to apply the projections,\n the projectons will be active even if proj is False.\n fmt : str\n Format to use to save raw data. Valid options are 'double',\n 'single', 'int', and 'short' for 64- or 32-bit float, or 32- or\n 16-bit integers, respectively. It is **strongly** recommended to\n use 'single', as this is backward-compatible, and is standard for\n maintaining precision. Note that using 'short' or 'int' may result\n in loss of precision, complex data cannot be saved as 'short',\n and neither complex data types nor real data stored as 'double'\n can be loaded with the MNE command-line tools. 
See raw.orig_format\n to determine the format the original data were stored in.\n overwrite : bool\n If True, the destination file (if it exists) will be overwritten.\n If False (default), an error will be raised if the file exists.\n split_size : string | int\n Large raw files are automatically split into multiple pieces. This\n parameter specifies the maximum size of each piece. If the\n parameter is an integer, it specifies the size in Bytes. It is\n also possible to pass a human-readable string, e.g., 100MB.\n Note: Due to FIFF file limitations, the maximum split size is 2GB.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n Defaults to self.verbose.\n\n Notes\n -----\n If Raw is a concatenation of several raw files, **be warned** that\n only the measurement information from the first raw file is stored.\n This likely means that certain operations with external tools may not\n work properly on a saved concatenated file (e.g., probably some\n or all forms of SSS). It is recommended not to concatenate and\n then save raw files for this reason.\n \"\"\"\n check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif',\n 'raw.fif.gz', 'raw_sss.fif.gz',\n 'raw_tsss.fif.gz'))\n\n if isinstance(split_size, string_types):\n exp = dict(MB=20, GB=30).get(split_size[-2:], None)\n if exp is None:\n raise ValueError('split_size has to end with either'\n '\"MB\" or \"GB\"')\n split_size = int(float(split_size[:-2]) * 2 ** exp)\n\n if split_size > 2147483648:\n raise ValueError('split_size cannot be larger than 2GB')\n\n fname = op.realpath(fname)\n if not self.preload and fname in self._filenames:\n raise ValueError('You cannot save data to the same file.'\n ' Please use a different filename.')\n\n if self.preload:\n if np.iscomplexobj(self._data):\n warnings.warn('Saving raw file with complex data. 
Loading '\n 'with command-line MNE tools will not work.')\n\n type_dict = dict(short=FIFF.FIFFT_DAU_PACK16,\n int=FIFF.FIFFT_INT,\n single=FIFF.FIFFT_FLOAT,\n double=FIFF.FIFFT_DOUBLE)\n if fmt not in type_dict.keys():\n raise ValueError('fmt must be \"short\", \"int\", \"single\", '\n 'or \"double\"')\n reset_dict = dict(short=False, int=False, single=True, double=True)\n reset_range = reset_dict[fmt]\n data_type = type_dict[fmt]\n\n data_test = self[0, 0][0]\n if fmt == 'short' and np.iscomplexobj(data_test):\n raise ValueError('Complex data must be saved as \"single\" or '\n '\"double\", not \"short\"')\n\n # check for file existence\n _check_fname(fname, overwrite)\n\n if proj:\n info = copy.deepcopy(self.info)\n projector, info = setup_proj(info)\n activate_proj(info['projs'], copy=False)\n else:\n info = self.info\n projector = None\n\n # set the correct compensation grade and make inverse compensator\n inv_comp = None\n if self.comp is not None:\n print(self.comp)\n inv_comp = linalg.inv(self.comp)\n set_current_comp(info, self._orig_comp_grade)\n\n #\n # Set up the reading parameters\n #\n\n # Convert to samples\n start = int(np.floor(tmin * self.info['sfreq']))\n\n if tmax is None:\n stop = self.last_samp + 1 - self.first_samp\n else:\n stop = int(np.floor(tmax * self.info['sfreq']))\n\n if buffer_size_sec is None:\n if 'buffer_size_sec' in self.info:\n buffer_size_sec = self.info['buffer_size_sec']\n else:\n buffer_size_sec = 10.0\n buffer_size = int(np.ceil(buffer_size_sec * self.info['sfreq']))\n\n # write the raw file\n _write_raw(fname, self, info, picks, fmt, data_type, reset_range,\n start, stop, buffer_size, projector, inv_comp,\n drop_small_buffer, split_size, 0, None)\n\n def plot(self, events=None, duration=10.0, start=0.0, n_channels=20,\n bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),\n event_color='cyan', scalings=None, remove_dc=True, order='type',\n show_options=False, title=None, show=True, block=False,\n highpass=None, lowpass=None, filtorder=4, clipping=None):\n \"\"\"Plot raw data\n\n Parameters\n ----------\n events : array | None\n Events to show with vertical bars.\n duration : float\n Time window (sec) to plot in a given time.\n start : float\n Initial time to show (can be changed dynamically once plotted).\n n_channels : int\n Number of channels to plot at once.\n bgcolor : color object\n Color of the background.\n color : dict | color object | None\n Color for the data traces. If None, defaults to::\n\n dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',\n emg='k', ref_meg='steelblue', misc='k', stim='k',\n resp='k', chpi='k')\n\n bad_color : color object\n Color to make bad channels.\n event_color : color object\n Color to use for events.\n scalings : dict | None\n Scale factors for the traces. If None, defaults to::\n\n dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,\n emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,\n resp=1, chpi=1e-4)\n\n remove_dc : bool\n If True remove DC component when plotting data.\n order : 'type' | 'original' | array\n Order in which to plot data. 'type' groups by channel type,\n 'original' plots in the order of ch_names, array gives the\n indices to use in plotting.\n show_options : bool\n If True, a dialog for options related to projection is shown.\n title : str | None\n The title of the window. 
If None, and either the filename of the\n raw object or '<unknown>' will be displayed as title.\n show : bool\n Show figures if True\n block : bool\n Whether to halt program execution until the figure is closed.\n Useful for setting bad channels on the fly (click on line).\n May not work on all systems / platforms.\n highpass : float | None\n Highpass to apply when displaying data.\n lowpass : float | None\n Lowpass to apply when displaying data.\n filtorder : int\n Filtering order. Note that for efficiency and simplicity,\n filtering during plotting uses forward-backward IIR filtering,\n so the effective filter order will be twice ``filtorder``.\n Filtering the lines for display may also produce some edge\n artifacts (at the left and right edges) of the signals\n during display. Filtering requires scipy >= 0.10.\n clipping : str | None\n If None, channels are allowed to exceed their designated bounds in\n the plot. If \"clamp\", then values are clamped to the appropriate\n range for display, creating step-like artifacts. If \"transparent\",\n then excessive values are not shown, creating gaps in the traces.\n\n Returns\n -------\n fig : Instance of matplotlib.figure.Figure\n Raw traces.\n\n Notes\n -----\n The arrow keys (up/down/left/right) can typically be used to navigate\n between channels and time ranges, but this depends on the backend\n matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).\n To mark or un-mark a channel as bad, click on the rather flat segments\n of a channel's time series. The changes will be reflected immediately\n in the raw object's ``raw.info['bads']`` entry.\n \"\"\"\n return plot_raw(self, events, duration, start, n_channels, bgcolor,\n color, bad_color, event_color, scalings, remove_dc,\n order, show_options, title, show, block, highpass,\n lowpass, filtorder, clipping)\n\n @verbose\n def plot_psd(self, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,\n proj=False, n_fft=2048, picks=None, ax=None,\n color='black', area_mode='std', area_alpha=0.33,\n n_overlap=0, dB=True, show=True, n_jobs=1, verbose=None):\n \"\"\"Plot the power spectral density across channels\n\n Parameters\n ----------\n tmin : float\n Start time for calculations.\n tmax : float\n End time for calculations.\n fmin : float\n Start frequency to consider.\n fmax : float\n End frequency to consider.\n proj : bool\n Apply projection.\n n_fft : int\n Number of points to use in Welch FFT calculations.\n picks : array-like of int | None\n List of channels to use. Cannot be None if `ax` is supplied. If\n both `picks` and `ax` are None, separate subplots will be created\n for each standard channel type (`mag`, `grad`, and `eeg`).\n ax : instance of matplotlib Axes | None\n Axes to plot into. If None, axes will be created.\n color : str | tuple\n A matplotlib-compatible color to use.\n area_mode : str | None\n How to plot area. If 'std', the mean +/- 1 STD (across channels)\n will be plotted. If 'range', the min and max (across channels)\n will be plotted. Bad channels will be excluded from these\n calculations. If None, no area will be plotted.\n area_alpha : float\n Alpha for the area.\n n_overlap : int\n The number of points of overlap between blocks. 
The default value\n is 0 (no overlap).\n dB : bool\n If True, transform data to decibels.\n show : bool\n Call pyplot.show() at the end.\n n_jobs : int\n Number of jobs to run in parallel.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n fig : instance of matplotlib figure\n Figure distributing one image per channel across sensor topography.\n \"\"\"\n return plot_raw_psd(self, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,\n proj=proj, n_fft=n_fft, picks=picks, ax=ax,\n color=color, area_mode=area_mode,\n area_alpha=area_alpha, n_overlap=n_overlap,\n dB=dB, show=show, n_jobs=n_jobs)\n\n def time_as_index(self, times, use_first_samp=False):\n \"\"\"Convert time to indices\n\n Parameters\n ----------\n times : list-like | float | int\n List of numbers or a number representing points in time.\n use_first_samp : boolean\n If True, time is treated as relative to the session onset, else\n as relative to the recording onset.\n\n Returns\n -------\n index : ndarray\n Indices corresponding to the times supplied.\n \"\"\"\n return _time_as_index(times, self.info['sfreq'], self.first_samp,\n use_first_samp)\n\n def index_as_time(self, index, use_first_samp=False):\n \"\"\"Convert indices to time\n\n Parameters\n ----------\n index : list-like | int\n List of ints or int representing points in time.\n use_first_samp : boolean\n If True, the time returned is relative to the session onset, else\n relative to the recording onset.\n\n Returns\n -------\n times : ndarray\n Times corresponding to the index supplied.\n \"\"\"\n return _index_as_time(index, self.info['sfreq'], self.first_samp,\n use_first_samp)\n\n def estimate_rank(self, tstart=0.0, tstop=30.0, tol=1e-4,\n return_singular=False, picks=None, scalings='norm'):\n \"\"\"Estimate rank of the raw data\n\n This function is meant to provide a reasonable estimate of the rank.\n The true rank of the data depends on many factors, so use at your\n own risk.\n\n Parameters\n ----------\n tstart : float\n Start time to use for rank estimation. Default is 0.0.\n tstop : float | None\n End time to use for rank estimation. Default is 30.0.\n If None, the end time of the raw file is used.\n tol : float\n Tolerance for singular values to consider non-zero in\n calculating the rank. The singular values are calculated\n in this method such that independent data are expected to\n have singular value around one.\n return_singular : bool\n If True, also return the singular values that were used\n to determine the rank.\n picks : array_like of int, shape (n_selected_channels,)\n The channels to be considered for rank estimation.\n If None (default) meg and eeg channels are included.\n scalings : dict | 'norm'\n To achieve reliable rank estimation on multiple sensors,\n sensors have to be rescaled. This parameter controls the\n rescaling. 
If dict, it will update the\n following dict of defaults:\n\n dict(mag=1e11, grad=1e9, eeg=1e5)\n\n If 'norm' data will be scaled by internally computed\n channel-wise norms.\n Defaults to 'norm'.\n\n Returns\n -------\n rank : int\n Estimated rank of the data.\n s : array\n If return_singular is True, the singular values that were\n thresholded to determine the rank are also returned.\n\n Notes\n -----\n If data are not pre-loaded, the appropriate data will be loaded\n by this function (can be memory intensive).\n\n Projectors are not taken into account unless they have been applied\n to the data using apply_proj(), since it is not always possible\n to tell whether or not projectors have been applied previously.\n\n Bad channels will be excluded from calculations.\n \"\"\"\n from ..cov import _estimate_rank_meeg_signals\n\n start = max(0, self.time_as_index(tstart)[0])\n if tstop is None:\n stop = self.n_times - 1\n else:\n stop = min(self.n_times - 1, self.time_as_index(tstop)[0])\n tslice = slice(start, stop + 1)\n if picks is None:\n picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False,\n exclude='bads')\n # ensure we don't get a view of data\n if len(picks) == 1:\n return 1.0, 1.0\n # this should already be a copy, so we can overwrite it\n data = self[picks, tslice][0]\n out = _estimate_rank_meeg_signals(\n data, pick_info(self.info, picks),\n scalings=scalings, tol=tol, return_singular=return_singular,\n copy=False)\n\n return out\n\n @property\n def ch_names(self):\n \"\"\"Channel names\"\"\"\n return self.info['ch_names']\n\n @property\n def times(self):\n \"\"\"Time points\"\"\"\n return self._times\n\n @property\n def n_times(self):\n \"\"\"Number of time points\"\"\"\n return self.last_samp - self.first_samp + 1\n\n def __len__(self):\n return self.n_times\n\n def load_bad_channels(self, bad_file=None, force=False):\n \"\"\"\n Mark channels as bad from a text file, in the style\n (mostly) of the C function mne_mark_bad_channels\n\n Parameters\n ----------\n bad_file : string\n File name of the text file containing bad channels\n If bad_file = None, bad channels are cleared, but this\n is more easily done directly as raw.info['bads'] = [].\n\n force : boolean\n Whether or not to force bad channel marking (of those\n that exist) if channels are not found, instead of\n raising an error.\n \"\"\"\n\n if bad_file is not None:\n # Check to make sure bad channels are there\n names = frozenset(self.info['ch_names'])\n with open(bad_file) as fid:\n bad_names = [l for l in fid.read().splitlines() if l]\n names_there = [ci for ci in bad_names if ci in names]\n count_diff = len(bad_names) - len(names_there)\n\n if count_diff > 0:\n if not force:\n raise ValueError('Bad channels from:\\n%s\\n not found '\n 'in:\\n%s' % (bad_file,\n self._filenames[0]))\n else:\n warnings.warn('%d bad channels from:\\n%s\\nnot found '\n 'in:\\n%s' % (count_diff, bad_file,\n self._filenames[0]))\n self.info['bads'] = names_there\n else:\n self.info['bads'] = []\n\n def append(self, raws, preload=None):\n \"\"\"Concatenate raw instances as if they were continuous\n\n Parameters\n ----------\n raws : list, or Raw instance\n list of Raw instances to concatenate to the current instance\n (in order), or a single raw instance to concatenate.\n preload : bool, str, or None (default None)\n Preload data into memory for data manipulation and faster indexing.\n If True, the data will be preloaded into memory (fast, requires\n large amount of memory). 
If preload is a string, preload is the\n file name of a memory-mapped file which is used to store the data\n on the hard drive (slower, requires less memory). If preload is\n None, preload=True or False is inferred using the preload status\n of the raw files passed in.\n \"\"\"\n from .fiff.raw import RawFIF\n from .kit.kit import RawKIT\n from .edf.edf import RawEDF\n\n if not isinstance(raws, list):\n raws = [raws]\n\n # make sure the raws are compatible\n all_raws = [self]\n all_raws += raws\n _check_raw_compatibility(all_raws)\n\n # deal with preloading data first (while files are separate)\n all_preloaded = self.preload and all(r.preload for r in raws)\n if preload is None:\n if all_preloaded:\n preload = True\n else:\n preload = False\n\n if not preload and not isinstance(self, (RawFIF, RawKIT, RawEDF)):\n raise RuntimeError('preload must be True to concatenate '\n 'files unless they are FIF, KIT, or EDF')\n if preload is False:\n if self.preload:\n self._data = None\n self.preload = False\n else:\n # do the concatenation ourselves since preload might be a string\n nchan = self.info['nchan']\n c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)])\n nsamp = c_ns[-1]\n\n if not self.preload:\n this_data = self._read_segment()[0]\n else:\n this_data = self._data\n\n # allocate the buffer\n if isinstance(preload, string_types):\n _data = np.memmap(preload, mode='w+', dtype=this_data.dtype,\n shape=(nchan, nsamp))\n else:\n _data = np.empty((nchan, nsamp), dtype=this_data.dtype)\n\n _data[:, 0:c_ns[0]] = this_data\n\n for ri in range(len(raws)):\n if not raws[ri].preload:\n # read the data directly into the buffer\n data_buffer = _data[:, c_ns[ri]:c_ns[ri + 1]]\n raws[ri]._read_segment(data_buffer=data_buffer)\n else:\n _data[:, c_ns[ri]:c_ns[ri + 1]] = raws[ri]._data\n self._data = _data\n self.preload = True\n\n # now combine information from each raw file to construct new self\n for r in raws:\n self._first_samps = np.r_[self._first_samps, r._first_samps]\n self._last_samps = np.r_[self._last_samps, r._last_samps]\n self._raw_extras += r._raw_extras\n self._filenames += r._filenames\n self._update_times()\n\n def close(self):\n \"\"\"Clean up the object.\n\n Does nothing for objects that close their file descriptors.\n Things like RawFIF will override this method.\n \"\"\"\n pass\n\n def copy(self):\n \"\"\" Return copy of Raw instance\n \"\"\"\n return deepcopy(self)\n\n def __repr__(self):\n s = ', '.join(('%r' % op.basename(self._filenames[0]),\n \"n_channels x n_times : %s x %s\"\n % (len(self.ch_names), self.n_times)))\n s = \"n_channels x n_times : %s x %s\" % (len(self.info['ch_names']),\n self.n_times)\n return \"<%s | %s>\" % (self.__class__.__name__, s)\n\n def add_events(self, events, stim_channel=None):\n \"\"\"Add events to stim channel\n\n Parameters\n ----------\n events : ndarray, shape (n_events, 3)\n Events to add. The first column specifies the sample number of\n each event, the second column is ignored, and the third column\n provides the event value. If events already exist in the Raw\n instance at the given sample numbers, the event values will be\n added together.\n stim_channel : str | None\n Name of the stim channel to add to. If None, the config variable\n 'MNE_STIM_CHANNEL' is used. 
If this is not found, it will default\n to 'STI 014'.\n\n Notes\n -----\n Data must be preloaded in order to add events.\n \"\"\"\n if not self.preload:\n raise RuntimeError('cannot add events unless data are preloaded')\n events = np.asarray(events)\n if events.ndim != 2 or events.shape[1] != 3:\n raise ValueError('events must be shape (n_events, 3)')\n stim_channel = _get_stim_channel(stim_channel)\n pick = pick_channels(self.ch_names, stim_channel)\n if len(pick) == 0:\n raise ValueError('Channel %s not found' % stim_channel)\n pick = pick[0]\n idx = events[:, 0].astype(int)\n if np.any(idx < self.first_samp) or np.any(idx > self.last_samp):\n raise ValueError('event sample numbers must be between %s and %s'\n % (self.first_samp, self.last_samp))\n if not all(idx == events[:, 0]):\n raise ValueError('event sample numbers must be integers')\n self._data[pick, idx - self.first_samp] += events[:, 2]\n\n\ndef _allocate_data(data, data_buffer, data_shape, dtype):\n if data is None:\n # if not already done, allocate array with right type\n if isinstance(data_buffer, string_types):\n # use a memmap\n data = np.memmap(data_buffer, mode='w+',\n dtype=dtype, shape=data_shape)\n else:\n data = np.zeros(data_shape, dtype=dtype)\n return data\n\n\ndef _time_as_index(times, sfreq, first_samp=0, use_first_samp=False):\n \"\"\"Convert time to indices\n\n Parameters\n ----------\n times : list-like | float | int\n List of numbers or a number representing points in time.\n use_first_samp : boolean\n If True, time is treated as relative to the session onset, else\n as relative to the recording onset.\n\n Returns\n -------\n index : ndarray\n Indices corresponding to the times supplied.\n \"\"\"\n index = np.atleast_1d(times) * sfreq\n index -= (first_samp if use_first_samp else 0)\n return index.astype(int)\n\n\ndef _index_as_time(index, sfreq, first_samp=0, use_first_samp=False):\n \"\"\"Convert indices to time\n\n Parameters\n ----------\n index : list-like | int\n List of ints or int representing points in time.\n use_first_samp : boolean\n If True, the time returned is relative to the session onset, else\n relative to the recording onset.\n\n Returns\n -------\n times : ndarray\n Times corresponding to the index supplied.\n \"\"\"\n times = np.atleast_1d(index) + (first_samp if use_first_samp else 0)\n return times / sfreq\n\n\nclass _RawShell():\n \"\"\"Used for creating a temporary raw object\"\"\"\n\n def __init__(self):\n self.first_samp = None\n self.last_samp = None\n self._cals = None\n self._rawdir = None\n self._projector = None\n\n @property\n def n_times(self):\n return self.last_samp - self.first_samp + 1\n\n\n###############################################################################\n# Writing\ndef _write_raw(fname, raw, info, picks, fmt, data_type, reset_range, start,\n stop, buffer_size, projector, inv_comp, drop_small_buffer,\n split_size, part_idx, prev_fname):\n \"\"\"Write raw file with splitting\n \"\"\"\n\n if part_idx > 0:\n # insert index in filename\n path, base = op.split(fname)\n idx = base.find('.')\n use_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,\n base[idx + 1:]))\n else:\n use_fname = fname\n logger.info('Writing %s' % use_fname)\n\n meas_id = info['meas_id']\n\n fid, cals = _start_writing_raw(use_fname, info, picks, data_type,\n reset_range)\n\n first_samp = raw.first_samp + start\n if first_samp != 0:\n write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first_samp)\n\n # previous file name and id\n if part_idx > 0 and prev_fname is not None:\n start_block(fid, 
FIFF.FIFFB_REF)\n write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_PREV_FILE)\n write_string(fid, FIFF.FIFF_REF_FILE_NAME, prev_fname)\n if meas_id is not None:\n write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)\n write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx - 1)\n end_block(fid, FIFF.FIFFB_REF)\n\n pos_prev = None\n for first in range(start, stop, buffer_size):\n last = first + buffer_size\n if last >= stop:\n last = stop + 1\n\n if picks is None:\n data, times = raw[:, first:last]\n else:\n data, times = raw[picks, first:last]\n\n if projector is not None:\n data = np.dot(projector, data)\n\n if ((drop_small_buffer and (first > start) and\n (len(times) < buffer_size))):\n logger.info('Skipping data chunk due to small buffer ... '\n '[done]')\n break\n logger.info('Writing ...')\n\n if pos_prev is None:\n pos_prev = fid.tell()\n\n _write_raw_buffer(fid, data, cals, fmt, inv_comp)\n\n pos = fid.tell()\n this_buff_size_bytes = pos - pos_prev\n if this_buff_size_bytes > split_size / 2:\n raise ValueError('buffer size is too large for the given split'\n 'size: decrease \"buffer_size_sec\" or increase'\n '\"split_size\".')\n if pos > split_size:\n raise logger.warning('file is larger than \"split_size\"')\n\n # Split files if necessary, leave some space for next file info\n if pos >= split_size - this_buff_size_bytes - 2 ** 20:\n next_fname, next_idx = _write_raw(\n fname, raw, info, picks, fmt,\n data_type, reset_range, first + buffer_size, stop, buffer_size,\n projector, inv_comp, drop_small_buffer, split_size,\n part_idx + 1, use_fname)\n\n start_block(fid, FIFF.FIFFB_REF)\n write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)\n write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))\n if meas_id is not None:\n write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)\n write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)\n end_block(fid, FIFF.FIFFB_REF)\n break\n\n pos_prev = pos\n\n logger.info('Closing %s [done]' % use_fname)\n if info.get('maxshield', False):\n end_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)\n else:\n end_block(fid, FIFF.FIFFB_RAW_DATA)\n end_block(fid, FIFF.FIFFB_MEAS)\n end_file(fid)\n return use_fname, part_idx\n\n\ndef _start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,\n reset_range=True):\n \"\"\"Start write raw data in file\n\n Data will be written in float\n\n Parameters\n ----------\n name : string\n Name of the file to create.\n info : dict\n Measurement info.\n sel : array of int, optional\n Indices of channels to include. By default all channels are included.\n data_type : int\n The data_type in case it is necessary. 
Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), 16 (FIFFT_DAU_PACK16), or 3 (FIFFT_INT) for raw data.\n reset_range : bool\n If True, the info['chs'][k]['range'] parameter will be set to unity.\n\n Returns\n -------\n fid : file\n The file descriptor.\n cals : list\n calibration factors.\n \"\"\"\n #\n # Create the file and save the essentials\n #\n fid = start_file(name)\n start_block(fid, FIFF.FIFFB_MEAS)\n write_id(fid, FIFF.FIFF_BLOCK_ID)\n if info['meas_id'] is not None:\n write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])\n #\n # Measurement info\n #\n info = copy.deepcopy(info)\n if sel is not None:\n info['chs'] = [info['chs'][k] for k in sel]\n info['nchan'] = len(sel)\n\n ch_names = [c['ch_name'] for c in info['chs']] # name of good channels\n comps = copy.deepcopy(info['comps'])\n for c in comps:\n row_idx = [k for k, n in enumerate(c['data']['row_names'])\n if n in ch_names]\n row_names = [c['data']['row_names'][i] for i in row_idx]\n rowcals = c['rowcals'][row_idx]\n c['rowcals'] = rowcals\n c['data']['nrow'] = len(row_names)\n c['data']['row_names'] = row_names\n c['data']['data'] = c['data']['data'][row_idx]\n info['comps'] = comps\n\n cals = []\n for k in range(info['nchan']):\n #\n # Scan numbers may have been messed up\n #\n info['chs'][k]['scanno'] = k + 1 # scanno starts at 1 in FIF format\n if reset_range is True:\n info['chs'][k]['range'] = 1.0\n cals.append(info['chs'][k]['cal'] * info['chs'][k]['range'])\n\n write_meas_info(fid, info, data_type=data_type, reset_range=reset_range)\n\n #\n # Start the raw data\n #\n if info.get('maxshield', False):\n start_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)\n else:\n start_block(fid, FIFF.FIFFB_RAW_DATA)\n\n return fid, cals\n\n\ndef _write_raw_buffer(fid, buf, cals, fmt, inv_comp):\n \"\"\"Write raw buffer\n\n Parameters\n ----------\n fid : file descriptor\n an open raw data file.\n buf : array\n The buffer to write.\n cals : array\n Calibration factors.\n fmt : str\n 'short', 'int', 'single', or 'double' for 16/32 bit int or 32/64 bit\n float for each item. This will be doubled for complex datatypes. 
Note\n that short and int formats cannot be used for complex data.\n inv_comp : array | None\n The CTF compensation matrix used to revert compensation\n change when reading.\n \"\"\"\n if buf.shape[0] != len(cals):\n raise ValueError('buffer and calibration sizes do not match')\n\n if fmt not in ['short', 'int', 'single', 'double']:\n raise ValueError('fmt must be \"short\", \"single\", or \"double\"')\n\n if np.isrealobj(buf):\n if fmt == 'short':\n write_function = write_dau_pack16\n elif fmt == 'int':\n write_function = write_int\n elif fmt == 'single':\n write_function = write_float\n else:\n write_function = write_double\n else:\n if fmt == 'single':\n write_function = write_complex64\n elif fmt == 'double':\n write_function = write_complex128\n else:\n raise ValueError('only \"single\" and \"double\" supported for '\n 'writing complex data')\n\n if inv_comp is not None:\n buf = np.dot(inv_comp / np.ravel(cals)[:, None], buf)\n else:\n buf = buf / np.ravel(cals)[:, None]\n\n write_function(fid, FIFF.FIFF_DATA_BUFFER, buf)\n\n\ndef _envelope(x):\n \"\"\" Compute envelope signal \"\"\"\n from scipy.signal import hilbert\n return np.abs(hilbert(x))\n\n\ndef _check_raw_compatibility(raw):\n \"\"\"Check to make sure all instances of Raw\n in the input list raw have compatible parameters\"\"\"\n for ri in range(1, len(raw)):\n if not isinstance(raw[ri], type(raw[0])):\n raise ValueError('raw[%d] type must match' % ri)\n if not raw[ri].info['nchan'] == raw[0].info['nchan']:\n raise ValueError('raw[%d][\\'info\\'][\\'nchan\\'] must match' % ri)\n if not raw[ri].info['bads'] == raw[0].info['bads']:\n raise ValueError('raw[%d][\\'info\\'][\\'bads\\'] must match' % ri)\n if not raw[ri].info['sfreq'] == raw[0].info['sfreq']:\n raise ValueError('raw[%d][\\'info\\'][\\'sfreq\\'] must match' % ri)\n if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']):\n raise ValueError('raw[%d][\\'info\\'][\\'ch_names\\'] must match' % ri)\n if not all(raw[ri]._cals == raw[0]._cals):\n raise ValueError('raw[%d]._cals must match' % ri)\n if len(raw[0].info['projs']) != len(raw[ri].info['projs']):\n raise ValueError('SSP projectors in raw files must be the same')\n if not all(_proj_equal(p1, p2) for p1, p2 in\n zip(raw[0].info['projs'], raw[ri].info['projs'])):\n raise ValueError('SSP projectors in raw files must be the same')\n if not all(r.orig_format == raw[0].orig_format for r in raw):\n warnings.warn('raw files do not all have the same data format, '\n 'could result in precision mismatch. Setting '\n 'raw.orig_format=\"unknown\"')\n raw[0].orig_format = 'unknown'\n\n\ndef concatenate_raws(raws, preload=None, events_list=None):\n \"\"\"Concatenate raw instances as if they were continuous. Note that raws[0]\n is modified in-place to achieve the concatenation.\n\n Parameters\n ----------\n raws : list\n list of Raw instances to concatenate (in order).\n preload : bool, or None\n If None, preload status is inferred using the preload status of the\n raw files passed in. True or False sets the resulting raw file to\n have or not have data preloaded.\n events_list : None | list\n The events to concatenate. Defaults to None.\n\n Returns\n -------\n raw : instance of Raw\n The result of the concatenation (first Raw instance passed in).\n events : ndarray of int, shape (n events, 3)\n The events. 
Only returned if `event_list` is not None.\n \"\"\"\n if events_list is not None:\n if len(events_list) != len(raws):\n raise ValueError('`raws` and `event_list` are required '\n 'to be of the same length')\n first, last = zip(*[(r.first_samp, r.last_samp) for r in raws])\n events = concatenate_events(events_list, first, last)\n raws[0].append(raws[1:], preload)\n\n if events_list is None:\n return raws[0]\n else:\n return raws[0], events\n\n\ndef _check_update_montage(info, montage):\n \"\"\" Helper function for eeg readers to add montage\"\"\"\n if montage is not None:\n if not isinstance(montage, (str, Montage)):\n err = (\"Montage must be str, None, or instance of Montage. \"\n \"%s was provided\" % type(montage))\n raise TypeError(err)\n if montage is not None:\n if isinstance(montage, str):\n montage = read_montage(montage)\n _set_montage(info, montage)\n\n missing_positions = []\n exclude = (FIFF.FIFFV_EOG_CH, FIFF.FIFFV_MISC_CH,\n FIFF.FIFFV_STIM_CH)\n for ch in info['chs']:\n if not ch['kind'] in exclude:\n if np.unique(ch['loc']).size == 1:\n missing_positions.append(ch['ch_name'])\n\n # raise error if positions are missing\n if missing_positions:\n err = (\"The following positions are missing from the montage \"\n \"definitions: %s. If those channels lack positions \"\n \"because they are EOG channels use the eog parameter.\"\n % str(missing_positions))\n raise KeyError(err)\n"
] |
[
[
"numpy.dot",
"numpy.asarray",
"numpy.cumsum",
"numpy.round",
"numpy.concatenate",
"numpy.any",
"numpy.iscomplexobj",
"numpy.where",
"scipy.signal.hilbert",
"numpy.hstack",
"numpy.unique",
"numpy.less",
"numpy.arange",
"numpy.atleast_1d",
"numpy.greater_equal",
"numpy.ceil",
"numpy.asanyarray",
"numpy.diff",
"numpy.isrealobj",
"scipy.linalg.inv",
"numpy.ravel",
"numpy.repeat",
"numpy.zeros",
"numpy.nonzero",
"numpy.min",
"numpy.memmap",
"numpy.floor",
"numpy.array",
"numpy.tile",
"numpy.empty"
]
] |
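A quick check of the time-to-sample conversion implemented by the module-level `_time_as_index` helper in the raw.py dump above (a minimal self-contained sketch; the 1 kHz sampling rate and the event times are invented for illustration):

import numpy as np

def time_as_index(times, sfreq, first_samp=0, use_first_samp=False):
    # Same arithmetic as the helper above: seconds -> integer sample indices.
    index = np.atleast_1d(times) * sfreq
    index -= (first_samp if use_first_samp else 0)
    return index.astype(int)

print(time_as_index([0.5, 2.0], sfreq=1000.0))  # -> [ 500 2000]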
mcognetta/federated
|
[
"fa0c1a00b5d77768bc2f38f503f3ef1a65693945",
"fa0c1a00b5d77768bc2f38f503f3ef1a65693945"
] |
[
"tensorflow_federated/python/core/impl/tensorflow_serialization.py",
"tensorflow_federated/python/examples/mnist/models.py"
] |
[
"# Lint as: python3\n# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for serializing TensorFlow computations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport os\nimport os.path\nimport shutil\nimport sys\nimport tempfile\nimport types\nimport zipfile\n\nimport six\nimport tensorflow as tf\n\nfrom tensorflow_federated.proto.v0 import computation_pb2 as pb\nfrom tensorflow_federated.python.common_libs import py_typecheck\nfrom tensorflow_federated.python.common_libs import serialization_utils\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.impl import context_stack_base\nfrom tensorflow_federated.python.core.impl import tf_computation_context\nfrom tensorflow_federated.python.core.impl import type_serialization\nfrom tensorflow_federated.python.core.impl.utils import function_utils\nfrom tensorflow_federated.python.core.impl.utils import tensorflow_utils\nfrom tensorflow_federated.python.tensorflow_libs import graph_keys\n\n\nclass SerializationError(Exception):\n \"\"\"Error raised during value serialization or deserialization.\"\"\"\n pass\n\n\ndef finalize_binding(binding, tensor_info_map):\n \"\"\"Mutates binding by filling in actual tensor names.\n\n Args:\n binding: A `pb.Binding` or one of its submessages.\n tensor_info_map: A dict mapping the placeholder `tensor_name`s found in\n `binding` to final tensor names.\n \"\"\"\n if not binding:\n if tensor_info_map:\n raise ValueError('Empty binding, but non-empty tensor_info_map {}'.format(\n tensor_info_map))\n return\n if isinstance(binding, pb.TensorFlow.Binding):\n sub_binding = getattr(binding, binding.WhichOneof('binding'))\n finalize_binding(sub_binding, tensor_info_map)\n\n elif isinstance(binding, pb.TensorFlow.TensorBinding):\n name = binding.tensor_name\n if name not in tensor_info_map:\n raise ValueError(\n 'Did not find tensor_name {} in provided tensor_info_map with keys {}'\n .format(name, list(tensor_info_map.keys())))\n binding.tensor_name = tensor_info_map[name].name\n elif isinstance(binding, pb.TensorFlow.NamedTupleBinding):\n for sub_binding in binding.element:\n finalize_binding(sub_binding, tensor_info_map)\n else:\n raise ValueError('Unsupported binding type {}'.format(\n py_typecheck.type_string(type(binding))))\n\n\ndef serialize_tf2_as_tf_computation(target, parameter_type, unpack=None):\n \"\"\"Serializes the 'target' as a TF computation with a given parameter type.\n\n Args:\n target: The entity to convert into and serialize as a TF computation. This\n can currently only be a Python function or `tf.function`, with arguments\n matching the 'parameter_type'.\n parameter_type: The parameter type specification if the target accepts a\n parameter, or `None` if the target doesn't declare any parameters. 
Either\n an instance of `types.Type`, or something that's convertible to it by\n `types.to_type()`.\n unpack: Whether to always unpack the parameter_type. Necessary for support\n of polymorphic tf2_computations.\n\n Returns:\n The constructed `pb.Computation` instance with the `pb.TensorFlow` variant\n set.\n\n Raises:\n TypeError: If the arguments are of the wrong types.\n ValueError: If the signature of the target is not compatible with the given\n parameter type.\n \"\"\"\n py_typecheck.check_callable(target)\n parameter_type = computation_types.to_type(parameter_type)\n argspec = function_utils.get_argspec(target)\n if argspec.args and parameter_type is None:\n raise ValueError(\n 'Expected the target to declare no parameters, found {!r}.'.format(\n argspec.args))\n\n # In the codepath for TF V1 based serialization (tff.tf_computation),\n # we get the \"wrapped\" function to serialize. Here, target is the\n # raw function to be wrapped; however, we still need to know if\n # the parameter_type should be unpacked into multiple args and kwargs\n # in order to construct the TensorSpecs to be passed in the call\n # to get_concrete_fn below.\n unpack = function_utils.infer_unpack_needed(target, parameter_type, unpack)\n arg_typespecs, kwarg_typespecs, parameter_binding = (\n tensorflow_utils.get_tf_typespec_and_binding(\n parameter_type, arg_names=argspec.args, unpack=unpack))\n\n # Pseudo-global to be appended to once when target_poly below is traced.\n type_and_binding_slot = []\n\n # N.B. To serialize a tf.function or eager python code,\n # the return type must be a flat list, tuple, or dict. However, the\n # tff.tf_computation must be able to handle structured inputs and outputs.\n # Thus, we intercept the result of calling the original target fn, introspect\n # its structure to create a result_type and bindings, and then return a\n # flat dict output. It is this new \"unpacked\" tf.function that we will\n # serialize using tf.saved_model.save.\n #\n # TODO(b/117428091): The return type limitation is primarily a limitation of\n # SignatureDefs and therefore of the signatures argument to\n # tf.saved_model.save. tf.functions attached to objects and loaded back with\n # tf.saved_model.load can take/return nests; this might offer a better\n # approach to the one taken here.\n\n @tf.function\n def target_poly(*args, **kwargs):\n result = target(*args, **kwargs)\n result_dict, result_type, result_binding = (\n tensorflow_utils.get_tf2_result_dict_and_binding(result))\n assert not type_and_binding_slot\n # A \"side channel\" python output.\n type_and_binding_slot.append((result_type, result_binding))\n return result_dict\n\n # Triggers tracing so that type_and_binding_slot is filled.\n cc_fn = target_poly.get_concrete_function(*arg_typespecs, **kwarg_typespecs)\n assert len(type_and_binding_slot) == 1\n result_type, result_binding = type_and_binding_slot[0]\n\n # N.B. Note that cc_fn does *not* accept the same args and kwargs as the\n # Python target_poly; instead, it must be called with **kwargs based on the\n # unique names embedded in the TensorSpecs inside arg_typespecs and\n # kwarg_typespecs. The (preliminary) parameter_binding tracks the mapping\n # between these tensor names and the components of the (possibly nested) TFF\n # input type. 
When cc_fn is serialized, concrete tensors for each input are\n # introduced, and the call finalize_binding(parameter_binding,\n # sigs['serving_default'].inputs) updates the bindings to reference these\n # concrete tensors.\n\n # Associate vars with unique names and explicitly attach to the Checkpoint:\n var_dict = {\n 'var{:02d}'.format(i): v for i, v in enumerate(cc_fn.graph.variables)\n }\n saveable = tf.train.Checkpoint(fn=target_poly, **var_dict)\n\n try:\n # TODO(b/122081673): All we really need is the meta graph def, we could\n # probably just load that directly, e.g., using parse_saved_model from\n # tensorflow/python/saved_model/loader_impl.py, but I'm not sure we want to\n # depend on that presumably non-public symbol. Perhaps TF can expose a way\n # to just get the MetaGraphDef directly without saving to a tempfile? This\n # looks like a small change to v2.saved_model.save().\n outdir = tempfile.mkdtemp('savedmodel')\n tf.saved_model.save(saveable, outdir, signatures=cc_fn)\n\n graph = tf.Graph()\n with tf.compat.v1.Session(graph=graph) as sess:\n mgd = tf.compat.v1.saved_model.load(\n sess, tags=[tf.saved_model.SERVING], export_dir=outdir)\n finally:\n shutil.rmtree(outdir)\n sigs = mgd.signature_def\n\n # TODO(b/123102455): Figure out how to support the init_op. The meta graph def\n # contains sigs['__saved_model_init_op'].outputs['__saved_model_init_op']. It\n # probably won't do what we want, because it will want to read from\n # Checkpoints, not just run Variable initializerse (?). The right solution may\n # be to grab the target_poly.get_initialization_function(), and save a sig for\n # that.\n\n # Now, traverse the signature from the MetaGraphDef to find\n # find the actual tensor names and write them into the bindings.\n finalize_binding(parameter_binding, sigs['serving_default'].inputs)\n finalize_binding(result_binding, sigs['serving_default'].outputs)\n\n annotated_type = computation_types.FunctionType(parameter_type, result_type)\n\n return pb.Computation(\n type=pb.Type(\n function=pb.FunctionType(\n parameter=type_serialization.serialize_type(parameter_type),\n result=type_serialization.serialize_type(result_type))),\n tensorflow=pb.TensorFlow(\n graph_def=serialization_utils.pack_graph_def(mgd.graph_def),\n parameter=parameter_binding,\n result=result_binding)), annotated_type\n\n\ndef serialize_py_fn_as_tf_computation(target, parameter_type, context_stack):\n \"\"\"Serializes the 'target' as a TF computation with a given parameter type.\n\n See also `serialize_tf2_as_tf_computation` for TensorFlow 2\n serialization.\n\n Args:\n target: The entity to convert into and serialize as a TF computation. This\n can currently only be a Python function. In the future, we will add here\n support for serializing the various kinds of non-eager and eager\n functions, and eventually aim at full support for and compliance with TF\n 2.0. This function is currently required to declare either zero parameters\n if `parameter_type` is `None`, or exactly one parameter if it's not\n `None`. The nested structure of this parameter must correspond to the\n structure of the 'parameter_type'. In the future, we may support targets\n with multiple args/keyword args (to be documented in the API and\n referenced from here).\n parameter_type: The parameter type specification if the target accepts a\n parameter, or `None` if the target doesn't declare any parameters. 
Either\n an instance of `types.Type`, or something that's convertible to it by\n `types.to_type()`.\n context_stack: The context stack to use.\n\n Returns:\n A tuple of (`pb.Computation`, `tff.Type`), where the computation contains\n the instance with the `pb.TensorFlow` variant set, and the type is an\n instance of `tff.Type`, potentially including Python container annotations,\n for use by TensorFlow computation wrappers.\n\n Raises:\n TypeError: If the arguments are of the wrong types.\n ValueError: If the signature of the target is not compatible with the given\n parameter type.\n \"\"\"\n # TODO(b/113112108): Support a greater variety of target type signatures,\n # with keyword args or multiple args corresponding to elements of a tuple.\n # Document all accepted forms with examples in the API, and point to there\n # from here.\n\n py_typecheck.check_type(target, types.FunctionType)\n py_typecheck.check_type(context_stack, context_stack_base.ContextStack)\n parameter_type = computation_types.to_type(parameter_type)\n argspec = inspect.getargspec(target) # pylint: disable=deprecated-method\n\n with tf.Graph().as_default() as graph:\n args = []\n if parameter_type is not None:\n if len(argspec.args) != 1:\n raise ValueError(\n 'Expected the target to declare exactly one parameter, found {!r}.'\n .format(argspec.args))\n parameter_name = argspec.args[0]\n parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(\n parameter_name, parameter_type, graph)\n args.append(parameter_value)\n else:\n if argspec.args:\n raise ValueError(\n 'Expected the target to declare no parameters, found {!r}.'.format(\n argspec.args))\n parameter_binding = None\n context = tf_computation_context.TensorFlowComputationContext(graph)\n with context_stack.install(context):\n result = target(*args)\n\n # TODO(b/122081673): This needs to change for TF 2.0. We may also\n # want to allow the person creating a tff.tf_computation to specify\n # a different initializer; e.g., if it is known that certain\n # variables will be assigned immediately to arguments of the function,\n # then it is wasteful to initialize them before this.\n #\n # The following is a bit of a work around: the collections below may\n # contain variables more than once, hence we throw into a set. TFF needs\n # to ensure all variables are initialized, but not all variables are\n # always in the collections we expect. tff.learning._KerasModel tries to\n # pull Keras variables (that may or may not be in GLOBAL_VARIABLES) into\n # VARS_FOR_TFF_TO_INITIALIZE for now.\n all_variables = set(tf.compat.v1.global_variables() +\n tf.compat.v1.local_variables() +\n tf.compat.v1.get_collection(\n graph_keys.GraphKeys.VARS_FOR_TFF_TO_INITIALIZE))\n if all_variables:\n # Use a readable but not-too-long name for the init_op.\n name = 'init_op_for_' + '_'.join(\n [v.name.replace(':0', '') for v in all_variables])\n if len(name) > 50:\n name = 'init_op_for_{}_variables'.format(len(all_variables))\n with tf.control_dependencies(context.init_ops):\n # Before running the main new init op, run any initializers for sub-\n # computations from context.init_ops. 
Variables from import_graph_def\n # will not make it into the global collections, and so will not be\n # initialized without this code path.\n init_op_name = tf.compat.v1.initializers.variables(\n all_variables, name=name).name\n elif context.init_ops:\n init_op_name = tf.group(\n *context.init_ops, name='subcomputation_init_ops').name\n else:\n init_op_name = None\n\n result_type, result_binding = tensorflow_utils.capture_result_from_graph(\n result, graph)\n\n annotated_type = computation_types.FunctionType(parameter_type, result_type)\n\n return pb.Computation(\n type=pb.Type(\n function=pb.FunctionType(\n parameter=type_serialization.serialize_type(parameter_type),\n result=type_serialization.serialize_type(result_type))),\n tensorflow=pb.TensorFlow(\n graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),\n parameter=parameter_binding,\n result=result_binding,\n initialize_op=init_op_name)), annotated_type\n\n\n# The maximum size allowed for serialized sequence values. Sequence that\n# serialize to values larger than this will result in errors being raised. This\n# likely occurs when the sequence is dependent on, and thus pulling in, many of\n# variables from the graph.\nDEFAULT_MAX_SERIALIZED_SEQUENCE_SIZE_BYTES = 20 * (1024**2) # 20 MB\n\n\n# TODO(b/137880330): there is likely opportunity here to share implementation\n# with the serialization happening in\n# `tensorflow_serialization.serialize_tf2_as_tf_computation()`. It would be good\n# to sync with TF team about options for ensuring graph-only (variable-less)\n# serializations.\ndef serialize_dataset(\n dataset,\n max_serialized_size_bytes=DEFAULT_MAX_SERIALIZED_SEQUENCE_SIZE_BYTES):\n \"\"\"Serializes a `tf.data.Dataset` value into a `bytes` object.\n\n Args:\n dataset: A `tf.data.Dataset`.\n max_serialized_size_bytes: An `int` size in bytes designating the threshold\n on when to raise an error if the resulting serialization is too big.\n\n Returns:\n A `bytes` object that can be sent to\n `tensorflow_serialization.deserialize_dataset` to recover the original\n `tf.data.Dataset`.\n\n Raises:\n SerializationError: if there was an error in TensorFlow during\n serialization.\n \"\"\"\n py_typecheck.check_type(dataset, tf.data.Dataset)\n module = tf.Module()\n module.dataset = dataset\n module.dataset_fn = tf.function(lambda: module.dataset, input_signature=())\n\n try:\n temp_dir = tempfile.mkdtemp('dataset')\n tf.saved_model.save(module, temp_dir, signatures={})\n\n fd, temp_zip = tempfile.mkstemp('zip')\n os.close(fd)\n with zipfile.ZipFile(temp_zip, 'w') as z:\n for topdir, _, filenames in tf.io.gfile.walk(temp_dir):\n dest_dir = topdir[len(temp_dir):]\n for filename in filenames:\n z.write(\n os.path.join(topdir, filename), os.path.join(dest_dir, filename))\n with open(temp_zip, 'rb') as z:\n zip_bytes = z.read()\n except Exception as e: # pylint: disable=broad-except\n six.reraise(\n SerializationError,\n SerializationError('Error serializing tff.Sequence value. 
'\n 'Inner error: {!s}'.format(e)),\n sys.exc_info()[2])\n finally:\n tf.io.gfile.rmtree(temp_dir)\n tf.io.gfile.remove(temp_zip)\n\n if len(zip_bytes) > max_serialized_size_bytes:\n raise ValueError('Serialized size of Dataset ({:d} bytes) exceeds maximum '\n 'allowed ({:d} bytes)'.format(\n len(zip_bytes), max_serialized_size_bytes))\n return zip_bytes\n\n\ndef deserialize_dataset(serialized_bytes):\n \"\"\"Deserializes a `bytes` object to a `tf.data.Dataset`.\n\n Args:\n serialized_bytes: `bytes` object produced by\n `tensorflow_serialization.serialize_dataset`\n\n Returns:\n A `tf.data.Dataset` instance.\n\n Raises:\n SerializationError: if there was an error in TensorFlow during\n serialization.\n \"\"\"\n py_typecheck.check_type(serialized_bytes, bytes)\n try:\n fd, temp_zip = tempfile.mkstemp('zip')\n os.close(fd)\n with open(temp_zip, 'wb') as f:\n f.write(serialized_bytes)\n\n temp_dir = tempfile.mkdtemp('dataset')\n with zipfile.ZipFile(temp_zip, 'r') as z:\n z.extractall(path=temp_dir)\n\n loaded = tf.compat.v2.saved_model.load(temp_dir)\n ds = loaded.dataset_fn()\n except Exception as e: # pylint: disable=broad-except\n six.reraise(\n SerializationError,\n SerializationError('Error deserializing tff.Sequence value. '\n 'Inner error: {!s}'.format(e)),\n sys.exc_info()[2])\n finally:\n tf.io.gfile.rmtree(temp_dir)\n tf.io.gfile.remove(temp_zip)\n return ds\n",
"# Lint as: python3\n# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"An example of an MNIST model function for use with TensorFlow Federated.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\n\nclass NumExamplesCounter(tf.keras.metrics.Sum):\n \"\"\"A `tf.keras.metrics.Metric` that counts the number of examples seen.\"\"\"\n\n def __init__(self, name='num_examples', dtype=tf.int64): # pylint: disable=useless-super-delegation\n super(NumExamplesCounter, self).__init__(name, dtype)\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n return super(NumExamplesCounter,\n self).update_state(tf.shape(y_pred)[0], sample_weight)\n\n\ndef create_simple_keras_model(learning_rate=0.1):\n \"\"\"Returns an instance of `tf.Keras.Model` with just one dense layer.\n\n Args:\n learning_rate: The learning rate to use with the SGD optimizer.\n\n Returns:\n An instance of `tf.Keras.Model`.\n \"\"\"\n model = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(784,)),\n tf.keras.layers.Dense(10, tf.nn.softmax, kernel_initializer='zeros')\n ])\n\n model.compile(\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n optimizer=tf.keras.optimizers.SGD(learning_rate),\n metrics=[\n tf.keras.metrics.SparseCategoricalAccuracy(),\n NumExamplesCounter()\n ])\n return model\n\n\ndef keras_dataset_from_emnist(dataset):\n \"\"\"Converts `dataset` for use with the output of `create_simple_keras_model`.\n\n Args:\n dataset: An instance of `tf.data.Dataset` to read from.\n\n Returns:\n An instance of `tf.data.Dataset` after conversion.\n \"\"\"\n\n def map_fn(example):\n return collections.OrderedDict([('x', tf.reshape(example['pixels'], [-1])),\n ('y', example['label'])])\n\n return dataset.map(map_fn)\n\n\ndef create_keras_model(compile_model=False):\n \"\"\"Returns an instance of `tf.keras.Model` for use with the MNIST example.\n\n This code is based on the following target, which unfortunately cannot be\n imported as it is a Python binary, not a library:\n\n https://github.com/tensorflow/models/blob/master/official/mnist/mnist.py\n\n Args:\n compile_model: If True, compile the model with a basic optimizer and loss.\n\n Returns:\n A `tf.keras.Model`.\n \"\"\"\n # TODO(b/120157713): Find a way to import this code.\n data_format = 'channels_last'\n input_shape = [28, 28, 1]\n l = tf.keras.layers\n initializer = tf.keras.initializers.RandomNormal(seed=0)\n max_pool = l.MaxPooling2D((2, 2), (2, 2),\n padding='same',\n data_format=data_format)\n model = tf.keras.Sequential([\n l.Reshape(target_shape=input_shape, input_shape=(28 * 28,)),\n l.Conv2D(\n 32,\n 5,\n padding='same',\n data_format=data_format,\n activation=tf.nn.relu,\n kernel_initializer=initializer), max_pool,\n l.Conv2D(\n 64,\n 5,\n padding='same',\n data_format=data_format,\n activation=tf.nn.relu,\n 
kernel_initializer=initializer), max_pool,\n l.Flatten(),\n l.Dense(1024, activation=tf.nn.relu, kernel_initializer=initializer),\n l.Dropout(0.4, seed=1),\n l.Dense(10, kernel_initializer=initializer)\n ])\n if compile_model:\n model.compile(\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n optimizer=tf.keras.optimizers.SGD(learning_rate=0.1))\n return model\n\n\nBatch = collections.namedtuple('Batch', ['x', 'y']) # pylint: disable=invalid-name\n\n\ndef create_random_batch():\n \"\"\"Returns an instance of `Batch` populated with random tensors.\"\"\"\n return Batch(\n x=tf.random.uniform(tf.TensorShape([1, 784]), dtype=tf.float32),\n y=tf.constant(1, dtype=tf.int64, shape=[1, 1]))\n\n\ndef model_fn():\n \"\"\"Constructs the MNIST model wrapped for use with TensorFlow Federated.\n\n The model constructed by this function can be passed as an argument to\n `tff.learning.build_federated_averaging_process` to create a federated\n training process.\n\n Returns:\n An instance of `tff.learning.Model` that represents a trainable model.\n \"\"\"\n keras_model = create_keras_model(compile_model=True)\n dummy_batch = create_random_batch()\n return tff.learning.from_compiled_keras_model(keras_model, dummy_batch)\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.compat.v1.local_variables",
"tensorflow.control_dependencies",
"tensorflow.io.gfile.walk",
"tensorflow.train.Checkpoint",
"tensorflow.compat.v1.global_variables",
"tensorflow.saved_model.save",
"tensorflow.Module",
"tensorflow.compat.v2.saved_model.load",
"tensorflow.function",
"tensorflow.io.gfile.remove",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.get_collection",
"tensorflow.io.gfile.rmtree",
"tensorflow.compat.v1.saved_model.load",
"tensorflow.compat.v1.initializers.variables",
"tensorflow.group"
],
[
"tensorflow.TensorShape",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.optimizers.SGD"
]
] |
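The `serialize_dataset` / `deserialize_dataset` pair in the tensorflow_serialization.py dump above round-trips a `tf.data.Dataset` through a zipped SavedModel. A minimal round-trip sketch, assuming a TF 2 runtime and that the module is importable from the repo path shown in this row; the toy range dataset is invented:

import tensorflow as tf
from tensorflow_federated.python.core.impl import tensorflow_serialization

ds = tf.data.Dataset.range(5)  # invented toy dataset
blob = tensorflow_serialization.serialize_dataset(ds)        # -> bytes (zipped SavedModel)
restored = tensorflow_serialization.deserialize_dataset(blob)
print([int(x) for x in restored])  # [0, 1, 2, 3, 4]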
imadmali/data-manipulation
|
[
"9888e78b535768c5a38b8a4ffb5a4f48309f4831"
] |
[
"data-manipulation/src/dm_pyspark.py"
] |
[
"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col, lit, when, sum, max, lag, DataFrame, udf\nfrom pyspark.sql.window import Window\nfrom pyspark.sql.types import StringType\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import pandas_udf\nimport numpy as np\nimport pandas as pd\nfrom functools import reduce\n\nspark = SparkSession.builder.appName('data-manipulation').getOrCreate()\n\n### READ DATA\n\nfact_table = spark.read.csv('./data/fact_table.csv', inferSchema=True, header=True)\ndim_table = spark.read.csv('./data/dim_table.csv', inferSchema=True, header=True)\n\n### SCHEMA\n\nfact_table.printSchema()\n\n### RENAME\n\n# one column\nfact_table.withColumnRenamed('id', 'identifier').show()\n\n# multiple columns\ncolumns = ['v0','v1','v2']\nfact_table.select(['id'] + [col(c).alias('field_' + c) for c in columns]).show()\n\n### CREATE/DROP COLUMNS\n\n# create\nfact_table.withColumn('new_column', lit('foo')).show()\nfact_table.withColumn('new_column', col('v1') + 1).show()\n\n# drop\nfact_table.drop('v0').show()\n\n### SELECT\n\n# option 1\nfact_table.select('id','v0').show()\n\n# option 2\ncolumn_names = ['id','v0']\nfact_table.select(*column_names).show()\n\n### CONDITIONS (CASE STATEMENTS)\n\n# simple\nfact_table.withColumn('new_column',\n when(col('v2')=='Y', 1).otherwise(0)).show()\n\n# case statement\nfact_table.withColumn('new_column',\n when((col('id') == 'A') & (col('v0') < 0), 'Y').\\\n when((col('id').isin(['B','D','E'])) & (col('v0') > 0), 'N').\\\n otherwise(None)).show()\n\n### SORTING\n\nfact_table.sort(['id','v0'], ascending=[True,False]).show()\n\n### FILTER/WHERE\n\n# filter\nfact_table.filter(col('v0')>0).show()\n\n# filter using list\nfact_table.filter(col('id').isin(['A','B'])).show()\nfact_table.filter(~col('id').isin(['A','B'])).show()\n\n# filter nulls\nfact_table.filter(col('id').isNull()).show()\nfact_table.filter(col('id').isNotNull()).show()\n\n# filter regex\nfact_table.filter(col('id').rlike('A|B')).show()\n\n### GROUP BY\n\nfact_table.groupBy('id').agg(sum('v0').alias('sum_v0'),\n sum('v1').alias('sum_v1'),\n max('v1').alias('max_v1')).show()\n\n### WINDOW\n\n# lag window\nwindow_spec = Window.partitionBy('id').orderBy('v0')\nfact_table.withColumn('new_column', lag('v0', 1).over(window_spec)).show()\n\n# window sum\nwindow_spec = Window.partitionBy('id').orderBy('v0').rowsBetween(-1, Window.currentRow)\nfact_table.withColumn('roll_sum_v0', sum('v0').over(window_spec))\n\n# cumulative sum\nwindow_spec = Window.partitionBy('id').orderBy('v0').rowsBetween(Window.unboundedPreceding, Window.currentRow)\nfact_table.withColumn('cum_sum_v0', sum('v0').over(window_spec)).show()\n\n### PIVOT\n\nfact_table.groupBy('id').pivot('v2').sum('v1').fillna(0).show()\n\n### JOIN\n\nfact_table.join(dim_table, on=[fact_table.id==dim_table.identifier], how='left').show()\n\n### UNION\n\n# two tables\nDataFrame.union(fact_table, fact_table)\n\n# more than two tables\nreduce(DataFrame.union, [fact_table, fact_table, fact_table]).show()\n\n### UDF\n\ndef udf_f(id, v0):\n if (id == 'A') and (v0 <0):\n return('Y')\n elif (id in ['A','B','D','E']) and (v0 > 0):\n return('N')\n else:\n return(None)\n\nudf_f_reg = udf(udf_f, StringType())\n\nfact_table.withColumn('new_column', udf_f_reg('id', 'v0')).show()\n\n### UDAF\n\n@pandas_udf(\"double\")\ndef udaf_f(x: pd.Series) -> float:\n return(np.sum(x)/len(x))\n\nfact_table.groupBy('id').agg(udaf_f('v0').alias('mean')).show()\n"
] |
[
[
"numpy.sum"
]
] |
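The `udaf_f` pandas UDF in the dm_pyspark.py dump above computes `np.sum(x) / len(x)`, which is simply the column mean; `pd.Series.mean()` is an equivalent, slightly clearer spelling. A minimal sketch assuming PySpark 3.x with pyarrow installed (Spark infers the grouped-aggregate UDF type from the Series -> float type hints, as in the source); the toy rows are invented:

import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import pandas_udf

spark = SparkSession.builder.appName('udaf-demo').getOrCreate()

@pandas_udf('double')
def mean_udaf(x: pd.Series) -> float:
    return float(x.mean())  # same value as np.sum(x) / len(x)

df = spark.createDataFrame([('A', 1.0), ('A', 3.0), ('B', 2.0)], ['id', 'v0'])
df.groupBy('id').agg(mean_udaf('v0').alias('mean')).show()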
isseikz/StarTracker
|
[
"01c1dfcf8c9a6886acfa18c038acc723f56cc94d"
] |
[
"main.py"
] |
[
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport http\n\nimport time\nimport io\nimport cv2\n\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nimport json\nimport urllib.parse\nfrom time import sleep\n\nimport PD\nimport PID\n\nimport serial\n\nimport binmom\nimport controlViaSerial as cvs\nimport gainAdjuster as ga\nimport msearch\n\ndef sendData(steps, com):\n # TODO: implementation\n \"\"\"convert the steps into packet data and send it\"\"\"\n if steps[0] < 0:\n dir = b'\\x00'\n else:\n dir = b'\\x01'\n\n\n if abs(steps[0]) > 250:\n phase = int(250)\n else:\n phase = int(round(abs(steps[0])))\n\n data = bytearray()\n phaseBytes = phase.to_bytes(1,'big')\n data =b'\\x11\\x22\\x33\\x44\\x10\\x02' + dir + phaseBytes + b'\\r\\n'\n\n com.write(data)\n pass\n\ndef controlMotor1(error, com, threshold):\n if error[0] > threshold:\n yaw = b'\\x01'\n elif error[0] < -threshold:\n yaw = b'\\x00'\n else:\n yaw = b'\\x10'\n pass\n\n if error[1] > threshold*2:\n pitch = b'\\x01'\n elif error[1] < -threshold*2:\n pitch = b'\\x00'\n else:\n pitch = b'\\x10'\n pass\n\n sentData = b'\\x11\\x22\\x33\\x44\\x10\\x02'+yaw+pitch +b'\\r\\n'\n com.write(sentData)\n return True\n\ndef controlMotorPD(controller, error, com):\n \"\"\"send data to ESP32 with desired steps calculated by PD control.\"\"\"\n controller.updateData(error)\n ctrl = controller.getControlParam()\n sendData(ctrl,com)\n\n\ndef controlMotorPID(controller, error, com):\n \"\"\"send data to ESP32 with desired steps calculated by PID control.\"\"\"\n controller.updateData(error)\n ctrl = controller.getControlParam()\n sendData(ctrl,com)\n\n\ndef run():\n thresholdCTRL = 10 #threshold for driving motor or not[pixel]\n\n uri, host, url, cameraHost, cameraUrl = msearch.urlLiveview()\n print(f'connect to http://{host}/{url}')\n\n print(cameraHost)\n print(cameraUrl)\n control = http.client.HTTPConnection(cameraHost)\n jsonDict = {\"method\":\"startLiveview\",\"params\":[],\"id\":1,\"version\":\"1.0\"}\n jsonData = json.dumps(jsonDict)\n print(jsonData)\n control.request(\"POST\", cameraUrl, body=jsonData)\n # control.request(\"POST\", cameraUrl)\n conres = control.getresponse()\n control.close()\n jsonRes = json.load(conres)\n print(jsonRes)\n print(urllib.parse.unquote(jsonRes['result'][0]))\n\n control = http.client.HTTPConnection(cameraHost)\n jsonDict = {\"method\":\"setShootMode\",\"params\":[\"movie\"],\"id\":1,\"version\":\"1.0\"}\n jsonData = json.dumps(jsonDict)\n print(jsonData)\n control.request(\"POST\", cameraUrl, body=jsonData)\n # control.request(\"POST\", cameraUrl)\n conres = control.getresponse()\n control.close()\n jsonRes = json.load(conres)\n print(jsonRes)\n #\n control = http.client.HTTPConnection(cameraHost)\n jsonDict = {\"method\":\"startMovieRec\",\"params\":[],\"id\":1,\"version\":\"1.0\"}\n jsonData = json.dumps(jsonDict)\n print(jsonData)\n control.request(\"POST\", cameraUrl, body=jsonData)\n # control.request(\"POST\", cameraUrl)\n conres = control.getresponse()\n control.close()\n jsonRes = json.load(conres)\n print(jsonRes)\n\n time.sleep(1)\n\n conn = http.client.HTTPConnection(host)\n conn.request(\"GET\", '/'+url)\n res = conn.getresponse()\n\n\n ser = serial.Serial(\"COM10\", 115200)\n com = cvs.SerialCTRl(ser)\n sentData = bytearray(b'\\x11\\x22\\x33\\x44\\x11')\n com.write(sentData)\n\n\n # PD = PD.PDControl(0.5, 0.3)\n PIDctrl = PID.PIDControl(0.5, 0.3, 0.3)\n\n payloadData = None\n pastData = None\n\n cnt = 0\n while True:\n cnt += 1\n\n commonHeaderLength = 1 + 1 + 2 
+ 4\n commonHeader = res.read(commonHeaderLength)\n payloadType = commonHeader[1]\n sequenceNumber = commonHeader[2:4]\n # print(\"Payload type: %d\" % payloadType)\n\n payloadHeader = res.read(128)\n startCode = payloadHeader[0:4]\n\n payloadDataSize = payloadHeader[4:7]\n paddingSize = payloadHeader[7]\n # print(\"%d, %d, %d\" % (payloadDataSize[0],payloadDataSize[1],payloadDataSize[2]))\n dataSize = int.from_bytes(payloadDataSize,'big')\n # print(\"Data size [Bytes]: %d\" % dataSize)\n # print(\"Padding size [Bytes]: %d\" % paddingSize)\n\n if payloadData != None:\n pastData = payloadData\n pass\n payloadData = res.read(dataSize)\n if paddingSize != 0:\n paddingData = res.read(paddingSize)\n\n if payloadType == 1:\n # print(\"Show:\")\n\n # redefineThresholdCTRL(thresholdCTRL, error)\n if cnt % 5 == 0:\n img_np = cv2.imdecode(np.fromstring(payloadData, np.uint8), cv2.IMREAD_COLOR)\n if pastData != None:\n imgPast_np = cv2.imdecode(np.fromstring(pastData, np.uint8), cv2.IMREAD_COLOR)\n error, center = binmom.runPink(img_np)\n # error, center = binmom.run(img_np)\n # print(error)\n # controlMotorPID(PIDctrl, error, com)\n controlMotor1(error, com, thresholdCTRL)\n\n if cnt % 5 == 0:\n cv2.namedWindow('StarTracker', cv2.WINDOW_NORMAL)\n cv2.imshow(\"StarTracker\",img_np)\n cv2.waitKey(1)\n\n if cnt % 100 == 0:\n img = Image.open(io.BytesIO(payloadData))\n\n filename = 'out%d.jpg' % int.from_bytes(sequenceNumber,'big')\n img.save(filename)\n\n # pass\n\n\n # controlMotorPD(PD, error, com)\n\n\n\nif __name__ == '__main__':\n run()\n"
] |
[
[
"numpy.fromstring"
]
] |
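The main.py dump above decodes each JPEG payload with `np.fromstring(payloadData, np.uint8)`; `np.fromstring` has been deprecated for binary input since NumPy 1.14, and `np.frombuffer` is the drop-in replacement (it also avoids an extra copy). The script's `payloadData != None` comparisons would likewise be more idiomatic as `is not None`. A sketch of the corrected call; the payload bytes here are a placeholder, not real liveview data:

import cv2
import numpy as np

payload_bytes = b'...'  # placeholder for the JPEG bytes read from the stream
img = cv2.imdecode(np.frombuffer(payload_bytes, np.uint8), cv2.IMREAD_COLOR)
# cv2.imdecode returns None for undecodable input, so guard before using img.
if img is None:
    print('payload was not a decodable image')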
eliask/georinex
|
[
"f2d9c03cecbe1ecd27a17eb0fc7957ac69c1a34c"
] |
[
"georinex/nav2.py"
] |
[
"#!/usr/bin/env python\nfrom pathlib import Path\nfrom datetime import datetime\nfrom typing import Dict, Union, Any, Sequence\nfrom typing.io import TextIO\nimport xarray\nimport numpy as np\nimport logging\n\nfrom .io import opener, rinexinfo\nfrom .common import rinex_string_to_float\n#\nSTARTCOL2 = 3 # column where numerical data starts for RINEX 2\nNl = {'G': 7, 'R': 3, 'E': 7} # number of additional SV lines\n\n\ndef rinexnav2(fn: Union[TextIO, str, Path],\n tlim: Sequence[datetime] = None) -> xarray.Dataset:\n \"\"\"\n Reads RINEX 2.x NAV files\n Michael Hirsch, Ph.D.\n SciVision, Inc.\n\n http://gage14.upc.es/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html\n ftp://igs.org/pub/data/format/rinex211.txt\n \"\"\"\n if isinstance(fn, (str, Path)):\n fn = Path(fn).expanduser()\n\n Lf = 19 # string length per field\n\n svs = []\n times = []\n raws = []\n\n with opener(fn) as f:\n\n header = navheader2(f)\n\n if header['filetype'] == 'N':\n svtype = 'G'\n fields = ['SVclockBias', 'SVclockDrift', 'SVclockDriftRate',\n 'IODE', 'Crs', 'DeltaN', 'M0',\n 'Cuc', 'Eccentricity', 'Cus', 'sqrtA',\n 'Toe', 'Cic', 'Omega0', 'Cis',\n 'Io', 'Crc', 'omega', 'OmegaDot',\n 'IDOT', 'CodesL2', 'GPSWeek', 'L2Pflag',\n 'SVacc', 'health', 'TGD', 'IODC',\n 'TransTime', 'FitIntvl']\n elif header['filetype'] == 'G':\n svtype = 'R' # GLONASS\n fields = ['SVclockBias', 'SVrelFreqBias', 'MessageFrameTime',\n 'X', 'dX', 'dX2', 'health',\n 'Y', 'dY', 'dY2', 'FreqNum',\n 'Z', 'dZ', 'dZ2', 'AgeOpInfo']\n elif header['filetype'] == 'E':\n svtype = 'E' # Galileo\n fields = ['SVclockBias', 'SVclockDrift', 'SVclockDriftRate',\n 'IODnav', 'Crs', 'DeltaN', 'M0',\n 'Cuc', 'Eccentricity', 'Cus', 'sqrtA',\n 'Toe', 'Cic', 'Omega0', 'Cis',\n 'Io', 'Crc', 'omega', 'OmegaDot',\n 'IDOT', 'DataSrc', 'GALWeek',\n 'SISA', 'health', 'BGDe5a', 'BGDe5b',\n 'TransTime']\n else:\n raise NotImplementedError(f'I do not yet handle Rinex 2 NAV {header[\"sys\"]} {fn}')\n# %% read data\n for ln in f:\n try:\n time = _timenav(ln)\n except ValueError:\n continue\n\n if tlim is not None:\n if time < tlim[0]:\n _skip(f, Nl[header['systems']])\n continue\n elif time > tlim[1]:\n break\n# %% format I2 http://gage.upc.edu/sites/default/files/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html\n svs.append(f'{svtype}{ln[:2]}')\n\n times.append(time)\n \"\"\"\n now get the data as one big long string per SV\n \"\"\"\n raw = ln[22:79] # NOTE: MUST be 79, not 80 due to some files that put \\n a character early!\n for _ in range(Nl[header['systems']]):\n raw += f.readline()[STARTCOL2:79]\n # one line per SV\n # NOTE: Sebastijan added .replace(' ', ' ').replace(' -', '-')\n # here, I would like to see a file that needs this first, to be sure\n # I'm not needlessly slowing down reading or creating new problems.\n raws.append(raw.replace('D', 'E').replace('\\n', ''))\n\n# %% parse\n svs = [s.replace(' ', '0') for s in svs]\n svu = sorted(set(svs))\n\n atimes = np.asarray(times)\n timesu = np.unique(atimes)\n data = np.empty((len(fields), timesu.size, len(svu)))\n data.fill(np.nan)\n\n for j, sv in enumerate(svu): # for each SV, across all values and times...\n svi = [i for i, s in enumerate(svs) if s == sv] # these rows are for this SV\n\n tu = np.unique(atimes[svi]) # this SV was seen at these times\n if tu.size != atimes[svi].size:\n logging.warning(f'duplicate times detected, skipping SV {sv}')\n continue\n\n for i in svi:\n it = np.nonzero(timesu == times[i])[0][0] # int by defn\n \"\"\"\n some files sometimes drop the last measurement, this fixes that.\n It assumes the 
blank is always in the last measurement for now.\n \"\"\"\n dvec = [float(raws[i][k*Lf:(k+1)*Lf]) for k in range(min(len(fields), len(raws[i])//Lf))]\n data[:len(dvec), it, j] = dvec\n\n# %% assemble output\n # NOTE: time must be datetime64[ns] or .to_netcdf will fail\n nav = xarray.Dataset(coords={'time': timesu.astype('datetime64[ns]'), 'sv': svu})\n\n for i, k in enumerate(fields):\n if k is None:\n continue\n nav[k] = (('time', 'sv'), data[i, :, :])\n\n # GLONASS uses kilometers to report its ephemeris.\n # Convert to meters here to be consistent with NAV3 implementation.\n if svtype == 'R':\n for name in ['X', 'Y', 'Z', 'dX', 'dY', 'dZ', 'dX2', 'dY2', 'dZ2']:\n nav[name] *= 1e3\n\n# %% other attributes\n nav.attrs['version'] = header['version']\n nav.attrs['svtype'] = [svtype] # Use list for consistency with NAV3.\n nav.attrs['rinextype'] = 'nav'\n if isinstance(fn, Path):\n nav.attrs['filename'] = fn.name\n\n if 'ION ALPHA' in header and 'ION BETA' in header:\n alpha = header['ION ALPHA']\n alpha = [rinex_string_to_float(alpha[2 + i*12:2 + (i+1)*12])\n for i in range(4)]\n beta = header['ION BETA']\n beta = [rinex_string_to_float(beta[2 + i*12:2 + (i+1)*12])\n for i in range(4)]\n nav.attrs['ionospheric_corr_GPS'] = np.hstack((alpha, beta))\n\n return nav\n\n\ndef navheader2(f: TextIO) -> Dict[str, Any]:\n \"\"\"\n For RINEX NAV version 2 only. End users should use rinexheader()\n \"\"\"\n if isinstance(f, (str, Path)):\n with opener(f, header=True) as h:\n return navheader2(h)\n\n hdr = rinexinfo(f)\n\n for ln in f:\n if 'END OF HEADER' in ln:\n break\n kind, content = ln[60:].strip(), ln[:60]\n hdr[kind] = content\n\n return hdr\n\n\ndef _timenav(ln: str) -> datetime:\n\n year = int(ln[3:5])\n if 80 <= year <= 99:\n year += 1900\n elif year < 80: # because we might pass in four-digit year\n year += 2000\n else:\n raise ValueError(f'unknown year format {year}')\n\n return datetime(year=year,\n month=int(ln[6:8]),\n day=int(ln[9:11]),\n hour=int(ln[12:14]),\n minute=int(ln[15:17]),\n second=int(float(ln[17:20])),\n microsecond=int(float(ln[17:22]) % 1 * 1000000)\n )\n\n\ndef _skip(f: TextIO, Nl: int):\n for _, _ in zip(range(Nl), f):\n pass\n\n\ndef navtime2(fn: Union[TextIO, Path]) -> np.ndarray:\n \"\"\"\n read all times in RINEX 2 NAV file\n \"\"\"\n times = []\n with opener(fn) as f:\n hdr = navheader2(f)\n\n while True:\n ln = f.readline()\n if not ln:\n break\n\n try:\n time = _timenav(ln)\n except ValueError:\n continue\n\n times.append(time)\n\n _skip(f, Nl[hdr['systems']])\n\n return np.unique(times)\n"
] |
[
[
"numpy.asarray",
"numpy.hstack",
"numpy.nonzero",
"numpy.unique"
]
] |
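The `rinexnav2()` reader above leans on two RINEX 2 quirks: numeric records use Fortran-style `D` exponents, and every value occupies a fixed 19-character field (`Lf = 19`). A minimal, self-contained sketch of that fixed-width decoding follows; the sample record is invented for illustration, not taken from a real file.

```python
# Fixed-width RINEX 2 NAV field decoding, mirroring rinexnav2() above.
Lf = 19  # string length per field, as in the reader

def parse_fields(raw: str) -> list:
    """Split a concatenated RINEX 2 NAV record into Python floats."""
    raw = raw.replace('D', 'E')  # Fortran exponent marker -> float()-readable
    return [float(raw[k * Lf:(k + 1) * Lf]) for k in range(len(raw) // Lf)]

# two made-up 19-character fields, e.g. SV clock bias and clock drift
record = " 1.234567890123D-04-9.876543210987D-12"
print(parse_fields(record))  # [0.0001234567890123, -9.876543210987e-12]
```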
ndevenish/dxtbx
|
[
"2e3fff616dd99e5e7557e9774e4357bacae59f1b"
] |
[
"command_line/plot_detector_models.py"
] |
[
"# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\n\nfrom libtbx.phil import parse\nfrom libtbx.utils import Sorry\nfrom scitbx.matrix import col\n\nfrom dxtbx.datablock import DataBlockFactory\nfrom dxtbx.model.experiment_list import ExperimentListFactory\n\nusage = \"\"\"Plot dxtbx detector models. Provide multiple json files if desired\nExample: dxtbx.plot_detector_models datablock1.json datablock2.json\n\"\"\"\n\n\nphil_scope = parse(\n \"\"\"\n show_origin_vectors = True\n .type = bool\n .help = If true, draw origin vectors as arrows\n orthographic = False\n .type = bool\n .help = If true, draw an orthographic projection (IE drop the Z-axis)\n panel_numbers = True\n .type = bool\n .help = If true, label panel numbers\n pdf_file = None\n .type = path\n .help = If not None, save the result as a pdf figure.\n plot_all_detectors = True\n .type = bool\n .help = If False, plot only the first detector model found\n\"\"\"\n)\n\n\n# http://stackoverflow.com/questions/22867620/putting-arrowheads-on-vectors-in-matplotlibs-3d-plot\nclass Arrow3D(FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\n FancyArrowPatch.draw(self, renderer)\n\n\ndef plot_group(\n g, color, ax, orthographic=False, show_origin_vectors=True, panel_numbers=True\n):\n # recursively plot a detector group\n p = g.parent()\n if show_origin_vectors:\n if p is None:\n # parent origin\n pori = (0, 0, 0)\n else:\n # parent origin\n pori = p.get_origin()\n ori = g.get_origin()\n if not orthographic:\n a = Arrow3D(\n [pori[0], ori[0]],\n [pori[1], ori[1]],\n [pori[2], ori[2]],\n mutation_scale=20,\n lw=1,\n arrowstyle=\"-|>\",\n color=\"gray\",\n )\n ax.add_artist(a)\n if g.is_group():\n for c in g:\n # plot all the children\n plot_group(c, color, ax, orthographic, show_origin_vectors, panel_numbers)\n else:\n # plot the panel boundaries\n size = g.get_image_size()\n p0 = col(g.get_pixel_lab_coord((0, 0)))\n p1 = col(g.get_pixel_lab_coord((size[0] - 1, 0)))\n p2 = col(g.get_pixel_lab_coord((size[0] - 1, size[1] - 1)))\n p3 = col(g.get_pixel_lab_coord((0, size[1] - 1)))\n v1 = p1 - p0\n v2 = p3 - p0\n vcen = ((v2 / 2) + (v1 / 2)) + p0\n z = list(zip(p0, p1, p2, p3, p0))\n\n if orthographic:\n ax.plot(z[0], z[1], color=color)\n\n if panel_numbers:\n # Annotate with panel numbers\n ax.text(vcen[0], vcen[1], \"%d\" % g.index())\n else:\n ax.plot(z[0], z[1], z[2], color=color)\n\n if panel_numbers:\n # Annotate with panel numbers\n ax.text(vcen[0], vcen[1], vcen[2], \"%d\" % g.index())\n\n\ndef run(args):\n user_phil = []\n files = []\n for arg in args:\n if os.path.isfile(arg):\n files.append(arg)\n else:\n try:\n user_phil.append(parse(arg))\n except Exception:\n raise Sorry(\"Unrecognized argument %s\" % arg)\n params = phil_scope.fetch(sources=user_phil).extract()\n\n fig = plt.figure()\n colormap = plt.cm.gist_ncar\n colors = [colormap(i) for i in np.linspace(0, 0.9, len(files))]\n for file_name, color in zip(files, colors):\n\n # read the 
data and get the detector models\n try:\n datablocks = DataBlockFactory.from_json_file(file_name, check_format=False)\n detectors = sum((db.unique_detectors() for db in datablocks), [])\n except Exception:\n try:\n experiments = ExperimentListFactory.from_json_file(\n file_name, check_format=False\n )\n except ValueError:\n experiments = ExperimentListFactory.from_filenames([file_name])\n detectors = experiments.detectors()\n if not params.plot_all_detectors:\n detectors = detectors[0:1]\n for detector in detectors:\n # plot the hierarchy\n if params.orthographic:\n ax = fig.gca()\n else:\n ax = fig.gca(projection=\"3d\")\n plot_group(\n detector.hierarchy(),\n color,\n ax,\n orthographic=params.orthographic,\n show_origin_vectors=params.show_origin_vectors,\n panel_numbers=params.panel_numbers,\n )\n\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n if params.orthographic:\n plt.axes().set_aspect(\"equal\", \"datalim\")\n\n if params.pdf_file:\n pp = PdfPages(params.pdf_file)\n for i in plt.get_fignums():\n pp.savefig(plt.figure(i))\n pp.close()\n else:\n plt.show()\n\n\nif __name__ == \"__main__\":\n run(sys.argv[1:])\n"
] |
[
[
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"matplotlib.patches.FancyArrowPatch.draw",
"matplotlib.pyplot.get_fignums",
"matplotlib.patches.FancyArrowPatch.__init__",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
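In `plot_group()` above, each detector panel is drawn as the closed quadrilateral through its four lab-frame pixel corners from `get_pixel_lab_coord()`. A small sketch of that corner-outline idea in plain matplotlib; the corner coordinates are invented stand-ins for a real dxtbx detector.

```python
import matplotlib.pyplot as plt

# made-up lab-frame corners of one panel (mm); the script obtains these
# from detector.get_pixel_lab_coord() at the four extreme pixels
p0, p1, p2, p3 = (0, 0), (100, 5), (105, 95), (5, 90)

# transpose the corner tuples into an x-list and a y-list, closing the loop
z = list(zip(p0, p1, p2, p3, p0))

fig, ax = plt.subplots()
ax.plot(z[0], z[1], color="tab:blue")               # panel outline
ax.text(sum(z[0][:4]) / 4, sum(z[1][:4]) / 4, "0")  # panel number at centre
ax.set_aspect("equal", "datalim")
plt.show()
```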
Spico197/DocEE
|
[
"d6b585e29e5908b891e765066b96ff7642587e5a"
] |
[
"dee/tasks/dee_task.py"
] |
[
"import copy\nimport glob\nimport logging\nimport os\nfrom itertools import combinations, product\n\nimport torch\nimport torch.distributed as dist\nimport torch.optim as optim\nfrom loguru import logger\nfrom tqdm import tqdm\nfrom transformers.models.bert.modeling_bert import BertConfig\n\nimport dee.models\nfrom dee.event_types import get_event_template\nfrom dee.helper import (\n DEEArgRelFeatureConverter,\n DEEExample,\n DEEExampleLoader,\n DEEFeatureConverter,\n DEPPNFeatureConverter,\n convert_dee_arg_rel_features_to_dataset,\n convert_dee_features_to_dataset,\n convert_deppn_features_to_dataset,\n convert_string_to_raw_input,\n decode_dump_template,\n eval_dump_template,\n match_arg,\n measure_dee_prediction,\n prepare_doc_batch_dict,\n)\nfrom dee.models import DCFEEModel, Doc2EDAGModel\nfrom dee.modules import LSTMBiaffineNERModel, LSTMMaskedCRFNERModel\nfrom dee.modules.ner_model import BERTCRFNERModel\nfrom dee.tasks.base_task import BasePytorchTask, TaskSetting\nfrom dee.utils import (\n BertTokenizerForDocEE,\n chain_prod,\n default_dump_json,\n default_load_pkl,\n get_cosine_schedule_with_warmup,\n list_models,\n remove_event_obj_roles,\n)\n\n\nclass DEETaskSetting(TaskSetting):\n base_key_attrs = TaskSetting.base_key_attrs\n base_attr_default_pairs = [\n # ('train_file_name', 'typed_train.json'),\n (\"train_file_name\", \"typed_train.json\"),\n (\"dev_file_name\", \"typed_dev.json\"),\n (\"test_file_name\", \"typed_test.json\"),\n # ('train_file_name', 'typed_sample_train_48.json'),\n # ('dev_file_name', 'typed_sample_train_48.json'),\n # ('test_file_name', 'typed_sample_train_48.json'),\n (\"summary_dir_name\", \"Summary/Summary\"),\n (\"event_type_template\", \"zheng2019_trigger_graph_no_OtherType\"),\n (\"max_sent_len\", 128),\n (\"max_sent_num\", 64),\n (\"train_batch_size\", 64),\n (\"gradient_accumulation_steps\", 8),\n (\"eval_batch_size\", 2),\n (\"learning_rate\", 1e-4),\n (\"use_lr_scheduler\", False),\n (\"lr_scheduler_step\", 20),\n (\"num_train_epochs\", 100),\n # ('num_train_epochs', 30),\n (\"no_cuda\", False),\n (\"local_rank\", -1),\n (\"seed\", 99),\n (\"optimize_on_cpu\", False),\n (\"fp16\", False),\n (\"use_bert\", False), # whether to use bert as the encoder\n (\"use_biaffine_ner\", False), # use biaffine ner model\n (\"use_masked_crf\", False),\n (\n \"bert_model\",\n \"/home/tzhu/bert-pretrained-models/bert-base-chinese\",\n ), # use which pretrained bert model\n (\"only_master_logging\", True), # whether to print logs from multiple processes\n (\n \"resume_latest_cpt\",\n True,\n ), # whether to resume latest checkpoints when training for fault tolerance\n (\"remove_last_cpt\", False),\n (\"save_best_cpt\", False),\n (\n \"cpt_file_name\",\n \"Doc2EDAG\",\n ), # decide the identity of checkpoints, evaluation results, etc.\n (\"model_type\", \"Doc2EDAG\"), # decide the model class used\n (\"rearrange_sent\", False), # whether to rearrange sentences\n (\"use_crf_layer\", True), # whether to use CRF Layer\n (\"min_teacher_prob\", 0.1), # the minimum prob to use gold spans\n (\"schedule_epoch_start\", 10), # from which epoch the scheduled sampling starts\n (\n \"schedule_epoch_length\",\n 10,\n ), # the number of epochs to linearly transit to the min_teacher_prob\n (\"loss_lambda\", 0.05), # the proportion of ner loss\n (\"loss_gamma\", 1.0), # the scaling proportion of missed span sentence ner loss\n (\"deppn_ner_loss_weight\", 0.1), # the proportion of ner loss\n (\n \"deppn_type_loss_weight\",\n 0.4,\n ), # the proportion of event type classification 
loss\n (\n \"deppn_event_generation_loss_weight\",\n 0.5,\n ), # the proportion of event generation loss\n (\"deppn_decoder_lr\", 2e-5), # learning rate of DEPPN decoder\n (\"deppn_num_event2role_decoder_layer\", 4),\n (\"deppn_train_on_multi_events\", True),\n (\"deppn_train_on_single_event\", True),\n (\"deppn_event_type_classes\", 2),\n (\"deppn_num_generated_sets\", 5),\n (\"deppn_num_set_decoder_layers\", 2),\n (\"deppn_num_role_decoder_layers\", 4),\n (\"deppn_cost_weight\", {\"event_type\": 1, \"role\": 0.5}),\n (\"deppn_train_on_multi_roles\", False),\n (\"deppn_use_event_type_enc\", True),\n (\"deppn_use_role_decoder\", True),\n (\"deppn_use_sent_span_encoder\", False),\n (\"deppn_train_nopair_sets\", True),\n (\"deppn_hidden_dropout\", 0.1),\n (\"deppn_layer_norm_eps\", 1e-12),\n (\"deppn_event_type_weight\", [1, 0.2]),\n (\"add_greedy_dec\", True), # whether to add additional greedy decoding\n (\"use_token_role\", True), # whether to use detailed token role\n (\n \"seq_reduce_type\",\n \"MaxPooling\",\n ), # use 'MaxPooling', 'MeanPooling' or 'AWA' to reduce a tensor sequence\n # network parameters (follow Bert Base)\n (\"hidden_size\", 768),\n (\"dropout\", 0.1),\n (\"ff_size\", 1024), # feed-forward mid layer size\n (\"num_tf_layers\", 4), # transformer layer number\n # ablation study parameters,\n (\"use_path_mem\", True), # whether to use the memory module when expanding paths\n (\"use_scheduled_sampling\", True), # whether to use the scheduled sampling\n (\"use_doc_enc\", True), # whether to use document-level entity encoding\n (\"neg_field_loss_scaling\", 3.0), # prefer FNs over FPs\n (\"gcn_layer\", 3), # prefer FNs over FPs\n (\"num_ner_tf_layers\", 4),\n # LSTM MTL\n (\"num_lstm_layers\", 1), # number of lstm layers\n (\"use_span_lstm\", False), # add lstm module after span representation\n (\"span_lstm_num_layer\", 1), # add lstm module after span representation\n (\"use_span_att\", False), # add self-attention for spans after lstm encoding\n (\"span_att_heads\", 4),\n # number of head in dot attention\n (\"dot_att_head\", 4),\n # comb sampling parameters\n (\"comb_samp_min_num_span\", 2),\n (\"comb_samp_num_samp\", 100),\n (\"comb_samp_max_samp_times\", 1000),\n # Arg Triangle Relation\n # use lstm encoder for spans instead of linear projection before biaffine prediction\n (\"use_span_lstm_projection\", False),\n (\"biaffine_hidden_size\", 256),\n (\"triaffine_hidden_size\", 150),\n (\"vi_max_iter\", 3),\n (\"biaffine_hard_threshold\", 0.5),\n (\"event_cls_loss_weight\", 1.0),\n (\"smooth_attn_loss_weight\", 1.0),\n # ('combination_loss_weight', 0.1),\n (\"combination_loss_weight\", 1.0),\n (\"comb_cls_loss_weight\", 1.0),\n (\"comb_sim_loss_weight\", 1.0),\n (\"span_cls_loss_weight\", 1.0),\n (\"use_comb_cls_pred\", False),\n (\"role_loss_weight\", 1.0),\n (\"event_relevant_combination\", False),\n # running mode for data selection and other debug options\n # choices: full, quarter, debug\n # full: all the training data\n # quarter: use quarter of training data\n # debug: use the 48 debug instances and simplify\n # the pred_adj_mat decoding for CompleteGraph model\n (\"run_mode\", \"full\"),\n # drop irrelevant entities during data preprocessing and\n # make sure all the entities appear in the final event combinations\n (\"drop_irr_ents\", False),\n (\"at_least_one_comb\", False),\n (\"include_complementary_ents\", False),\n (\"filtered_data_types\", \"o2o,o2m,m2m\"),\n (\"ent_context_window\", 20),\n (\"biaffine_grad_clip\", False),\n (\"global_grad_clip\", 
False),\n # entity fixing mode:\n # - `n`: no fixing\n # - `-`: remove wrong ones\n # - `f`: fix wrong ones\n (\"ent_fix_mode\", \"n\"),\n (\"span_mention_sum\", False),\n (\"add_adj_mat_weight_bias\", False),\n (\"optimizer\", \"adam\"),\n # number of triggers, choices among 1, 2, 3 and the others (complete graph)\n (\"num_triggers\", 0),\n (\"eval_num_triggers\", 0),\n (\"with_left_trigger\", False),\n (\"with_all_one_trigger_comb\", False),\n (\"directed_trigger_graph\", False),\n (\"adj_sim_head\", 1),\n (\"adj_sim_agg\", \"mean\"),\n (\"adj_sim_split_head\", False),\n # for multi-step triggering\n (\"num_triggering_steps\", 1),\n # structures\n (\"use_shared_dropout_proj\", False),\n (\"use_layer_norm_b4_biaffine\", False),\n (\"remove_mention_type_layer_norm\", False),\n (\"use_token_drop\", False),\n (\"guessing_decode\", False),\n (\"max_clique_decode\", False),\n (\"try_to_make_up\", False), # data building\n (\"self_loop\", False), # combination decoding\n (\"incremental_min_conn\", -1),\n (\"use_span_self_att\", False),\n (\"use_smooth_span_self_att\", False),\n (\"ment_feature_type\", \"plus\"),\n (\"ment_type_hidden_size\", 32),\n (\"num_mention_lstm_layer\", 1),\n (\"gat_alpha\", 0.2),\n (\"gat_num_heads\", 4),\n (\"gat_num_layers\", 2),\n (\"role_by_encoding\", False),\n (\"use_mention_lstm\", False),\n (\"mlp_before_adj_measure\", False),\n (\"use_field_cls_mlp\", False),\n (\"build_dense_connected_doc_graph\", False),\n (\"stop_gradient\", False),\n ]\n\n def __init__(self, **kwargs):\n super(DEETaskSetting, self).__init__(\n self.base_key_attrs, self.base_attr_default_pairs, **kwargs\n )\n if self.run_mode == \"full\":\n self.train_file_name = \"typed_train.json\"\n self.dev_file_name = \"typed_dev.json\"\n self.test_file_name = \"typed_test.json\"\n self.doc_lang = \"zh\"\n elif self.run_mode == \"half\":\n self.train_file_name = \"typed_train_1o2.json\"\n self.dev_file_name = \"typed_dev.json\"\n self.test_file_name = \"typed_test.json\"\n self.doc_lang = \"zh\"\n elif self.run_mode == \"quarter\":\n self.train_file_name = \"typed_train_1o4.json\"\n self.dev_file_name = \"typed_dev.json\"\n self.test_file_name = \"typed_test.json\"\n self.doc_lang = \"zh\"\n elif self.run_mode == \"1o8\":\n self.train_file_name = \"typed_train_1o8.json\"\n self.dev_file_name = \"typed_dev.json\"\n self.test_file_name = \"typed_test.json\"\n self.doc_lang = \"zh\"\n elif self.run_mode == \"debug\":\n self.train_file_name = \"typed_sample_train_48.json\"\n self.dev_file_name = \"typed_sample_train_48.json\"\n self.test_file_name = \"typed_sample_train_48.json\"\n self.doc_lang = \"zh\"\n elif self.run_mode == \"dueefin_wo_tgg\":\n self.train_file_name = \"dueefin_train_wo_tgg.json\"\n self.dev_file_name = \"dueefin_dev_wo_tgg.json\"\n self.test_file_name = \"dueefin_dev_wo_tgg.json\"\n self.inference_file_name = \"dueefin_submit_wo_tgg.json\"\n self.doc_lang = \"zh\"\n elif self.run_mode == \"dueefin_w_tgg\":\n self.train_file_name = \"dueefin_train_w_tgg.json\"\n self.dev_file_name = \"dueefin_dev_w_tgg.json\"\n self.test_file_name = \"dueefin_dev_w_tgg.json\"\n self.inference_file_name = \"dueefin_submit_w_tgg.json\"\n self.doc_lang = \"zh\"\n else:\n raise ValueError(f\"run_mode: {self.run_mode} is not supported\")\n if isinstance(self.filtered_data_types, str):\n self.filtered_data_types = self.filtered_data_types.split(\",\")\n\n\nclass DEETask(BasePytorchTask):\n \"\"\"Doc-level Event Extraction Task\"\"\"\n\n def __init__(\n self,\n dee_setting,\n load_train=True,\n 
load_dev=True,\n load_test=True,\n load_inference=False,\n parallel_decorate=True,\n ):\n super(DEETask, self).__init__(\n dee_setting, only_master_logging=dee_setting.only_master_logging\n )\n self.best_f1 = -1.0\n self.logger = logger\n self.logging(\"Initializing {}\".format(self.__class__.__name__))\n\n self.tokenizer = BertTokenizerForDocEE.from_pretrained(\n self.setting.bert_model, doc_lang=self.setting.doc_lang\n )\n self.setting.vocab_size = len(self.tokenizer.vocab)\n\n # get event type template\n self.event_template = get_event_template(self.setting.event_type_template)\n\n # get entity and event label name\n self.entity_label_list = DEEExample.get_entity_label_list(self.event_template)\n self.event_type_fields_pairs = DEEExample.get_event_type_fields_pairs(\n self.event_template\n )\n\n # build example loader\n self.example_loader_func = DEEExampleLoader(\n self.event_template,\n self.tokenizer,\n self.setting.rearrange_sent,\n self.setting.max_sent_len,\n drop_irr_ents_flag=self.setting.drop_irr_ents,\n include_complementary_ents=self.setting.include_complementary_ents,\n filtered_data_types=self.setting.filtered_data_types,\n )\n\n if not self.setting.use_token_role:\n # no token role conflicts with some settings\n if self.setting.model_type != \"Doc2EDAG\":\n logger.warning(\n \"Model is not Doc2EDAG! Make sure you know what you are doing here.\"\n )\n assert self.setting.add_greedy_dec is False\n self.setting.num_entity_labels = 3 # 0: 'O', 1: 'Begin', 2: 'Inside'\n else:\n self.setting.num_entity_labels = len(self.entity_label_list)\n\n self.setting.tag_id2tag_name = {\n idx: name for idx, name in enumerate(self.entity_label_list)\n }\n self.setting.ent_type2id = {\n event_type: idx\n for idx, event_type in enumerate(\n [\n x[2:]\n for x in filter(\n lambda x: x.startswith(\"B-\"),\n self.setting.tag_id2tag_name.values(),\n )\n ]\n )\n }\n self.setting.ent_type2id[\"O\"] = len(self.setting.ent_type2id)\n\n supported_models = list_models()\n if self.setting.use_bert:\n bert_config = BertConfig.from_pretrained(self.setting.bert_model)\n bert_config.model_type = self.setting.model_type\n self.setting.update_by_dict(bert_config.__dict__) # BertConfig dictionary\n ner_model = BERTCRFNERModel(self.setting)\n elif self.setting.use_biaffine_ner:\n ner_model = LSTMBiaffineNERModel(self.setting)\n elif self.setting.use_masked_crf:\n ner_model = LSTMMaskedCRFNERModel(self.setting)\n else:\n ner_model = None\n\n if self.setting.model_type == \"DEPPNModel\":\n bert_config = BertConfig.from_pretrained(self.setting.bert_model)\n self.setting.update_by_dict(bert_config.__dict__)\n self.setting.model_type = \"DEPPNModel\"\n\n if self.setting.model_type in {\"Doc2EDAG\", \"Doc2EDAGModel\"}:\n self.model = Doc2EDAGModel(\n self.setting,\n self.event_type_fields_pairs,\n ner_model=ner_model,\n )\n elif self.setting.model_type in {\"DCFEE\", \"DCFEEModel\"}:\n self.model = DCFEEModel(\n self.setting, self.event_type_fields_pairs, ner_model=ner_model\n )\n elif self.setting.model_type in supported_models:\n model_class = getattr(dee.models, self.setting.model_type)\n self.model = model_class(\n self.setting, self.event_type_fields_pairs, ner_model=ner_model\n )\n elif self.setting.model_type + \"Model\" in supported_models:\n model_class = getattr(dee.models, self.setting.model_type + \"Model\")\n self.model = model_class(\n self.setting, self.event_type_fields_pairs, ner_model=ner_model\n )\n else:\n raise Exception(\"Unsupported model type {}\".format(self.setting.model_type))\n\n 
all_trainable = []\n fixed = []\n for name, param in self.model.named_parameters():\n param_num = chain_prod(param.size())\n if param.requires_grad:\n logger.info(\n \"Trainable: {:20}\\t{:20}\\t{}\".format(\n name, str(param.size()), param_num\n )\n )\n all_trainable.append(param_num)\n else:\n logger.info(\n \"Untrainable: {:20}\\t{:20}\\t{}\".format(\n name, str(param.size()), param_num\n )\n )\n fixed.append(param_num)\n\n logger.info(f\"#Total Trainable Parameters: {sum(all_trainable)}\")\n logger.info(f\"#Total Fixed Parameters: {sum(fixed)}\")\n\n self._decorate_model(parallel_decorate=parallel_decorate)\n\n # prepare optimizer\n if self.setting.use_bert:\n param_groups = [\n {\n \"params\": [\n p\n for n, p in self.model.named_parameters()\n if \"bert\" in n.lower()\n ],\n \"lr\": 3e-5,\n },\n {\n \"params\": [\n p\n for n, p in self.model.named_parameters()\n if \"bert\" not in n.lower()\n ],\n \"lr\": self.setting.learning_rate,\n },\n ]\n self.optimizer = optim.AdamW(param_groups)\n else:\n if self.setting.optimizer == \"adamw\":\n self.optimizer = optim.AdamW(\n self.model.parameters(), lr=self.setting.learning_rate\n )\n elif self.setting.optimizer == \"sgd\":\n self.optimizer = optim.SGD(\n self.model.parameters(), lr=self.setting.learning_rate, momentum=0.9\n )\n else:\n self.optimizer = optim.Adam(\n self.model.parameters(), lr=self.setting.learning_rate\n )\n\n # for DE-PPN\n # logic in https://github.com/HangYang-NLP/DE-PPN/blob/812cc8ba92a88049c36978e3abca7f8816c31ead/DEE/DEE_task.py#L162-L163\n # fork from https://github.com/HangYang-NLP/DE-PPN/blob/812cc8ba92a88049c36978e3abca7f8816c31ead/DEE/base_task.py#L375\n if self.setting.model_type == \"DEPPNModel\":\n # Prepare optimizer\n if self.setting.fp16:\n model_named_parameters = [\n (n, param.clone().detach().to(\"cpu\").float().requires_grad_())\n for n, param in self.model.named_parameters()\n ]\n elif self.setting.optimize_on_cpu:\n model_named_parameters = [\n (n, param.clone().detach().to(\"cpu\").requires_grad_())\n for n, param in self.model.named_parameters()\n ]\n else:\n model_named_parameters = list(self.model.named_parameters())\n\n no_decay = [\"bias\", \"gamma\", \"beta\"]\n component = [\"encoder\", \"decoder\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p\n for n, p in model_named_parameters\n if n not in no_decay and component[1] not in n\n ],\n \"weight_decay_rate\": 0.01,\n \"lr\": self.setting.learning_rate,\n },\n {\n \"params\": [\n p\n for n, p in model_named_parameters\n if n in no_decay and component[1] not in n\n ],\n \"weight_decay_rate\": 0.0,\n \"lr\": self.setting.learning_rate,\n },\n {\n \"params\": [\n p\n for n, p in model_named_parameters\n if n not in no_decay and component[1] in n\n ],\n \"weight_decay_rate\": 0.01,\n \"lr\": self.setting.deppn_decoder_lr,\n },\n {\n \"params\": [\n p\n for n, p in model_named_parameters\n if n in no_decay and component[1] in n\n ],\n \"weight_decay_rate\": 0.0,\n \"lr\": self.setting.deppn_decoder_lr,\n },\n ]\n\n # for n, p in model_named_parameters:\n # if 'ner_model' in n:\n # p.requires_grad = False\n # self.model.ner_model.requires_grad = False\n\n # num_train_steps = int(\n # len(self.train_examples)\n # / self.setting.train_batch_size\n # / self.setting.gradient_accumulation_steps\n # * self.setting.num_train_epochs\n # )\n\n # optimizer = BertAdam(optimizer_grouped_parameters,\n # warmup=self.setting.warmup_proportion,\n # t_total=num_train_steps)\n\n self.optimizer = optim.AdamW(optimizer_grouped_parameters)\n # scheduler = 
get_linear_schedule_with_warmup(\n # optimizer, self.setting.warmup_proportion * num_train_steps, num_train_steps\n # )\n\n if self.setting.use_lr_scheduler:\n # self.lr_scheduler = optim.lr_scheduler.StepLR(\n # self.optimizer,\n # step_size=self.setting.lr_scheduler_step,\n # gamma=0.5)\n self.lr_scheduler = get_cosine_schedule_with_warmup(\n self.optimizer,\n num_warmup_steps=max(1, int(0.02 * self.setting.num_train_epochs)),\n num_training_steps=self.setting.num_train_epochs,\n )\n\n # build feature converter\n if self.setting.model_type in [\n \"Doc2EDAG\",\n \"Doc2EDAGModel\",\n \"DCFEE\",\n \"DCFEEModel\",\n \"GITModel\",\n ]:\n convert_dataset_func = convert_dee_features_to_dataset\n self.feature_converter_func = DEEFeatureConverter(\n self.entity_label_list,\n self.event_template,\n self.setting.max_sent_len,\n self.setting.max_sent_num,\n self.tokenizer,\n include_cls=self.setting.use_bert,\n include_sep=self.setting.use_bert,\n )\n elif self.setting.model_type == \"DEPPNModel\":\n # use DEEFeature\n convert_dataset_func = convert_deppn_features_to_dataset\n self.feature_converter_func = DEPPNFeatureConverter(\n self.entity_label_list,\n self.event_template,\n self.setting.max_sent_len,\n self.setting.max_sent_num,\n self.tokenizer,\n include_cls=self.setting.use_bert,\n include_sep=self.setting.use_bert,\n )\n else:\n # use DEEArgRelFeature\n convert_dataset_func = convert_dee_arg_rel_features_to_dataset\n self.feature_converter_func = DEEArgRelFeatureConverter(\n self.entity_label_list,\n self.event_template,\n self.setting.max_sent_len,\n self.setting.max_sent_num,\n self.tokenizer,\n include_cls=False,\n include_sep=False,\n trigger_aware=self.setting.num_triggers != 0,\n num_triggers=self.setting.num_triggers,\n directed_graph=self.setting.directed_trigger_graph,\n try_to_make_up=self.setting.try_to_make_up,\n )\n\n # load data\n self._load_data(\n self.example_loader_func,\n self.feature_converter_func,\n convert_dataset_func,\n load_train=load_train,\n load_dev=load_dev,\n load_test=load_test,\n load_inference=load_inference,\n )\n # customized mini-batch producer\n self.custom_collate_fn = prepare_doc_batch_dict\n\n # # resume option\n # if resume_model or resume_optimizer:\n # self.resume_checkpoint(resume_model=resume_model, resume_optimizer=resume_optimizer)\n\n self.min_teacher_prob = None\n self.teacher_norm = None\n self.teacher_cnt = None\n self.teacher_base = None\n self.reset_teacher_prob()\n\n self.logging(\"Successfully initialize {}\".format(self.__class__.__name__))\n\n def reset_teacher_prob(self):\n self.min_teacher_prob = self.setting.min_teacher_prob\n if self.train_dataset is None:\n # avoid crashing when not loading training data\n num_step_per_epoch = 500\n else:\n num_step_per_epoch = int(\n len(self.train_dataset) / self.setting.train_batch_size\n )\n self.teacher_norm = num_step_per_epoch * self.setting.schedule_epoch_length\n self.teacher_base = num_step_per_epoch * self.setting.schedule_epoch_start\n self.teacher_cnt = 0\n\n def get_teacher_prob(self, batch_inc_flag=True):\n if self.teacher_cnt < self.teacher_base:\n prob = 1\n else:\n prob = max(\n self.min_teacher_prob,\n (self.teacher_norm - self.teacher_cnt + self.teacher_base)\n / self.teacher_norm,\n )\n\n if batch_inc_flag:\n self.teacher_cnt += 1\n\n return prob\n\n def get_event_idx2entity_idx2field_idx(self):\n entity_idx2entity_type = {}\n for entity_idx, entity_label in enumerate(self.entity_label_list):\n if entity_label == \"O\":\n entity_type = entity_label\n else:\n entity_type = 
entity_label[2:]\n\n entity_idx2entity_type[entity_idx] = entity_type\n\n event_idx2entity_idx2field_idx = {}\n for event_idx, (event_name, field_types, _, _) in enumerate(\n self.event_type_fields_pairs\n ):\n field_type2field_idx = {}\n for field_idx, field_type in enumerate(field_types):\n field_type2field_idx[field_type] = field_idx\n\n entity_idx2field_idx = {}\n for entity_idx, entity_type in entity_idx2entity_type.items():\n if entity_type in field_type2field_idx:\n entity_idx2field_idx[entity_idx] = field_type2field_idx[entity_type]\n else:\n entity_idx2field_idx[entity_idx] = None\n\n event_idx2entity_idx2field_idx[event_idx] = entity_idx2field_idx\n\n return event_idx2entity_idx2field_idx\n\n def get_loss_on_batch(self, doc_batch_dict, features=None):\n if features is None:\n features = self.train_features\n\n # teacher_prob = 1\n # if use_gold_span, gold spans will be used every time\n # else, teacher_prob will ensure the proportion of using gold spans\n if self.setting.use_scheduled_sampling:\n use_gold_span = False\n teacher_prob = self.get_teacher_prob()\n else:\n use_gold_span = True\n teacher_prob = 1\n\n try:\n loss = self.model(\n doc_batch_dict,\n features,\n use_gold_span=use_gold_span,\n train_flag=True,\n teacher_prob=teacher_prob,\n )\n except Exception:\n # DONE(tzhu): fix this issue for multi-gpu DDP training\n logger.info(\"-\" * 30)\n logger.info(\n \"Exception occurs when processing \"\n + \",\".join(\n [features[ex_idx].guid for ex_idx in doc_batch_dict[\"ex_idx\"]]\n )\n )\n raise Exception(\"Cannot get the loss\")\n\n return loss\n\n def get_event_decode_result_on_batch(\n self, doc_batch_dict, features=None, use_gold_span=False, heuristic_type=None\n ):\n if features is None:\n raise Exception(\"Features mush be provided\")\n\n if heuristic_type is None:\n event_idx2entity_idx2field_idx = None\n else:\n # this mapping is used to get span candidates for each event field\n event_idx2entity_idx2field_idx = self.get_event_idx2entity_idx2field_idx()\n\n batch_eval_results = self.model(\n doc_batch_dict,\n features,\n use_gold_span=use_gold_span,\n train_flag=False,\n event_idx2entity_idx2field_idx=event_idx2entity_idx2field_idx,\n heuristic_type=heuristic_type,\n )\n\n return batch_eval_results\n\n def train(self, save_cpt_flag=True, resume_base_epoch=None):\n self.logging(\"=\" * 20 + \"Start Training\" + \"=\" * 20)\n self.reset_teacher_prob()\n\n # resume_base_epoch arguments have higher priority over settings\n if resume_base_epoch is None:\n # whether to resume latest cpt when restarting, very useful for preemptive scheduling clusters\n if self.setting.resume_latest_cpt:\n resume_base_epoch = self.get_latest_cpt_epoch()\n else:\n resume_base_epoch = 0\n\n # resume cpt if possible\n if resume_base_epoch > 0:\n self.logging(\"Training starts from epoch {}\".format(resume_base_epoch))\n for _ in range(resume_base_epoch):\n self.get_teacher_prob()\n self.resume_cpt_at(\n resume_base_epoch, resume_model=True, resume_optimizer=True\n )\n else:\n self.logging(\"Training starts from scratch\")\n\n self.base_train(\n DEETask.get_loss_on_batch,\n kwargs_dict1={},\n epoch_eval_func=DEETask.resume_save_eval_at,\n kwargs_dict2={\n \"save_cpt_flag\": save_cpt_flag,\n \"resume_cpt_flag\": False,\n },\n base_epoch_idx=resume_base_epoch,\n )\n if self.summary_writer is not None:\n self.summary_writer.close()\n\n def remove_cpt_before(self, epoch):\n prev_epochs = []\n for fn in os.listdir(self.setting.model_dir):\n if 
fn.startswith(\"{}.cpt\".format(self.setting.cpt_file_name)):\n try:\n ep = int(fn.split(\".\")[-1])\n if ep < epoch:\n prev_epochs.append(ep)\n except Exception:\n continue\n for ep in prev_epochs:\n cpt_filename = \"{}.cpt.{}\".format(self.setting.cpt_file_name, ep)\n prev_cpt_filepath = os.path.join(self.setting.model_dir, cpt_filename)\n os.remove(prev_cpt_filepath)\n\n def resume_save_eval_at(self, epoch, resume_cpt_flag=False, save_cpt_flag=True):\n # if self.is_master_node():\n # print('\\nPROGRESS: {:.2f}%\\n'.format(epoch / self.setting.num_train_epochs * 100))\n self.logging(\n \"Current teacher prob {}\".format(\n self.get_teacher_prob(batch_inc_flag=False)\n )\n )\n\n if resume_cpt_flag:\n self.resume_cpt_at(epoch)\n\n if self.is_master_node() and save_cpt_flag:\n self.save_cpt_at(epoch)\n if self.setting.remove_last_cpt:\n self.remove_cpt_before(epoch)\n\n if self.setting.model_type == \"DCFEE\":\n eval_tasks = product([\"dev\", \"test\"], [False, True], [\"DCFEE-O\", \"DCFEE-M\"])\n else:\n if self.setting.add_greedy_dec:\n eval_tasks = product(\n [\"dev\", \"test\"], [False, True], [\"GreedyDec\", None]\n )\n else:\n eval_tasks = product([\"dev\", \"test\"], [False, True], [None])\n\n # all_id_map = defaultdict(dict)\n for task_idx, (data_type, gold_span_flag, heuristic_type) in enumerate(\n eval_tasks\n ):\n if (\n self.in_distributed_mode()\n and task_idx % dist.get_world_size() != dist.get_rank()\n ):\n continue\n\n if gold_span_flag:\n span_str = \"gold_span\"\n else:\n span_str = \"pred_span\"\n\n if heuristic_type is None:\n # store user-provided name\n model_str = self.setting.cpt_file_name.replace(\".\", \"~\")\n else:\n model_str = heuristic_type\n\n if data_type == \"test\":\n features = copy.deepcopy(self.test_features)\n dataset = copy.deepcopy(self.test_dataset)\n elif data_type == \"dev\":\n features = copy.deepcopy(self.dev_features)\n dataset = copy.deepcopy(self.dev_dataset)\n else:\n raise Exception(\"Unsupported data type {}\".format(data_type))\n decode_dump_name = decode_dump_template.format(\n data_type, span_str, model_str, epoch\n )\n eval_dump_name = eval_dump_template.format(\n data_type, span_str, model_str, epoch\n )\n _, measures = self.eval(\n features,\n dataset,\n use_gold_span=gold_span_flag,\n heuristic_type=heuristic_type,\n dump_decode_pkl_name=decode_dump_name,\n dump_eval_json_name=eval_dump_name,\n )\n if self.is_master_node() and data_type == \"dev\" and gold_span_flag is False:\n curr_f1 = measures[\"overall\"][\"overall\"][\"MicroF1\"]\n self.logging(\n f\"Epoch: {epoch}, Current F1: {curr_f1 * 100:.3f}, Best F1: {self.best_f1 * 100:.3f}, is the best: {curr_f1 > self.best_f1}\"\n )\n if curr_f1 > self.best_f1:\n self.best_f1 = curr_f1\n if self.setting.save_best_cpt:\n self.save_cpt_at(epoch)\n if self.setting.remove_last_cpt:\n self.remove_cpt_before(epoch)\n\n def save_cpt_at(self, epoch):\n self.save_checkpoint(\n cpt_file_name=\"{}.cpt.{}\".format(self.setting.cpt_file_name, epoch),\n epoch=epoch,\n )\n\n def resume_cpt_at(self, epoch, resume_model=True, resume_optimizer=False):\n self.resume_checkpoint(\n cpt_file_name=\"{}.cpt.{}\".format(self.setting.cpt_file_name, epoch),\n resume_model=resume_model,\n resume_optimizer=resume_optimizer,\n )\n\n def get_latest_cpt_epoch(self):\n prev_epochs = []\n for fn in os.listdir(self.setting.model_dir):\n if fn.startswith(\"{}.cpt\".format(self.setting.cpt_file_name)):\n try:\n epoch = int(fn.split(\".\")[-1])\n prev_epochs.append(epoch)\n except Exception:\n continue\n 
prev_epochs.sort()\n\n if len(prev_epochs) > 0:\n latest_epoch = prev_epochs[-1]\n self.logging(\n \"Pick latest epoch {} from {}\".format(latest_epoch, str(prev_epochs))\n )\n else:\n latest_epoch = 0\n self.logging(\"No previous epoch checkpoints, just start from scratch\")\n\n return latest_epoch\n\n def eval(\n self,\n features,\n dataset,\n use_gold_span=False,\n heuristic_type=None,\n dump_decode_pkl_name=None,\n dump_eval_json_name=None,\n ):\n self.logging(\"=\" * 20 + \"Start Evaluation\" + \"=\" * 20)\n\n if dump_decode_pkl_name is not None:\n dump_decode_pkl_path = os.path.join(\n self.setting.output_dir, dump_decode_pkl_name\n )\n self.logging(\"Dumping decode results into {}\".format(dump_decode_pkl_name))\n else:\n dump_decode_pkl_path = None\n\n total_event_decode_results = self.base_eval(\n dataset,\n DEETask.get_event_decode_result_on_batch,\n reduce_info_type=\"none\",\n dump_pkl_path=dump_decode_pkl_path,\n features=features,\n use_gold_span=use_gold_span,\n heuristic_type=heuristic_type,\n )\n\n self.logging(\"Measure DEE Prediction\")\n\n if dump_eval_json_name is not None:\n dump_eval_json_path = os.path.join(\n self.setting.output_dir, dump_eval_json_name\n )\n self.logging(\"Dumping eval results into {}\".format(dump_eval_json_path))\n else:\n dump_eval_json_path = None\n\n total_eval_res = measure_dee_prediction(\n self.event_type_fields_pairs,\n features,\n total_event_decode_results,\n self.setting.event_relevant_combination,\n dump_json_path=dump_eval_json_path,\n )\n if self.is_master_node():\n dataset_name = span_type = \"unknown\"\n epoch = 0\n if dump_eval_json_name is not None:\n dataset_name, span_type = dump_eval_json_name.split(\".\")[1:3]\n epoch = dump_eval_json_name.split(\".\")[-2]\n elif dump_decode_pkl_name is not None:\n dataset_name, span_type = dump_decode_pkl_name.split(\".\")[1:3]\n epoch = dump_eval_json_name.split(\".\")[-2]\n epoch = int(epoch)\n measure_list = [\n \"classification\",\n \"entity\",\n \"combination\",\n \"overall\",\n \"instance\",\n ]\n if self.summary_writer is not None:\n for measure_name in measure_list:\n self.summary_writer.add_scalars(\n f\"{dataset_name}/{span_type}/{measure_name}\",\n {\n \"o2o\": total_eval_res[\"o2o\"][measure_name][\"MicroF1\"],\n \"o2m\": total_eval_res[\"o2m\"][measure_name][\"MicroF1\"],\n \"m2m\": total_eval_res[\"m2m\"][measure_name][\"MicroF1\"],\n \"overall\": total_eval_res[\"overall\"][measure_name][\n \"MicroF1\"\n ],\n },\n global_step=epoch,\n )\n\n adj_mat_measures = dict()\n raw_combination_measures = dict()\n connection_measures = dict()\n trigger_measures = dict()\n for doc_type in [\"o2o\", \"o2m\", \"m2m\", \"overall\"]:\n if \"adj_mat\" in total_eval_res[doc_type]:\n adj_mat_measures.update(\n {doc_type: total_eval_res[doc_type][\"adj_mat\"][\"Accuracy\"]}\n )\n if \"rawCombination\" in total_eval_res[doc_type]:\n raw_combination_measures.update(\n {\n doc_type: total_eval_res[doc_type][\"rawCombination\"][\n \"MicroF1\"\n ]\n }\n )\n if \"connection\" in total_eval_res[doc_type]:\n connection_measures.update(\n {\n doc_type: total_eval_res[doc_type][\"connection\"][\n \"MicroF1\"\n ]\n }\n )\n if \"trigger\" in total_eval_res[doc_type]:\n trigger_measures.update(\n {doc_type: total_eval_res[doc_type][\"trigger\"][\"MicroF1\"]}\n )\n\n if adj_mat_measures:\n self.summary_writer.add_scalars(\n f\"{dataset_name}/{span_type}/adj_mat\",\n adj_mat_measures,\n global_step=epoch,\n )\n if raw_combination_measures:\n self.summary_writer.add_scalars(\n 
f\"{dataset_name}/{span_type}/rawCombination\",\n raw_combination_measures,\n global_step=epoch,\n )\n if connection_measures:\n self.summary_writer.add_scalars(\n f\"{dataset_name}/{span_type}/connection\",\n connection_measures,\n global_step=epoch,\n )\n if trigger_measures:\n self.summary_writer.add_scalars(\n f\"{dataset_name}/{span_type}/trigger\",\n trigger_measures,\n global_step=epoch,\n )\n\n return total_event_decode_results, total_eval_res\n\n def reevaluate_dee_prediction(\n self,\n max_epoch=100,\n target_file_pre=\"dee_eval\",\n target_file_suffix=\".pkl\",\n dump_flag=False,\n ):\n \"\"\"Enumerate the evaluation directory to collect all dumped evaluation results\"\"\"\n eval_dir_path = self.setting.output_dir\n logger.info(\"Re-evaluate dee predictions from {}\".format(eval_dir_path))\n doc_type2data_span_type2model_str2epoch_res_list = {}\n pkl_match_name = os.path.join(\n eval_dir_path, f\"{target_file_pre}.*{target_file_suffix}\"\n )\n pkl_matched_names = glob.glob(pkl_match_name)\n pbar = tqdm(pkl_matched_names, desc=\"ReEval\", ncols=80, ascii=True)\n for fn in pbar:\n fn = os.path.split(fn)[-1]\n fn_splits = fn.split(\".\")\n if (\n fn.startswith(target_file_pre)\n and fn.endswith(target_file_suffix)\n and len(fn_splits) == 6\n ):\n _, data_type, span_type, model_str, epoch, _ = fn_splits\n epoch = int(epoch)\n if epoch > max_epoch:\n continue\n\n if data_type == \"dev\":\n features = self.dev_features\n elif data_type == \"test\":\n features = self.test_features\n else:\n raise Exception(\"Unsupported data type {}\".format(data_type))\n\n fp = os.path.join(eval_dir_path, fn)\n # self.logging('Re-evaluating {}'.format(fp))\n event_decode_results = default_load_pkl(fp)\n total_eval_res = measure_dee_prediction(\n self.event_template.event_type_fields_list,\n features,\n event_decode_results,\n self.setting.event_relevant_combination,\n )\n\n for doc_type in [\"o2o\", \"o2m\", \"m2m\", \"overall\"]:\n if doc_type not in doc_type2data_span_type2model_str2epoch_res_list:\n doc_type2data_span_type2model_str2epoch_res_list[doc_type] = {}\n\n data_span_type = (data_type, span_type)\n if (\n data_span_type\n not in doc_type2data_span_type2model_str2epoch_res_list[\n doc_type\n ]\n ):\n doc_type2data_span_type2model_str2epoch_res_list[doc_type][\n data_span_type\n ] = {}\n model_str2epoch_res_list = (\n doc_type2data_span_type2model_str2epoch_res_list[doc_type][\n data_span_type\n ]\n )\n\n if model_str not in model_str2epoch_res_list:\n model_str2epoch_res_list[model_str] = []\n epoch_res_list = model_str2epoch_res_list[model_str]\n\n epoch_res_list.append((epoch, total_eval_res[doc_type]))\n\n if dump_flag:\n fp = fp.rstrip(\".pkl\") + \".json\"\n # self.logging('Dumping {}'.format(fp))\n default_dump_json(total_eval_res, fp)\n logger.info(pbar)\n\n for (\n doc_type,\n data_span_type2model_str2epoch_res_list,\n ) in doc_type2data_span_type2model_str2epoch_res_list.items():\n for (\n data_span_type,\n model_str2epoch_res_list,\n ) in data_span_type2model_str2epoch_res_list.items():\n for model_str, epoch_res_list in model_str2epoch_res_list.items():\n epoch_res_list.sort(key=lambda x: x[0])\n\n return doc_type2data_span_type2model_str2epoch_res_list\n\n def ensemble_dee_prediction(self, curr_best_pkl_filepath, esmb_best_pkl_filepath):\n \"\"\"ensembling based on absent-filling strategy\"\"\"\n curr_encode_results = default_load_pkl(curr_best_pkl_filepath)\n esmb_encode_results = default_load_pkl(esmb_best_pkl_filepath)\n new_results = []\n for curr, esmb in 
zip(curr_encode_results, esmb_encode_results):\n if all(x is None for x in curr[2]):\n new_results.append(esmb)\n else:\n new_results.append(curr)\n\n total_eval_res = measure_dee_prediction(\n self.event_template.event_type_fields_list,\n self.test_features,\n new_results,\n self.setting.event_relevant_combination,\n )\n print_data = []\n results = {\n \"ModelType\": \"ensemble\",\n \"o2o\": {\n \"classification\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"entity\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"combination\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"rawCombination\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"overall\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"instance\": {\"precision\": None, \"recall\": None, \"f1\": None},\n },\n \"o2m\": {\n \"classification\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"entity\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"combination\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"rawCombination\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"overall\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"instance\": {\"precision\": None, \"recall\": None, \"f1\": None},\n },\n \"m2m\": {\n \"classification\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"entity\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"combination\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"rawCombination\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"overall\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"instance\": {\"precision\": None, \"recall\": None, \"f1\": None},\n },\n \"overall\": {\n \"classification\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"entity\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"combination\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"rawCombination\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"overall\": {\"precision\": None, \"recall\": None, \"f1\": None},\n \"instance\": {\"precision\": None, \"recall\": None, \"f1\": None},\n },\n }\n MEASURE_TYPES = [\n \"classification\",\n \"entity\",\n \"combination\",\n \"rawCombination\",\n \"overall\",\n \"instance\",\n ]\n headers = []\n for measure_type in MEASURE_TYPES:\n if measure_type in total_eval_res[\"overall\"].keys():\n headers.append(measure_type)\n header = \"Data\\t{}\".format(\n \"\\t\".join(list(map(lambda x: \"{:20}\".format(x.title()), headers)))\n )\n logger.info(header)\n logger.info(\" \\t{}\".format(\"Prec\\tRecall\\tF1\\t\" * len(headers)))\n for data_type in [\"o2o\", \"o2m\", \"m2m\", \"overall\"]:\n result = total_eval_res[data_type]\n tmp_print = [data_type]\n for measure_type in MEASURE_TYPES:\n if measure_type in result:\n tmp_print.extend(\n [\n result[measure_type][\"MicroPrecision\"],\n result[measure_type][\"MicroRecall\"],\n result[measure_type][\"MicroF1\"],\n ]\n )\n results[data_type][measure_type][\"precision\"] = \"{:.3f}\".format(\n result[measure_type][\"MicroPrecision\"] * 100\n )\n results[data_type][measure_type][\"recall\"] = \"{:.3f}\".format(\n result[measure_type][\"MicroRecall\"] * 100\n )\n results[data_type][measure_type][\"f1\"] = \"{:.3f}\".format(\n result[measure_type][\"MicroF1\"] * 100\n )\n print_data.append(tmp_print)\n\n for ds in print_data:\n for d in ds:\n if isinstance(d, float):\n logger.info(\"{:.3f}\".format(d * 100), end=\"\\t\")\n 
else:\n logger.info(\"{}\".format(d), end=\"\\t\")\n logger.info()\n\n @torch.no_grad()\n def predict_one(self, string):\n self.model.eval()\n guid = \"PREDICTION\"\n data = convert_string_to_raw_input(guid, string)\n examples = [\n self.example_loader_func.convert_dict_to_example(\n data[0], data[1], only_inference=True\n )\n ]\n features = self.feature_converter_func(examples)\n batch = self.custom_collate_fn(features)\n batch = self.set_batch_to_device(batch)\n batch_info = self.get_event_decode_result_on_batch(\n batch, features=features, use_gold_span=False, heuristic_type=None\n )\n example = examples[0]\n doc_fea = features[0]\n result = batch_info[0]\n\n doc_id = doc_fea.guid\n event_list = []\n mspans = []\n event_types = []\n for eid, r in enumerate(result[1]):\n if r == 1:\n event_types.append(self.event_type_fields_pairs[eid][0])\n\n doc_arg_rel_info = result[3]\n mention_drange_list = doc_arg_rel_info.mention_drange_list\n mention_type_list = doc_arg_rel_info.mention_type_list\n doc_token_ids = doc_fea.doc_token_ids.detach().tolist()\n for drange, ment_type in zip(mention_drange_list, mention_type_list):\n mspan = self.tokenizer.convert_ids_to_tokens(\n doc_token_ids[drange[0]][drange[1] : drange[2]]\n )\n if all(x.upper() != \"[UNK]\" for x in mspan):\n mspan = \"\".join(mspan)\n offset = int(self.feature_converter_func.include_cls)\n matched_drange = [drange[0], drange[1] - offset, drange[2] - offset]\n else:\n mspan, matched_drange = match_arg(\n example.sentences,\n doc_fea.doc_token_ids.numpy(),\n doc_token_ids[drange[0]][drange[1] : drange[2]],\n offset=int(self.feature_converter_func.include_cls),\n )\n mtype = self.setting.tag_id2tag_name[ment_type][2:]\n t_mspan = {\"mspan\": mspan, \"mtype\": mtype, \"drange\": matched_drange}\n if t_mspan not in mspans:\n mspans.append(t_mspan)\n\n for event_idx, events in enumerate(result[2]):\n if events is None:\n continue\n for ins in events:\n if all(x is None for x in ins):\n continue\n tmp_ins = {\n \"event_type\": self.event_template.event_type_fields_list[event_idx][\n 0\n ],\n \"arguments\": None,\n }\n arguments = []\n for field_idx, args in enumerate(ins):\n if args is None:\n continue\n if not isinstance(args, set):\n args = {args}\n for arg in args:\n arg_tmp = self.tokenizer.convert_ids_to_tokens(arg)\n if all(x.upper() != \"[UNK]\" for x in arg_tmp):\n real_arg = \"\".join(arg_tmp)\n else:\n real_arg, _ = match_arg(\n example.sentences,\n doc_fea.doc_token_ids.numpy(),\n arg,\n offset=int(self.feature_converter_func.include_cls),\n )\n if real_arg is None:\n self.logging(\n f\"doc: {doc_id}, arg ({arg_tmp}) with UNK but original text not found\",\n level=logging.WARNING,\n )\n real_arg = arg_tmp\n arguments.append(\n {\n \"role\": self.event_template.event_type_fields_list[\n event_idx\n ][1][field_idx],\n \"argument\": real_arg,\n }\n )\n tmp_ins[\"arguments\"] = arguments\n if tmp_ins not in event_list:\n event_list.append(tmp_ins)\n\n event_list_merge_flag = [True for _ in range(len(event_list))]\n for ins1, ins2 in combinations(enumerate(event_list), 2):\n if ins1[1][\"event_type\"] == ins2[1][\"event_type\"]:\n ins1_args = {\n (arg[\"role\"], arg[\"argument\"]) for arg in ins1[1][\"arguments\"]\n }\n ins2_args = {\n (arg[\"role\"], arg[\"argument\"]) for arg in ins2[1][\"arguments\"]\n }\n if ins1_args == ins2_args or ins2_args.issubset(ins1_args):\n event_list_merge_flag[ins2[0]] = False\n elif ins1_args.issubset(ins2_args):\n event_list_merge_flag[ins1[0]] = False\n new_event_list = []\n for flag, events in 
zip(event_list_merge_flag, event_list):\n if flag:\n new_event_list.append(events)\n\n doc_res = {\n \"id\": doc_id,\n \"event_list\": new_event_list,\n \"comments\": {\n \"pred_types\": event_types,\n \"mspans\": mspans,\n \"sentences\": example.sentences,\n },\n }\n return doc_res\n\n def debug_display(self, doc_type, span_type, epoch, midout_dir):\n import json\n\n from transformers import BertTokenizer\n\n from dee.helper import DEEArgRelFeature, DEEFeature, measure_event_table_filling\n from dee.utils import (\n convert_role_fea_event_obj_to_standard,\n extract_combinations_from_event_objs,\n fill_diag,\n recover_ins,\n )\n\n tokenizer = BertTokenizer.from_pretrained(self.setting.bert_model)\n\n features = self.test_features\n output_dir = os.path.join(midout_dir, f\"{doc_type}/{span_type}\")\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n fn = f\"dee_eval.test.{span_type}.{self.setting.model_type}.{epoch}.pkl\"\n fp = os.path.join(self.setting.output_dir, fn)\n # self.logging('Re-evaluating {}'.format(fp))\n event_decode_results = default_load_pkl(fp)\n\n is_cg = False\n if len(features) > 0:\n if all(isinstance(feature, DEEArgRelFeature) for feature in features):\n is_cg = True\n elif all(isinstance(feature, DEEFeature) for feature in features):\n is_cg = False\n else:\n raise ValueError(\"Not all the features are in the same type!\")\n\n new_event_decode_results = copy.deepcopy(event_decode_results)\n filtered_event_decode_results = []\n for doc_fea, decode_result in zip(features, new_event_decode_results):\n if (\n doc_type != \"overall\"\n and doc_fea.doc_type != {\"o2o\": 0, \"o2m\": 1, \"m2m\": 2}[doc_type]\n ):\n continue\n filtered_event_decode_results.append(decode_result)\n\n pred_record_mat_list = []\n gold_record_mat_list = []\n pred_event_types = []\n gold_event_types = []\n pred_spans_token_tuple_list = []\n gold_spans_token_tuple_list = []\n pred_adj_mats = []\n gold_adj_mats = []\n pred_combinations = []\n gold_combinations = []\n new_features = []\n for term in filtered_event_decode_results:\n ex_idx, pred_event_type_labels, pred_record_mat, doc_span_info = term[:4]\n doc_fea = features[ex_idx]\n new_features.append(doc_fea)\n\n if is_cg:\n pred_adj_mat, event_idx2combinations = term[4:6]\n pred_adj_mats.append(pred_adj_mat)\n gold_adj_mats.append([doc_fea.whole_arg_rel_mat.reveal_adj_mat()])\n tmp_pred_combinations = set()\n for combinations_ in event_idx2combinations:\n combinations_ = [\n tuple(\n sorted(\n [doc_span_info.span_token_tup_list[arg] for arg in comb]\n )\n )\n for comb in combinations_\n ]\n tmp_pred_combinations.update(set(combinations_))\n pred_combinations.append(tmp_pred_combinations)\n # convert doc_fea.event_arg_idxs_objs_list and remove the role labels\n doc_fea.event_arg_idxs_objs_list = remove_event_obj_roles(\n doc_fea.event_arg_idxs_objs_list, self.event_type_fields_pairs\n )\n tmp_gold_combinations = extract_combinations_from_event_objs(\n doc_fea.event_arg_idxs_objs_list\n )\n tmp_gold_combinations = set(\n [\n tuple(\n sorted([doc_fea.span_token_ids_list[arg] for arg in comb])\n )\n for comb in tmp_gold_combinations\n ]\n )\n gold_combinations.append(tmp_gold_combinations)\n\n pred_event_types.append(pred_event_type_labels)\n gold_event_types.append(doc_fea.event_type_labels)\n pred_spans_token_tuple_list.append(doc_span_info.span_token_tup_list)\n gold_spans_token_tuple_list.append(doc_fea.span_token_ids_list)\n\n pred_record_mat = [\n [\n [\n tuple(arg_tup) if arg_tup is not None else None\n for arg_tup in 
pred_record\n ]\n for pred_record in pred_records\n ]\n if pred_records is not None\n else None\n for pred_records in pred_record_mat\n ]\n gold_record_mat = [\n [\n [\n tuple(doc_fea.span_token_ids_list[arg_idx])\n if arg_idx is not None\n else None\n for arg_idx in event_arg_idxs\n ]\n for event_arg_idxs in event_arg_idxs_objs\n ]\n if event_arg_idxs_objs is not None\n else None # for events in each event type\n for event_arg_idxs_objs in doc_fea.event_arg_idxs_objs_list\n ]\n pred_record_mat_list.append(pred_record_mat)\n gold_record_mat_list.append(gold_record_mat)\n\n g_eval_res = measure_event_table_filling(\n pred_record_mat_list,\n gold_record_mat_list,\n self.event_template.event_type_fields_list,\n pred_event_types,\n gold_event_types,\n pred_spans_token_tuple_list,\n gold_spans_token_tuple_list,\n pred_adj_mats=pred_adj_mats,\n gold_adj_mats=gold_adj_mats,\n pred_combinations=pred_combinations,\n gold_combinations=gold_combinations,\n dict_return=True,\n )\n print_data = {\n \"classification\": {\n \"p\": g_eval_res[\"classification\"][\"MicroPrecision\"],\n \"r\": g_eval_res[\"classification\"][\"MicroRecall\"],\n \"f1\": g_eval_res[\"classification\"][\"MicroF1\"],\n },\n \"entity\": {\n \"p\": g_eval_res[\"entity\"][\"MicroPrecision\"],\n \"r\": g_eval_res[\"entity\"][\"MicroRecall\"],\n \"f1\": g_eval_res[\"entity\"][\"MicroF1\"],\n },\n \"combination\": {\n \"p\": g_eval_res[\"combination\"][\"MicroPrecision\"],\n \"r\": g_eval_res[\"combination\"][\"MicroRecall\"],\n \"f1\": g_eval_res[\"combination\"][\"MicroF1\"],\n },\n \"rawCombination\": {\n \"p\": g_eval_res[\"rawCombination\"][\"MicroPrecision\"],\n \"r\": g_eval_res[\"rawCombination\"][\"MicroRecall\"],\n \"f1\": g_eval_res[\"rawCombination\"][\"MicroF1\"],\n },\n \"overall\": {\n \"p\": g_eval_res[\"overall\"][\"MicroPrecision\"],\n \"r\": g_eval_res[\"overall\"][\"MicroRecall\"],\n \"f1\": g_eval_res[\"overall\"][\"MicroF1\"],\n },\n \"instance\": {\n \"p\": g_eval_res[\"instance\"][\"MicroPrecision\"],\n \"r\": g_eval_res[\"instance\"][\"MicroRecall\"],\n \"f1\": g_eval_res[\"instance\"][\"MicroF1\"],\n },\n \"adj_mat\": g_eval_res[\"adj_mat\"][\"Accuracy\"],\n }\n logger.info(json.dumps(print_data, indent=2, ensure_ascii=False))\n type_names = [x[0] for x in self.event_template.event_type_fields_list]\n for (\n doc_fea,\n pred_record_mat,\n gold_record_mat,\n pred_event_type,\n gold_event_type,\n pred_spans_token_tuple,\n gold_spans_token_tuple,\n pred_adj_mat,\n gold_adj_mat,\n pred_combination,\n gold_combination,\n ) in zip(\n new_features,\n pred_record_mat_list,\n gold_record_mat_list,\n pred_event_types,\n gold_event_types,\n pred_spans_token_tuple_list,\n gold_spans_token_tuple_list,\n pred_adj_mats,\n gold_adj_mats,\n pred_combinations,\n gold_combinations,\n ):\n if pred_record_mat != gold_record_mat:\n continue\n texts = []\n for line in doc_fea.doc_token_ids:\n texts.append(\n \"\".join(\n filter(\n lambda x: x != \"[PAD]\",\n tokenizer.convert_ids_to_tokens(line.tolist()),\n )\n )\n )\n mid_result = {\n \"doc_type\": [\"o2o\", \"o2m\", \"m2m\", \"unk\"][doc_fea.doc_type],\n \"span_type\": span_type,\n \"guid\": doc_fea.guid,\n \"texts\": texts,\n \"pred_event_type\": list(\n map(\n lambda x: x[0],\n filter(lambda x: x[1] == 1, zip(type_names, pred_event_type)),\n )\n ),\n \"gold_event_type\": list(\n map(\n lambda x: x[0],\n filter(lambda x: x[1] == 1, zip(type_names, gold_event_type)),\n )\n ),\n \"pred_ents\": [\n \"\".join(tokenizer.convert_ids_to_tokens(span))\n for span in 
pred_spans_token_tuple\n ],\n \"gold_ents\": [\n \"\".join(tokenizer.convert_ids_to_tokens(span))\n for span in gold_spans_token_tuple\n ],\n \"pred_adj_mat\": fill_diag(pred_adj_mat[0], -1),\n \"gold_adj_mat\": gold_adj_mat[0],\n \"pred_comb\": [\n [\"\".join(tokenizer.convert_ids_to_tokens(arg)) for arg in comb]\n for comb in pred_combination\n ],\n \"gold_comb\": [\n [\"\".join(tokenizer.convert_ids_to_tokens(arg)) for arg in comb]\n for comb in gold_combination\n ],\n \"pred_ins\": recover_ins(\n self.event_template.event_type_fields_list,\n tokenizer.convert_ids_to_tokens,\n pred_record_mat,\n ),\n \"gold_ins\": recover_ins(\n self.event_template.event_type_fields_list,\n tokenizer.convert_ids_to_tokens,\n gold_record_mat,\n ),\n }\n with open(\n os.path.join(output_dir, f\"{doc_fea.guid}.json\"), \"wt\", encoding=\"utf-8\"\n ) as fout:\n json.dump(mid_result, fout, ensure_ascii=False, indent=2)\n\n def inference(self, dump_filepath=None, resume_epoch=1):\n import json\n\n import torch\n\n self.resume_cpt_at(resume_epoch)\n\n self.logging(\n \"=\" * 20 + \"Start Inference, Will Dump to: \" + dump_filepath + \"=\" * 20\n )\n\n # prepare data loader\n eval_dataloader = self.prepare_data_loader(\n self.inference_dataset, self.setting.eval_batch_size, rand_flag=False\n )\n\n # enter eval mode\n total_info = []\n if self.model is not None:\n self.model.eval()\n\n iter_desc = \"Inference\"\n if self.in_distributed_mode():\n iter_desc = \"Rank {} {}\".format(dist.get_rank(), iter_desc)\n\n for step, batch in enumerate(\n tqdm(eval_dataloader, desc=iter_desc, ncols=80, ascii=True)\n ):\n batch = self.set_batch_to_device(batch)\n\n with torch.no_grad():\n # this func must run batch_info = model(batch_input)\n # and metrics is an instance of torch.Tensor with Size([batch_size, ...])\n # to fit the DataParallel and DistributedParallel functionality\n batch_info = self.get_event_decode_result_on_batch(\n batch,\n features=self.inference_features,\n use_gold_span=False,\n heuristic_type=None,\n )\n # append metrics from this batch to event_info\n if isinstance(batch_info, torch.Tensor):\n total_info.append(\n batch_info.detach().cpu() # collect results in cpu memory\n )\n else:\n # batch_info is a list of some info on each example\n total_info.extend(batch_info)\n\n if isinstance(total_info[0], torch.Tensor):\n # transform event_info to torch.Tensor\n total_info = torch.cat(total_info, dim=0)\n\n assert (\n len(self.inference_examples)\n == len(self.inference_features)\n == len(total_info)\n )\n # example_list = self.inference_examples\n # feature_list = self.inference_features\n example_list = []\n feature_list = []\n for info in total_info:\n example_list.append(self.inference_examples[info[0]])\n feature_list.append(self.inference_features[info[0]])\n\n if dump_filepath is not None:\n with open(dump_filepath, \"wt\", encoding=\"utf-8\") as fout:\n for example, doc_fea, result in zip(\n example_list, feature_list, total_info\n ):\n assert doc_fea.ex_idx == result[0]\n\n doc_id = doc_fea.guid\n event_list = []\n mspans = []\n event_types = []\n for eid, r in enumerate(result[1]):\n if r == 1:\n event_types.append(self.event_type_fields_pairs[eid][0])\n\n doc_arg_rel_info = result[3]\n mention_drange_list = doc_arg_rel_info.mention_drange_list\n mention_type_list = doc_arg_rel_info.mention_type_list\n doc_token_ids = doc_fea.doc_token_ids.detach().tolist()\n\n for drange, ment_type in zip(\n mention_drange_list, mention_type_list\n ):\n mspan = self.tokenizer.convert_ids_to_tokens(\n 
doc_token_ids[drange[0]][drange[1] : drange[2]]\n )\n if all(x.upper() != \"[UNK]\" for x in mspan):\n mspan = \"\".join(mspan)\n offset = int(self.feature_converter_func.include_cls)\n matched_drange = [\n drange[0],\n drange[1] - offset,\n drange[2] - offset,\n ]\n else:\n mspan, matched_drange = match_arg(\n example.sentences,\n doc_fea.doc_token_ids.numpy(),\n doc_token_ids[drange[0]][drange[1] : drange[2]],\n offset=int(self.feature_converter_func.include_cls),\n )\n mtype = self.setting.tag_id2tag_name[ment_type][2:]\n t_mspan = {\n \"mspan\": mspan,\n \"mtype\": mtype,\n \"drange\": matched_drange,\n }\n if t_mspan not in mspans:\n mspans.append(t_mspan)\n\n for event_idx, events in enumerate(result[2]):\n if events is None:\n continue\n for ins in events:\n if all(x is None for x in ins):\n continue\n tmp_ins = {\n \"event_type\": self.event_template.event_type_fields_list[\n event_idx\n ][\n 0\n ],\n \"arguments\": None,\n }\n arguments = []\n for field_idx, args in enumerate(ins):\n if args is None:\n continue\n if not isinstance(args, set):\n args = {args}\n for arg in args:\n arg_tmp = self.tokenizer.convert_ids_to_tokens(arg)\n if all(x.upper() != \"[UNK]\" for x in arg_tmp):\n real_arg = \"\".join(arg_tmp)\n else:\n real_arg, _ = match_arg(\n example.sentences,\n doc_fea.doc_token_ids.numpy(),\n arg,\n offset=int(\n self.feature_converter_func.include_cls\n ),\n )\n if real_arg is None:\n self.logging(\n f\"doc: {doc_id}, arg ({arg_tmp}) with UNK but original text not found\",\n level=logging.WARNING,\n )\n real_arg = arg_tmp\n arguments.append(\n {\n \"role\": self.event_template.event_type_fields_list[\n event_idx\n ][\n 1\n ][\n field_idx\n ],\n \"argument\": real_arg,\n }\n )\n tmp_ins[\"arguments\"] = arguments\n event_list.append(tmp_ins)\n\n doc_res = {\n \"id\": doc_id,\n \"event_list\": event_list,\n \"comments\": {\n \"pred_types\": event_types,\n \"mspans\": mspans,\n \"sentences\": example.sentences,\n },\n }\n fout.write(f\"{json.dumps(doc_res, ensure_ascii=False)}\\n\")\n fout.flush()\n self.logging(f\"Results dumped to {dump_filepath}\")\n"
] |
[
[
"torch.cat",
"torch.optim.AdamW",
"torch.no_grad",
"torch.distributed.get_rank",
"torch.distributed.get_world_size"
]
] |
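The evaluation and inference routines in the file above all reduce to micro-averaged precision/recall/F1 over predicted versus gold event records, computed by the repo's own `dee.helper.measure_event_table_filling`. As a hedged illustration only (the real scorer also handles event types, spans, combinations, and adjacency matrices), here is a minimal sketch of the micro metric over flattened `(event_type, role, argument)` triples; `micro_prf` is a name introduced here, not part of the repo:

```python
# Hypothetical sketch of the micro-averaged P/R/F1 that the printed
# "p"/"r"/"f1" fields report; the repo's actual scorer is
# dee.helper.measure_event_table_filling.
from collections import Counter


def micro_prf(pred_triples, gold_triples):
    """pred/gold: lists of hashable (event_type, role, argument) tuples."""
    pred, gold = Counter(pred_triples), Counter(gold_triples)
    tp = sum((pred & gold).values())  # multiset intersection = true positives
    p = tp / sum(pred.values()) if pred else 0.0
    r = tp / sum(gold.values()) if gold else 0.0
    f1 = 2 * p * r / (p + r) if p + r > 0 else 0.0
    return p, r, f1


# Example: two predictions, one of which matches the single gold triple
# micro_prf([("EquityFreeze", "Pledger", "ACME"),
#            ("EquityFreeze", "FrozeShares", "100")],
#           [("EquityFreeze", "Pledger", "ACME")])  # -> (0.5, 1.0, 0.667)
```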
valeriupredoi/cmip5datafinder
|
[
"ac411f4194bac1f56decabec0a59ee349ed39f5e"
] |
[
"cmip5datafinder_v2.py"
] |
[
"#!/home/valeriu/sdt/bin/python\n\n\"\"\"\ncmip5datafinder.py\nPython 2.7.13\nScript that searches for data locally and on valid ESGF nodes. It builds \ncache files using the results of the search.\n\n\"\"\"\n\n# -------------------------------------------------------------------------\n# Setup.\n# -------------------------------------------------------------------------\n\n# ---- Import standard modules to the python path.\nimport sys, os, shutil, math, copy, getopt, re, string, popen2, time, errno\nimport numpy as np\nfrom numpy import loadtxt as lt\nfrom numpy import savetxt as st\nfrom xml.dom import minidom\nimport subprocess\nfrom datetime import datetime\nimport time\nimport data_finder as df\n\n__author__ = \"Valeriu Predoi <valeriu.predoi@ncas.ac.uk>\"\n\n# ---- Function usage.\n# ---- opts parsing\ndef usage():\n msg = \"\"\"\\\nThis is a flexible tool to generate cache files from local datasources (e.g. badc or dkrz \nmounted disks) and ESGF nodes. This makes use of synda for querying ESGF nodes as well.\nFor problems or queries, email valeriu.predoi@ncas.ac.uk. Have fun!\n\nCode functionality:\n1. Given a command line set of arguments or an input file, the code looks for cmip5\nfiles locally and returns the physical paths to the found files;\n2. If files are not found, the user has the option to download missing files from ESGF\nnodes via synda;\n3. Finally, the code writes cache files stored in a directory cache_files_[DATASOURCE]:\n - cache_cmip5_[DATASOURCE].txt local cache file with paths only on server [DATASOURCE]\n - cache_cmip5_combined_[DATASOURCE].txt combined local cache file (synda+DATASOURCE)\n - cache_cmip5_synda_[DATASOURCE].txt synda local cache file\n - cache_err.out errors sdout while caching\n - missing_cache_cmip5_[DATASOURCE].txt local missing files on [SERVER]\n - missing_cache_cmip5_combined_[DATASOURCE].txt missing files (synda+local)\n - missing_cache_cmip5_synda_[DATASOURCE].txt synda missing files\n4. Finally-finally it plots the overall, incomplete and missing files by model (png format).\n\nExample run:\npython cmip5datafinder.py -p PARAM_FILE --synda --download --dryrun --verbose --datasource badc\n\nDefinition of filedescriptor:\nTo understand the output, by filedescriptor we mean any file indicator\nof form e.g. CMIP5_MIROC5_Amon_historical_r1i1p1_2003_2010_hus that is fully\ndetermined by its parameters; there could be multiple .nc files\ncovering a single filedescriptor, alas there could be just one.\nAll cache files contain first a file indicator = filedescriptor e.g.\n\nCMIP5_MIROC5_Amon_historical_r1i1p1_2003_2010_hus\n\nUsage:\n cmip5datafinder.py [options]\n -p, --params-file <file> Namelist file (xml) or text file (txt) or any other input file [REQUIRED] \n e.g. for xml: --params-file ESMValTool/nml/namelist_myTest.xml\n e.g. for text: --params-file example.txt\n e.g. for yaml: --params-file example.yml\n This option is REQUIRED if --user-input (command line) is NOT present\n -h, --help Display this message and exit\n --user-input Flag for user defined CMIP file and variables parameters (to be input at command line\n with --fileparams for each parameter)\n This option is REQUIRED if --params-file is not present\n --datasource Name of local data source (example: badc). Available datasources:\n badc [to add more here, depending where running the code][REQUIRED]\n --synda Flag to call synda operations. 
If not passed, local datasources will be used ONLY\n --download Flag to allow download missing data via synda\n --dryrun Flag to pass if no download is wanted. Don't pass this if downloads are neeeded!\n If --dryrun in arguments, all cache files will be written as normal but with\n NOT-YET-INSTALLED flag per file\n --fileparams If --user-input is used, this serial option passes one data file argument at a time\n If --user-input is used, this serial option is REQUIRED\n e.g. --fileparams CMIP5 --fileparams MPI-ESM-LR --fileparams Amon --fileparams historical\n --fileparams r1i1p1 --fileparams 1910 --fileparams 1919\n --uservars If --user-input is used, this serial option passes one variable argument at a time\n If --user-input is used, this serial option is REQUIRED\n e.g. --uservars tro3\n --verbose Flag to show in-code detailed messages\n\nUnderstand the workflow:\n(1) python cmip5datafinder.py -p PARAM_FILE --datasource badc\n looks for files associated with data sources in PARAM_FILE locally on e.g badc only, in dirs in root /badc/cmip5/data/cmip5/output1/\n stores cache files in cache_files_badc/ and creates user-friendly cache cache_PARAM_FILE.txt-badc\n(2) python cmip5datafinder.py -p PARAM_FILE --synda --datasource badc\n same as (1), but it adds the local /sdt/data/ to the data lookup targets ONLY for incomplete/missing filedescriptors on badc; creates combined caches \n with data on badc and in /sdt/;\n creates the same user-friendly cache_PARAM_FILE.txt-badc that this time will include files present in /sdt/ too\n(3) python cmip5datafinder.py -p PARAM_FILE --synda --download --datasource badc\n same as (2) only this time the code will search for files that are incomplete or missing from badc AND /sdt/ over the net on ESGF nodes and will\n download them into /sdt/data/ if no --dryrun specified; NOTE that it is impossible to ask for download if prior checks in BOTH\n badc and /sdt/data/ have not been done (this is in place so that wild download will not happen);\n(4) python cmip5datafinder.py -p PARAM_FILE --synda --download --dryrun --datasource badc\n same as (3) but no actual downloads happen. 
\n\n\"\"\"\n print >> sys.stderr, msg\n\n########################################\n# ---- Operational functions here ---- #\n########################################\n\n# ---- get the path to synda executable\ndef which_synda(synda):\n \"\"\"\n\n This function returns the path to the synda exec\n or aborts the whole program if synda needs to be used\n but its executable is not found.\n\n \"\"\"\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(synda)\n if fpath:\n if is_exe(synda):\n #print('We are using the following executable: %s' % synda)\n return synda\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, synda)\n if is_exe(exe_file):\n return exe_file\n return None\n\n# ---- handling the years for files\ndef time_handling(year1, year1_model, year2, year2_model):\n \"\"\"\n This function is responsible for finding the correct \n files for the needed timespan:\n\n year1 - the start year in files\n year1_model - the needed start year of data\n year2 - the last year in files\n year2_model - the needed last year of data\n WARNINGS:\n we reduce our analysis only to years\n\n \"\"\"\n # model interval < data interval / file\n # model requirements completely within data stretch\n if year1 <= int(year1_model) and year2 >= int(year2_model):\n return True, True\n\n # model interval > data interval / file\n # data stretch completely within model requirements\n elif year1 >= int(year1_model) and year2 <= int(year2_model):\n return True, False\n\n # left/right overlaps and complete misses\n elif year1 <= int(year1_model) and year2 <= int(year2_model):\n # data is entirely before model\n if year2 < int(year1_model):\n return False, False\n # edge on\n elif year2 == int(year1_model):\n return True, False\n # data overlaps to the left\n elif year2 > int(year1_model):\n return True, False\n\n elif year1 >= int(year1_model) and year2 >= int(year2_model):\n # data is entirely after model\n if year1 > int(year2_model):\n return False, False\n # edge on\n elif year1 == int(year2_model):\n return True, False\n # data overlaps to the right\n elif year1 < int(year2_model):\n return True, False\n\n# ---- function to handle various date formats\ndef date_handling(time1,time2):\n \"\"\"\n This function deals with different input date formats e.g.\n time1 = 198204 or\n time1 = 19820422 or\n time1 = 198204220511 etc\n More formats can be coded in at this stage.\n Returns year 1 and year 2\n \"\"\"\n # yyyymm\n if len(list(time1)) == 6 and len(list(time2)) == 6:\n y1 = datetime.strptime(time1, '%Y%m')\n year1 = y1.year\n y2 = datetime.strptime(time2, '%Y%m')\n year2 = y2.year\n else:\n # yyyymmdd\n if len(list(time1)) == 8 and len(list(time2)) == 8:\n y1 = datetime.strptime(time1, '%Y%m%d')\n year1 = y1.year\n y2 = datetime.strptime(time2, '%Y%m%d')\n year2 = y2.year\n # yyyymmddHHMM\n if len(list(time1)) == 12 and len(list(time2)) == 12:\n y1 = datetime.strptime(time1, '%Y%m%d%H%M')\n year1 = y1.year\n y2 = datetime.strptime(time2, '%Y%m%d%H%M')\n year2 = y2.year\n return year1,year2\n\n# ---- cleanup duplicate entries in files\ndef fix_duplicate_entries(outfile):\n \"\"\"\n simple fast function to eliminate duplicate entries\n from a cache file\n \"\"\"\n # ---- fixing the cache file for duplicates\n ar = np.genfromtxt(outfile, dtype=str,delimiter='\\n')\n nar = np.unique(ar)\n st(outfile,nar,fmt='%s')\n\n# ---- synda search\ndef synda_search(model_data,varname):\n \"\"\"\n This 
function performs the search for files in synda-standard paths\n It takes exactly two arguments:\n - a model data string of type e.g. 'CMIP5 MPI-ESM-LR Amon amip r1i1p1'\n - a variable name as string e.g. 'tro3'\n It performs the search for files associated with these parameters and returns ALL\n available files. (command example: synda search -f CMIP5 MPI-ESM-LR Amon amip r1i1p1 tro3)\n\n \"\"\"\n # this is needed mostly for parallel processes that may\n # go tits-up from time to time due to random path mixes\n if which_synda('synda') is not None:\n pass\n else:\n print >> sys.stderr, \"No synda executable found in path. Exiting.\"\n sys.exit(1)\n synda_search = which_synda('synda') + ' search -f ' + model_data + ' ' + varname\n proc = subprocess.Popen(synda_search, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n if err is not None:\n print >> sys.stderr, \"An error has occured while searching for data:\"\n print >> sys.stderr, err\n sys.exit(1)\n else:\n return out\n\n\n# ---- cache local data\n#def write_cache_direct(params_file,ldir,rdir,outfile,outfile2,errfile,ld,verbose=False):\ndef write_cache_direct(params_file, rootp, outfile, outfile2, errfile, drs, verbose=False):\n\n \"\"\"\n Function that does direct parsing of available datasource files and establishes\n the paths to the needed files; makes use of find_local_files()\n File versioning is controlled by finding the ld = e.g. /latest/ dir \n in the badc datasource, this may differ on other clusters and should be correctly\n hardcoded in the code!\n\n \"\"\"\n car = np.genfromtxt(params_file, dtype=str, delimiter='\\n')\n # ---- eliminate duplicates from input file, if any\n nar = np.unique(car)\n prfile = 'prepended_' + params_file\n if len(nar) == 1:\n with open(prfile, 'a') as file:\n file.write(nar)\n file.write('\\n')\n file.write(nar)\n else:\n st(prfile,nar,fmt='%s')\n itemlist = lt(prfile,dtype=str)\n lenitemlist = len(itemlist)\n for item in itemlist:\n\n # new functionality using data_finder module\n # build the model dictionary\n # in file: CMIP5 MPI-ESM-LR Amon historical r1i1p1 1980 2005 pr\n # model dictionary: {name: MPI-ESM-LR, project: CMIP5, mip: Amon, exp: historical, ensemble: r1i1p1, start_year: 2000, end_year: 2002} \n model = {}\n model['project'] = item[0]\n model['name'] = item[1]\n model['mip'] = item[2]\n model['exp'] = item[3]\n model['ensemble'] = item[4]\n model['start_year'] = item[5]\n model['end_year'] = item[6]\n\n # build the var variable\n var = {}\n var['name'] = item[7]\n var['mip'] = item[2]\n var['exp'] = item[3]\n var['ensemble'] = item[4]\n\n arname = df.get_input_filelist(rootp, model, var, drs)\n\n # still keep all the infrastructure\n if len(arname) > 0:\n # var = item[7]\n header = item[0] + '_'+ item[1] + '_' + item[2]\\\n + '_' + item[3] + '_' + item[4] + '_' + item[5]\\\n + '_' + item[6] + '_' + item[7]\n yr1 = int(item[5])\n yr2 = int(item[6])\n for s in arname:\n ssp = s.split('/')\n av = ssp[-1]\n time_range = av.split('_')[-1].strip('.nc')\n time1 = time_range.split('-')[0]\n time2 = time_range.split('-')[1]\n year1 = date_handling(time1,time2)[0]\n year2 = date_handling(time1,time2)[1]\n # case where the required data completely overlaps\n # available data\n # this case stops the code to make a call to synda for this filedescriptor\n if time_handling(year1, yr1, year2, yr2)[0] is True and time_handling(year1, yr1, year2, yr2)[1] is True:\n if os.path.exists(s):\n with open(outfile, 'a') as file:\n file.write(header + ' ' + s + '\\n')\n if verbose is 
True:\n print('Cached file from local datasource: ' + s)\n else:\n with open(outfile2, 'a') as file:\n file.write(header + ' ERROR-MISSING' + '\\n')\n if verbose is True:\n print('WARNING: missing from local datasource: ' + header)\n # case where the required data is not fully found\n # ie incomplete data \n # what we want to do here is cache what we have available\n # but also let synda know there is missing data, maybe\n # she can find it...just maybe\n # also we must make sure she doesnt download what we already have\n if time_handling(year1, yr1, year2, yr2)[0] is True and time_handling(year1, yr1, year2, yr2)[1] is False:\n if os.path.exists(s):\n with open(outfile, 'a') as file:\n file.write(header + ' ' + s + '\\n')\n if verbose is True:\n print('Cached file from local datasource: ' + s)\n sfn = s.split('/')[-1]\n with open(outfile2, 'a') as file:\n # the INCOMPLETE indicator will be used\n # to label partially complete filedescriptors so synda can\n # look for the missing bits and hopefully complete it\n file.write(header + ' INCOMPLETE ' + sfn + '\\n')\n else:\n with open(outfile2, 'a') as file:\n file.write(header + ' ERROR-MISSING' + '\\n')\n if verbose is True:\n print('WARNING: missing from local datasource: ' + header)\n else:\n with open(outfile2, 'a') as file:\n # missing entirely\n file.write(\"_\".join(item) + ' ERROR-MISSING' + '\\n')\n if verbose is True:\n print('WARNING: missing from local datasource: ' + \"_\".join(item))\n if os.path.exists(outfile):\n fix_duplicate_entries(outfile)\n else:\n print >> sys.stderr, \"WARNING: could not cache any data from local datasource\"\n if os.path.exists(outfile2):\n fix_duplicate_entries(outfile2)\n else:\n print >> sys.stderr, \"Cached all needed data from local datasource. Looks like there are no missing files, huzzah!\"\n\n# ---- print some stats\ndef print_stats(outfile1,outfile2):\n \"\"\"\n small function to print some stats at the end\n \"\"\"\n if os.path.exists(outfile1) and os.path.exists(outfile2):\n ar1 = np.genfromtxt(outfile1, dtype=str,delimiter='\\n')\n ar2 = np.genfromtxt(outfile2, dtype=str,delimiter='\\n')\n # force to a 1-liner\n if ar1.ndim == 0:\n f = 1\n else:\n f = len(ar1)\n if ar2.ndim == 0:\n m = 1\n else:\n m = len(ar2)\n print('\\n###############################################################')\n print(' Found and cached: %i individual .nc files cached' % f)\n print('Missing/incomplete: %i individual datasets NOT cached/incomplete' % m)\n print('#################################################################\\n')\n elif os.path.exists(outfile1) and os.path.exists(outfile2) is False:\n ar1 = np.genfromtxt(outfile1, dtype=str,delimiter='\\n')\n if ar1.ndim == 0:\n f = 1\n else:\n f = len(ar1)\n print('\\n########################################################')\n print('Found and cached: %i individual .nc files cached' % f)\n print('########################################################\\n')\n elif os.path.exists(outfile1) is False:\n print('Shoot! 
No cache written this time around...') \n\n# ---- synda download\ndef synda_dll(searchoutput,varname,year1_model,year2_model,header,D,outfile,outfile2,download=False,dryrunOn=False,verbose=False):\n \"\"\"\n This function takes the standard search output from synda\n and parses it to see if/what files need to be downloaded\n\n The searchoutput argument is a string and is of the form e.g.\n\n new 221.2 MB cmip5.output1.MPI-M.MPI-ESM-LR.historical.mon.atmos.Amon.r1i1p1.v20120315.tro3_Amon_MPI-ESM-LR_historical_r1i1p1_195001-195912.nc\n done 132.7 MB cmip5.output1.MPI-M.MPI-ESM-LR.historical.mon.atmos.Amon.r1i1p1.v20120315.tro3_Amon_MPI-ESM-LR_historical_r1i1p1_200001-200512.nc\n new 221.2 MB cmip5.output1.MPI-M.MPI-ESM-LR.historical.mon.atmos.Amon.r1i1p1.v20120315.tro3_Amon_MPI-ESM-LR_historical_r1i1p1_185001-185912.nc\n \n ie typical synda file search output. This gets parsed in and analyzed\n against the required model file characterstics and files that comply can\n be downloaded via synda install. It also takes the year1_model and year2_model, for time checks.\n It also takes the variable name and the name of a cache file outfile that will be written to disk. \n dryrunOn is the switch from a physical download to just polling the esgf node without any download.\n\n varname: variable\n D: incomplete filedescriptors: the dictionary that contains the files that are already available locally\n year1_model, year2_model: needed filedescriptor year1 and 2\n header: unique filedescriptor indicator e.g. CMIP5_CNRM-CM5_Amon_historical_r1i1p1_2003_2010_hus\n outfile: cache file\n outfile2: missing cache file\n download: download (either dryrun or for reals) flag \n \n \"\"\"\n # this is needed mostly for parallel processes that may\n # go tits-up from time to time due to random path mixes\n if which_synda('synda') is not None:\n pass\n else:\n print >> sys.stderr, \"No synda executable found in path. Exiting.\"\n sys.exit(1)\n entries = searchoutput.split('\\n')[:-1]\n if len(entries) > 0:\n for entry in entries:\n label=str(entry.split()[0])\n file_name = entry.split()[3]\n if header.split('_')[1] == file_name.split('.')[3]:\n time_range = file_name.split('_')[-1].strip('.nc')\n time1 = time_range.split('-')[0]\n time2 = time_range.split('-')[1]\n year1 = date_handling(time1,time2)[0]\n year2 = date_handling(time1,time2)[1]\n if time_handling(year1, year1_model, year2, year2_model)[0] is True:\n if label=='done':\n file_name_complete = \".\".join(file_name.split('.')[:10]) + '.' + varname + '.' + \".\".join(file_name.split('.')[10:])\n filepath_complete = '/sdt/data/c' + file_name_complete.replace('.','/').strip('/nc') + '.nc'\n fn = filepath_complete.split('/')[-1]\n # synda should not cache files in dictionary D\n # these belong to incomplete filedescriptors but are already on disk\n if fn not in D[header]:\n with open(outfile, 'a') as file:\n file.write(header + ' ' + filepath_complete + ' ' + 'INSTALLED' + '\\n')\n if verbose is True:\n print('File exists in local /sdt/data, path: ' + filepath_complete)\n # no download #\n elif label=='new':\n if download is True:\n file_name_new = \".\".join(file_name.split('.')[:10]) + '.' + varname + '.' 
+ \".\".join(file_name.split('.')[10:])\n filepath_new = '/sdt/data/c' + file_name_new.replace('.','/').strip('/nc') + '.nc'\n fn = filepath_new.split('/')[-1]\n # synda should not download files in dictionary D\n # these belong to incomplete filedescriptors but are already on disk\n if fn not in D[header]:\n if dryrunOn is True:\n if verbose is True:\n print('Needed file %s doesnt exist in local /sdt/data but is on ESGF nodes, enable download to get it' % file_name)\n print('Download enabled in dryrun mode...')\n print('Synda found file: ' + file_name)\n print('If installed, full path would be: ' + filepath_new)\n with open(outfile, 'a') as file:\n file.write(header + ' ' + filepath_new + ' ' + 'NOT-YET-INSTALLED' + '\\n')\n # no download, dryrun only #\n else:\n synda_install = which_synda('synda') + ' install ' + file_name\n proc = subprocess.Popen(synda_install, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)\n dll ='\\n'\n (out, err) = proc.communicate(input=dll)\n if err is not None:\n print >> sys.stderr, \"An error has occured while starting the download:\"\n print >> sys.stderr, err\n sys.exit(1)\n else:\n with open(outfile, 'a') as file:\n file.write(header + ' ' + filepath_new + ' ' + 'INSTALLED' + '\\n')\n if verbose is True:\n print('Needed file %s doesnt exist in local /sdt/data but is on ESGF nodes' % file_name)\n print('Download enabled in full install mode...')\n print('Downloading file: ' + file_name)\n print('Full path: ' + filepath_new)\n # yes download #\n else:\n if verbose is True:\n print('WARNING: synda - not cached due to requested period mismatch: ' + header + ' ' + file_name)\n return 0\n else:\n if verbose is True:\n print('WARNING: synda - not cached due to model mismatch: ' + header + ' ' + file_name)\n return 0\n else:\n if verbose is True:\n print('WARNING: synda - missing data altogether: ' + header)\n return 0\n if os.path.exists(outfile):\n fix_duplicate_entries(outfile)\n\ndef cache_merge(file1,file2,finalFile):\n \"\"\"\n Function that takes two cache files and merges them\n into a single one. 
Caution -- note the order:\n file1 = local datasource cache\n file2 = local synda cache\n \"\"\"\n f1 = open(file1, 'r')\n f2 = open(file2, 'r')\n ff = open(finalFile, 'w')\n r1 = f1.readlines()\n r2 = f2.readlines()\n for b in r2:\n for a in r1:\n if a.split()[1].split('/')[-1] == b.split()[1].split('/')[-1]:\n ff.write(a.split()[0] + ' ' + a.split()[1] + '\\n')\n else:\n ff.write(a.split()[0] + ' ' + a.split()[1] + '\\n')\n ff.write(b.split()[0] + ' ' + b.split()[1] + '\\n')\n ff.close()\n if os.path.exists(finalFile):\n fix_duplicate_entries(finalFile) \n \n# ---- final user-friendly cache generator\ndef final_cache(parfile,ofile1,finalfile):\n \"\"\"\n Function that generates the final user-friendly\n single cache file; this can easily be used\n in various analyses; file legend:\n Database | data_status | Percent complete | available_data\n ---------------------------------------------\n CMIP5_MIROC5_Amon_historical_r1i1p1_2003_2010_hus (complete,incomplete or missing) [file_list, if available]\n \"\"\"\n pparfile = 'prepended_' + parfile\n car = open(pparfile, 'r')\n lis = car.readlines()\n ff = open(finalfile, 'w')\n if os.path.exists(ofile1):\n of1 = open(ofile1, 'r')\n ofl1 = of1.readlines()\n o1 = [(a.split()[0],a.split()[1]) for a in ofl1]\n for b in lis:\n tt = []\n hh = []\n header = b.split()[0] + '_'+ b.split()[1] + '_' + b.split()[2]\\\n + '_' + b.split()[3] + '_' + b.split()[4] + '_' + b.split()[5]\\\n + '_' + b.split()[6] + '_' + b.split()[7]\n\n for h in o1:\n if header == h[0]:\n y = h[1].split('/')[-1].strip('.nc').split('_')[-1].split('-')\n # y could be some dodgy stuff if file not proper formatted\n if len(y) == 2: \n yr1 = date_handling(y[0],y[1])[0]\n yr2 = date_handling(y[0],y[1])[1]\n tt.append(yr1)\n tt.append(yr2)\n hh.append(h[1])\n else:\n print('File: _date1-date2.nc not properly formatted...skipping it')\n y1 = int(b.split()[5])\n y2 = int(b.split()[6])\n # let's see how we do with time\n if len(tt) > 0:\n if get_overlap(tt,y1,y2)[1] == 1:\n # we have contiguous time\n if get_overlap(tt,y1,y2)[0] == 1:\n ff.write(header + ' complete 1.0 ' + str(hh) + '\\n')\n else:\n fdt = get_overlap(tt,y1,y2)[0]\n ff.write(header + ' incomplete ' + '%.2f' % fdt + ' ' + str(hh) + '\\n')\n else:\n # we have gaps\n if get_overlap(tt,y1,y2)[0] == 1:\n ff.write(header + ' complete(DATAGAPS) 1.0 ' + str(hh) + '\\n')\n else:\n fdt = get_overlap(tt,y1,y2)[0]\n ff.write(header + ' incomplete(DATAGAPS) ' + '%.2f' % fdt + ' ' + str(hh) + '\\n')\n else:\n ff.write(header + ' missing' + '\\n')\n\n#---- function that returns the amount of overlap\n# between needed data and available data\ndef get_overlap(tt, my1, my2):\n \"\"\"\n function that returns the amount of overlap\n between needed data and available data\n Returns a fractional float\n li: list of years from data (1-dim, even number of elements)\n my1,my2: required model years\n \"\"\"\n nt = len(tt)\n my = float(my2 - my1)\n if nt == 2:\n # single file, no gaps in data\n if min(tt) >= my1 and max(tt) <= my2:\n # completely inside\n df = (max(tt) - min(tt))/my\n elif min(tt) >= my1 and max(tt) >= my2:\n # right plus\n df = (my2 - min(tt))/my\n elif min(tt) <= my1 and max(tt) <= my2:\n # left plus\n df = (max(tt) - my1)/my\n elif my1 >= min(tt) and my2 <= max(tt):\n df = 1\n return df,1\n else:\n #multiple files, checking for gaps in data\n b = max(tt) - min(tt)\n el = [tt[i] - tt[i-1] for i in range(1,nt)]\n if sum(el) == b:\n # multiple files, no gaps in data\n if min(tt) >= my1 and max(tt) <= my2:\n # completely inside\n df = 
(max(tt) - min(tt))/my\n elif min(tt) >= my1 and max(tt) >= my2:\n # right plus\n df = (my2 - min(tt))/my\n elif min(tt) <= my1 and max(tt) <= my2:\n # left plus\n df = (max(tt) - my1)/my\n elif my1 >= min(tt) and my2 <= max(tt):\n df = 1\n return df,1\n else:\n # there are gaps!!\n # but we dont deal with them here\n df = 1\n print('WARNING: there are gaps in data!')\n print(tt)\n return df,2\n\n \n #dtl = [tt[i] - tt[i-1] for i in range(1,nt)]\n \n\ndef print_final_stats(sfile):\n \"\"\"\n print some final stats\n To understand the output, by filedescriptor we mean any file indicator\n of form e.g. CMIP5_MIROC5_Amon_historical_r1i1p1_2003_2010_hus that is fully\n determined by its parameters; there could be multiple .nc files\n covering a single filedescriptor, alas there could be just one.\n \"\"\"\n ff = open(sfile, 'r')\n lff = ff.readlines()\n c = [a for a in lff if a.split()[1] == 'complete']\n ic = [b for b in lff if b.split()[1] == 'incomplete']\n mi = [d for d in lff if d.split()[1] == 'missing']\n gc = [a for a in lff if a.split()[1] == 'complete(DATAGAPS)']\n gic = [b for b in lff if b.split()[1] == 'incomplete(DATAGAPS)']\n prcc = [float(a.split()[2]) for a in lff if a.split()[1] == 'incomplete']\n print('---------------------------')\n if len(gc) != 0 and len(gic) != 0:\n print('============================')\n print('WARNING: THERE ARE DATA GAPS!')\n print('============================')\n print(' Total needed filedescriptors: %i' % len(lff))\n print(' Complete filedescriptors: %i' % len(c))\n print(' Incomplete filedescriptors: %i' % len(ic))\n print(' Missing filedescriptors: %i' % len(mi))\n print(' Complete dbs with gaps: %i' % len(gc))\n print(' Incomplete dbs with gaps: %i' % len(gic))\n print(' Avg coverage for incomplete: %.2f' % np.mean(prcc))\n print('---------------------------')\n\n# ---- plotting the filedescriptors in pie charts\ndef plotter(cachefile,saveDir):\n \"\"\"\n simple pie chart plotting function\n \"\"\"\n # get matplotlib\n import matplotlib as mpl\n mpl.use('Agg')\n import matplotlib.pyplot as plt\n # plot overall\n ff = open(cachefile,'r')\n lff = ff.readlines()\n c = [a for a in lff if a.split()[1] == 'complete']\n ic = [b for b in lff if b.split()[1] == 'incomplete']\n mi = [d for d in lff if d.split()[1] == 'missing']\n # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n labels = 'complete', 'incomplete', 'missing'\n sizes = [len(c), len(ic), len(mi)]\n explode = (0, 0.1, 0) # only \"explode\" the 2nd slice (i.e. 
'incomplete')\n # plot\n fig1, ax1 = plt.subplots()\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Overall data coverage')\n saveLoc = saveDir + '/overall.png'\n plt.savefig(saveLoc)\n # plot only missing\n c2 = [a.split()[0].split('_')[1] for a in lff if a.split()[1] == 'missing']\n c2s = list(set(c2))\n # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n labels = c2s\n sizes = [c2.count(a) for a in c2s]\n # plot\n fig2, ax2 = plt.subplots()\n ax2.pie(sizes, labels=labels, autopct='%1.1f%%',startangle=90)\n ax2.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Missing data by model')\n saveLoc = saveDir + '/missing.png'\n plt.savefig(saveLoc)\n # plot only incomplete\n c2 = [a.split()[0].split('_')[1] for a in lff if a.split()[1] == 'incomplete']\n c2s = list(set(c2))\n # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n labels = c2s\n sizes = [c2.count(a) for a in c2s]\n # plot\n fig3, ax3 = plt.subplots()\n ax3.pie(sizes, labels=labels, autopct='%1.1f%%',startangle=90)\n ax3.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Incomplete data by model')\n saveLoc = saveDir + '/incomplete.png'\n plt.savefig(saveLoc)\n\n# ---- synda check download\ndef synda_check_dll():\n \"\"\"\n Easy checker on current downloads\n \"\"\"\n print('Your files(s) are being downloaded.')\n print('You can check the download progress with synda queue, see output below')\n synda_queue = 'synda queue'\n proc = subprocess.Popen(synda_queue, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n print(out)\n statusreport = out.split('\\n')\n for entry in statusreport:\n if len(entry)>0:\n if entry.split()[0] == 'waiting':\n print('%i files are waiting, totalling %.2f MB disk' % (int(entry.split()[1]),float(entry.split()[2])))\n synda_watch = 'synda watch'\n proc = subprocess.Popen(synda_watch, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n print(out)\n\n# -------------------------------------------------------------------------\n# Parse the command line options.\n# -------------------------------------------------------------------------\n\n# ---- Initialise command line argument variables.\nparams_file = None\nuserVars = False\ndb = []\nsyndacall = False\ndownload = False\ndryrunOn = False\nfpars = []\nvpars = []\nverbose = False\n\n# ---- Syntax of options, as required by getopt command.\n# ---- Short form.\nshortop = \"hp:g:r:d:i:n:t:f:m:sc:e:\"\n# ---- Long form.\nlongop = [\n \"help\",\n \"params-file=\",\n \"user-input\",\n \"datasource=\",\n \"synda\",\n \"download\",\n \"dryrun\",\n \"fileparams=\",\n \"uservars=\",\n \"verbose\"\n]\n\n# ---- Get command-line arguments.\ntry:\n opts, args = getopt.getopt(sys.argv[1:], shortop, longop)\nexcept getopt.GetoptError:\n usage()\n sys.exit(1)\n\n# ---- We will record the command line arguments to cmip5datafinder.py in a file called\n# cmip5datafinder.param. This file should be used if a further need to run the code arises\ncommand_string = 'cmip5datafinder.py '\n\n# ---- Parse command-line arguments. 
Arguments are returned as strings, so\n# convert type as necessary.\nfor o, a in opts:\n if o in (\"-h\", \"--help\"):\n usage()\n sys.exit(0)\n elif o in (\"-p\", \"--params-file\"):\n params_file = a\n command_string = command_string + ' -p ' + a\n elif o in (\"--user-input\"):\n userVars = True\n command_string = command_string + ' --user-input '\n elif o in (\"--datasource\"):\n db.append(a)\n command_string = command_string + ' --datasource ' + a\n elif o in (\"--synda\"):\n syndacall = True\n command_string = command_string + ' --synda '\n elif o in (\"--download\"):\n download = True\n command_string = command_string + ' --download '\n elif o in (\"--dryrun\"):\n dryrunOn = True\n command_string = command_string + ' --dryrun '\n elif o in (\"--fileparams\"):\n fpars.append(a)\n command_string = command_string + ' --fileparams ' + a\n elif o in (\"--uservars\"):\n vpars.append(a)\n command_string = command_string + ' --uservars ' + a \n elif o in (\"--verbose\"):\n verbose = True\n command_string = command_string + ' --verbose '\n else:\n print >> sys.stderr, \"Unknown option:\", o\n usage()\n sys.exit(1)\n\n# ---- Check that all required arguments are specified, else exit.\nif not params_file:\n if not userVars:\n print >> sys.stderr, \"No parameter file specified and no user file definitions\"\n print >> sys.stderr, \"Use --params-file to specify the parameter file or --user-vars followed\"\n print >> sys.stderr, \"by command-line options for file parameters. Exiting.\"\n sys.exit(1)\n else:\n print >> sys.stderr, \"Using the user's specified file parameters\"\n if not fpars:\n print >> sys.stderr, \"You need to specify a number of file params e.g. CMIP5,MPI-ESM-LR,Amon,historical,r1i1p1,1910,1919\"\n print >> sys.stderr, \"Use the --fileparams option for this\"\n sys.exit(1)\n if not vpars:\n print >> sys.stderr, \"You need to specify a number of variables e.g. tro3\"\n print >> sys.stderr, \"Use the --uservars option for this\"\n sys.exit(1)\nif params_file and userVars:\n print >> sys.stderr, \"Use --params-file to specify the parameter file OR --user-input followed\"\n print >> sys.stderr, \"by command-line options for file and variables parameters. Can not use both options! Exiting.\"\n sys.exit(1)\nif not db:\n print >> sys.stderr, \"No local datasource to search specified\"\n print >> sys.stderr, \"Use --datasource to specify a valid datasource e.g. badc or dkrz. Exiting...\"\n sys.exit(1)\n\n# -------------------------------------------------------------------------\n# Status message. Report all supplied arguments.\n# -------------------------------------------------------------------------\n\nif verbose is True:\n intro = \"\"\"\\\n This is a flexible tool to generate cache files from local datasources and ESGF nodes.\n This makes use of synda for querying ESGF nodes as well.\n For problems or queries, email valeriu.predoi@ncas.ac.uk. Have fun!\n \n Code functionality:\n 1. Given a command line set of arguments or an input file, the code looks for cmip5\n files locally and returns the physical paths to the found files;\n 2. If files are not found, the user has the option to download missing files from ESGF\n nodes via synda;\n 3. 
Finally, the code writes cache files:\n - cache_cmip5_[SERVER].txt local cache file\n - cache_cmip5_combined_[SERVER].txt combined cache file (synda+local)\n - cache_cmip5_synda_[SERVER].txt synda cache file\n - cache_err.out error out while caching\n - missing_cache_cmip5_[SERVER].txt local missing files\n - missing_cache_cmip5_combined_[SERVER].txt missing files (synda+local)\n - missing_cache_cmip5_synda_[SERVER].txt synda missing files\n\n Example run:\n (with param file) python cmip5datafinder.py -p perfmetrics.txt --download --dryrun --verbose --datasource badc\n (with command line args) python cmip5datafinder.py --user-input --fileparams CMIP5 --fileparams bcc-csm1-1 --fileparams --fileparams Amon\n --fileparams historical --fileparams r1i1p1 --fileparams 1982 --fileparams 2014 --uservars clt --uservars tro3 --uservars pr --datasource badc\n --verbose\n \"\"\"\n print >> sys.stdout, intro\n print >> sys.stdout\n print >> sys.stdout, \"####################################################\"\n print >> sys.stdout, \"# CMIP5 Data Finder #\"\n print >> sys.stdout, \"####################################################\"\n print >> sys.stdout\n print >> sys.stdout, \"Parsed input arguments:\"\n print >> sys.stdout\n if params_file:\n print >> sys.stdout,\"Running with parameters file:\", params_file\n else:\n if len(fpars) < 7:\n print >> sys.stderr, \"Too few file parameters (CMIP5 needs exacly 7: e.g. CMIP5 MPI-ESM-LR Amon historical r1i1p1 1980 2005)\"\n sys.exit(1)\n else:\n print >> sys.stdout,\"Running with user-defined file parameters and variables \"\n print >> sys.stdout,\"File :\", fpars[0]\n print >> sys.stdout,\"Experiment:\", fpars[1]\n print >> sys.stdout,\"Medium :\", fpars[2]\n print >> sys.stdout,\"Type :\", fpars[3]\n print >> sys.stdout,\"Ensemble :\", fpars[4]\n print >> sys.stdout,\"Year1 :\", fpars[5]\n print >> sys.stdout,\"Year2 :\", fpars[6]\n print >> sys.stdout,\"Var(s) :\", vpars[0]\n print >> sys.stdout\n\n# ---- if we are using synda\n# ---- Get the synda path or exit here\nif syndacall is True:\n if verbose is True:\n print('You are going to use SYNDA to download data...')\n print('Looking up synda executable...')\n if which_synda('synda') is not None:\n print >> sys.stdout, \"Synda found...OK\" \n print >> sys.stdout, which_synda('synda')\n else:\n print >> sys.stderr, \"No synda executable found in path. 
Exiting.\"\n sys.exit(1)\n\n if verbose is True:\n # ---- Have us some information from the synda configuration file\n # ---- one can add more info if needed, currently just data server\n print('\\n---------------------------------------------')\n print('Information about synda configuration:')\n print('---------------------------------------------')\n synda_conf_file = which_synda('synda').rsplit('/',2)[0] + '/conf/sdt.conf'\n print ('Synda conf file %s' % synda_conf_file)\n with open(synda_conf_file, 'r') as file:\n for line in file:\n if line.split('=')[0]=='indexes':\n data_server = line.split('=')[1]\n print('ESGF data node: %s' % data_server.split()[0])\n\n# ---- Write ASCII file holding cache_BADC.py command.\npfile = open('cmip5datafinder.param','w')\npfile.write(command_string + \"\\n\")\npfile.close()\n\n# ---- Write cache files ---- #\n# ---- hardcoded names so we standardize analyses\n# ---- start overall timing\nt10 = time.time()\n# ---- db is a list and we run on each called datasource\nfor d in db:\n # we need to firstly remove any pre existent cache dirs\n drb = 'cache_files_' + d\n print('Removing all pre-existent cache directories...')\n if os.path.isdir(drb):\n rrc = 'rm -r ' + drb\n proc = subprocess.Popen(rrc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n print('Polling %s datasource...' % d)\n # ...then create new one, standard name cache_files_[SERVER] eg cache_files_badc\n print('We will be writing all needed cache files to %s directory...' % drb)\n mkc = 'mkdir -p ' + drb\n proc = subprocess.Popen(mkc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n # place the cache files\n pfile2 = drb + '/cache_cmip5_' + d + '.txt'\n pfile3 = drb + '/missing_cache_cmip5_' + d + '.txt'\n if syndacall is True:\n pfile4 = drb + '/cache_cmip5_synda_' + d + '.txt'\n pfile5 = drb + '/missing_cache_cmip5_synda_' + d + '.txt'\n errorfile = drb + '/cache_err.out'\n if params_file:\n nm = 'cache_' + params_file + '-' + d\n if os.path.exists(nm):\n os.remove(nm)\n else:\n nm = 'cache_user.txt-' + d\n if os.path.exists(nm):\n os.remove(nm)\n\n # ---- get root directory\n if verbose is True:\n print('Using %s as local searchable datasource' % d)\n if d == 'badc':\n rootp = '/badc/cmip5/data/cmip5/output1'\n drs = 'BADC'\n\n # ---- start timer\n t1 = time.time()\n \n # ---- get the params file\n if params_file:\n paramfile, paramfile_extension = os.path.splitext(params_file)\n\n # ---- txt\n if paramfile_extension=='.txt':\n # ---- Parse a generic text parameters file ---- #\n # ---- with the specified variable(s) ---- #\n ##############################################################\n \"\"\"\n NOTE: for streamlining\n Build a standardized .txt parameter file as follows:\n each row must be a standard specific file descriptor e.g.\n cmip experiment type1 type2 ensemble yr1 yr2 variable\n ----------------------------------------------------------\n CMIP5 MPI-ESM-LR Amon historical r1i1p1 1900 1982 tro3 \n \n IT IS IMPORTANT TO KEEP THIS ORDER OTHERWISE THINGS CAN GET VERY MESSY !!!\n \"\"\"\n if syndacall is True:\n # first poll the local server\n if verbose is True:\n #write_cache_direct(params_file,ls_host_root,host_root,pfile2,pfile3,errorfile,latestDir,verbose)\n write_cache_direct(params_file, rootp, pfile2, pfile3, errorfile, drs, verbose)\n else:\n #write_cache_direct(params_file,ls_host_root,host_root,pfile2,pfile3,errorfile,latestDir,verbose=False)\n write_cache_direct(params_file, rootp, pfile2, pfile3, errorfile, drs, 
verbose=False)\n print_stats(pfile2,pfile3)\n # check for incomplete/missing filedescriptors\n if os.path.exists(pfile3):\n ar = open(pfile3, 'r')\n lls = [line for line in ar if line.split()[0].split('_')[0] == 'CMIP5']\n lenitemlist = len(lls)\n cat11 = [(p.split()[0],'dope') for p in lls if p.split()[1] == 'ERROR-MISSING']\n cat21 = [(p.split()[0],p.split()[2]) for p in lls if p.split()[1] == 'INCOMPLETE']\n # construct two dictionaries:\n # A: contains all missing filedescriptors\n # B: contains the incomplete filedescriptors\n A = {}\n B = {}\n for item in cat21:\n A.setdefault(item[0],[]).append(item[1])\n for item in cat11:\n B.setdefault(item[0],[]).append(item[1])\n # convolve A and B so synda will download only the A's 'dope' (missing)\n # and the bits from B that are not already on disk\n Z = dict(A, **B)\n if verbose is True:\n print('\\n-----------------------------------------------------------------------------------------------------')\n print('We parsed a missing LOCAL data param file. We have missing/incomplete files for %i filedescriptors: ' % lenitemlist)\n print('Calling SYNDA to look for data in /sdt/data or download what is not found...')\n print('-------------------------------------------------------------------------------------------------------')\n for it in lls:\n ite = it.split()[0].split('_')\n v1 = ite[7]\n header = it.split()[0]\n model_data = ite[0] + ' '+ ite[1] + ' ' + ite[2]\\\n + ' ' + ite[3] + ' ' + ite[4]\n yr1 = int(ite[5])\n yr2 = int(ite[6])\n # call synda search\n outpt = synda_search(model_data,v1)\n if download is True:\n if verbose is True:\n if dryrunOn:\n s = synda_dll(outpt,v1,yr1,yr2,header,Z,pfile4,pfile5,download=True,dryrunOn=True,verbose=True)\n else:\n s = synda_dll(outpt,v1,yr1,yr2,header,Z,pfile4,pfile5,download=True,dryrunOn=False,verbose=True)\n else:\n if dryrunOn:\n s = synda_dll(outpt,v1,yr1,yr2,header,Z,pfile4,pfile5,download=True,dryrunOn=True,verbose=False)\n else:\n s = synda_dll(outpt,v1,yr1,yr2,header,Z,pfile4,pfile5,download=True,dryrunOn=False,verbose=False)\n else:\n if verbose is True:\n s = synda_dll(outpt,v1,yr1,yr2,header,Z,pfile4,pfile5,download=False,dryrunOn=False,verbose=True)\n else:\n s = synda_dll(outpt,v1,yr1,yr2,header,Z,pfile4,pfile5,download=False,dryrunOn=False,verbose=False)\n if s == 0:\n with open(pfile5, 'a') as file:\n file.write(header + ' ' + 'ERROR-MISSING' + '\\n')\n file.close()\n if os.path.exists(pfile4):\n fix_duplicate_entries(pfile4)\n if os.path.exists(errorfile):\n fix_duplicate_entries(errorfile)\n print_stats(pfile4,pfile5)\n\n # final cache merging and cleanup\n if os.path.exists(pfile2) and os.path.exists(pfile4):\n # create a composite file using caches from sever and synda\n compf = drb + '/cache_cmip5_combined_' + d + '.txt'\n cache_merge(pfile2,pfile4,compf)\n final_cache(params_file,compf,nm)\n print_final_stats(nm)\n plotter(nm,drb)\n else:\n # looks like synda didnt find anything extra\n if os.path.exists(pfile2):\n cpc = 'cp ' + pfile2 + ' ' + drb + '/cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n final_cache(params_file,pfile2,nm)\n print_final_stats(nm)\n plotter(nm,drb)\n else:\n # looks like there is nothing in local but synda found extra\n if os.path.exists(pfile4):\n cpc = 'cp ' + pfile4 + ' ' + drb + '/cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n final_cache(params_file,pfile4,nm)\n 
print_final_stats(nm)\n plotter(nm,drb)\n # in case synda missed some filedescriptors\n if os.path.exists(pfile5):\n fix_duplicate_entries(pfile5)\n cpc = 'cp ' + pfile5 + ' ' + drb + '/missing_cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n else:\n # no need to call synda if we found all needed filedescriptors on server\n print('Cached all needed data from local datasource %s' % d)\n if os.path.exists(pfile2):\n cpc = 'cp ' + pfile2 + ' ' + drb + '/cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n final_cache(params_file,pfile2,nm)\n print_final_stats(nm)\n plotter(nm,drb)\n #sys.exit(0)\n else:\n # not calling synda at all\n if verbose is True:\n print('\\n-------------------------------------------------------------------------------------')\n print('We have looked at existing files LOCALLY only: ')\n print('Here is what we found:')\n print('---------------------------------------------------------------------------------------')\n #write_cache_direct(params_file,ls_host_root,host_root,pfile2,pfile3,errorfile,latestDir,verbose)\n write_cache_direct(params_file, rootp, pfile2, pfile3, errorfile, drs, verbose)\n else:\n #write_cache_direct(params_file,ls_host_root,host_root,pfile2,pfile3,errorfile,latestDir,verbose=False)\n write_cache_direct(params_file, rootp, pfile2, pfile3, errorfile, drs, verbose=False)\n if os.path.exists(errorfile):\n fix_duplicate_entries(errorfile)\n print_stats(pfile2,pfile3)\n if os.path.exists(pfile2):\n cpc = 'cp ' + pfile2 + ' ' + drb + '/cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n final_cache(params_file,pfile2,nm)\n print_final_stats(nm)\n plotter(nm,drb)\n if os.path.exists(pfile3):\n cpc = 'cp ' + pfile3 + ' ' + drb + '/missing_cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n\n\n elif userVars:\n \n # ---- user command line arguments parsed here\n for vi in vpars:\n if os.path.exists('prepended_temp.txt'):\n os.remove('prepended_temp.txt')\n #if os.path.exists('temp.txt'):\n # os.remove('temp.txt')\n header = fpars[0] + '_'+ fpars[1] + '_' + fpars[2]\\\n + '_' + fpars[3] + '_' + fpars[4] + '_' + str(fpars[5])+ '_' + str(fpars[6])\\\n + '_' + vi\n model_data = fpars[0] + ' '+ fpars[1] + ' ' + fpars[2]\\\n + ' ' + fpars[3] + ' ' + fpars[4]\n if verbose is True:\n print('Looking at variable %s' % vi)\n print('Model data for: ', model_data + '\\n')\n yr1 = fpars[5]\n yr2 = fpars[6]\n tempfile = open('temp.txt', 'a')\n templine = model_data + ' ' + str(yr1) + ' ' + str(yr2) + ' ' + vi + '\\n'\n tempfile.write(templine)\n tempfile.close()\n if verbose is True:\n write_cache_direct('temp.txt',ls_host_root,host_root,pfile2,pfile3,errorfile,latestDir,verbose)\n else:\n write_cache_direct('temp.txt',ls_host_root,host_root,pfile2,pfile3,errorfile,latestDir,verbose=False)\n print_stats(pfile2,pfile3)\n if syndacall is True:\n if os.path.exists(pfile3):\n if verbose is True:\n print('\\n-------------------------------------------------------------------------------------')\n print('We are missing files for our needed filedescriptor: %s' % model_data + ' ' + str(yr1) + ' ' + str(yr2) + ' ' + vi)\n print('Calling SYNDA to look for data in /sdt/data or download what is not found...')\n 
print('---------------------------------------------------------------------------------------')\n ar = open(pfile3, 'r')\n lls = [line for line in ar if line.split()[0].split('_')[0] == 'CMIP5']\n lenitemlist = len(lls)\n cat11 = [(p.split()[0],'dope') for p in lls if p.split()[1] == 'ERROR-MISSING']\n cat21 = [(p.split()[0],p.split()[2]) for p in lls if p.split()[1] == 'INCOMPLETE']\n A = {}\n B = {}\n for item in cat21:\n A.setdefault(item[0],[]).append(item[1])\n for item in cat11:\n B.setdefault(item[0],[]).append(item[1])\n Z = dict(A, **B)\n outpt = synda_search(model_data,vi)\n if download is True:\n if verbose is True:\n if dryrunOn:\n s = synda_dll(outpt,vi,yr1,yr2,header,Z,pfile4,pfile5,download=True,dryrunOn=True,verbose=True)\n else:\n s = synda_dll(outpt,vi,yr1,yr2,header,Z,pfile4,pfile5,download=True,dryrunOn=False,verbose=True)\n else:\n if dryrunOn:\n s = synda_dll(outpt,vi,yr1,yr2,header,Z,pfile4,pfile5,download=True,dryrunOn=True,verbose=False)\n else:\n s = synda_dll(outpt,vi,yr1,yr2,header,Z,pfile4,pfile5,download=True,dryrunOn=False,verbose=False)\n else:\n if verbose is True:\n s = synda_dll(outpt,vi,yr1,yr2,header,Z,pfile4,pfile5,download=False,dryrunOn=False,verbose=True)\n else:\n s = synda_dll(outpt,vi,yr1,yr2,header,Z,pfile4,pfile5,download=False,dryrunOn=False,verbose=False)\n if s == 0:\n with open(pfile5, 'a') as file:\n file.write(header + ' ' + 'ERROR-MISSING' + '\\n')\n file.close()\n if os.path.exists(pfile4):\n fix_duplicate_entries(pfile4)\n if os.path.exists(errorfile):\n fix_duplicate_entries(errorfile)\n print_stats(pfile4,pfile5)\n # final cache merging and cleanup\n if os.path.exists(pfile2) and os.path.exists(pfile4):\n # create a composite file using caches from sever and synda\n compf = drb + '/cache_cmip5_combined_' + d + '.txt'\n cache_merge(pfile2,pfile4,compf)\n final_cache('temp.txt',compf,nm)\n print_final_stats(nm)\n else:\n # looks like synda didnt find anything extra\n if os.path.exists(pfile2):\n cpc = 'cp ' + pfile2 + ' ' + drb + '/cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n final_cache('temp.txt',pfile2,nm)\n print_final_stats(nm)\n else:\n # looks like there is nothing in local but synda found extra\n if os.path.exists(pfile4):\n cpc = 'cp ' + pfile4 + ' ' + drb + '/cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n final_cache('temp.txt',pfile4,nm)\n print_final_stats(nm)\n # in case synda missed some filedescriptors\n if os.path.exists(pfile5):\n fix_duplicate_entries(pfile5)\n cpc = 'cp ' + pfile5 + ' ' + drb + '/missing_cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n else:\n # no need to call synda if we found all needed filedescriptors on server\n print('Cached all data from local datasource %s' % d)\n if os.path.exists(pfile2):\n cpc = 'cp ' + pfile2 + ' ' + drb + '/cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n final_cache('temp.txt',pfile2,nm)\n print_final_stats(nm)\n # not calling synda at all\n if verbose is True:\n print('\\n-------------------------------------------------------------------------------------')\n print('We have looked at existing files LOCALLY only: ')\n print('Here is what we found:')\n 
print('---------------------------------------------------------------------------------------')\n if os.path.exists(errorfile):\n fix_duplicate_entries(errorfile)\n print_stats(pfile2,pfile3)\n if os.path.exists(pfile2):\n cpc = 'cp ' + pfile2 + ' ' + drb + '/cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n final_cache('temp.txt',pfile2,nm)\n print_final_stats(nm)\n if os.path.exists(pfile3):\n cpc = 'cp ' + pfile3 + ' ' + drb + '/missing_cache_cmip5_combined_' + d + '.txt'\n proc = subprocess.Popen(cpc, stdout=subprocess.PIPE, shell=True)\n (out, err) = proc.communicate()\n #os.remove('temp.txt')\n os.remove('prepended_temp.txt')\n\n # ---- timing and exit\n t2 = time.time()\n dt = t2 - t1\n if verbose is True:\n print('=================================')\n print('DONE! with datasource %s' % d)\n print('Time elapsed: %.1f seconds' % dt)\n print('=================================')\n print('Time elapsed: %.1f s' % dt)\n\n# ---- finish, cleanup and exit\nif params_file:\n prp = 'prepended_' + params_file\n os.remove(prp)\nif userVars:\n os.remove('temp.txt')\nt20 = time.time()\ndt0 = t20 - t10\nif verbose is True:\n print('==================================================')\n print('DONE! with all datasources')\n print('Time elapsed: %.1f seconds' % dt0)\n print('If your data is fully cached, you deserve a beer :)')\n print('==================================================')\nprint('Time elapsed: %.1f s' % dt0)\n\n# ---- end of code\n"
] |
[
[
"matplotlib.pyplot.title",
"numpy.unique",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.genfromtxt",
"numpy.mean",
"numpy.savetxt",
"numpy.loadtxt"
]
] |
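A side note on the copy idiom in the caching script above: it shells out to cp through subprocess.Popen with shell=True and string-concatenated paths. A minimal standard-library sketch of the same step; the file names are hypothetical stand-ins, not the script's real pfile2/drb paths:

    import os
    import shutil

    src = 'cache_local.txt'                   # hypothetical stand-in for pfile2
    dst = 'cache_cmip5_combined_example.txt'  # hypothetical combined-cache target

    if os.path.exists(src):
        # copyfile spawns no shell, so spaces or metacharacters in a
        # concatenated path cannot break or hijack the command
        shutil.copyfile(src, dst)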
jmaslek/etf_scraper
|
[
"e4eb4eda035541340e0abd18cb267cd715b76727"
] |
[
"scrape_data.py"
] |
[
"import requests\nimport pandas as pd\nimport json\nfrom bs4 import BeautifulSoup as bs\nfrom bs4 import BeautifulSoup\nimport numpy as np\n\n\ndef assets_to_num(x):\n x = x.strip(\"$\")\n if x.endswith(\"M\"):\n return float(x.strip(\"M\"))\n elif x.endswith(\"B\"):\n return float(x.strip(\"B\")) * 1000\n elif x.endswith(\"K\"):\n return float(x.strip(\"K\")) / 1000\n else:\n return np.nan\n\nr = requests.get(\"https://stockanalysis.com/etf/\", headers={\"User-Agent\":\"Mozilla/5.0\"})\nsoup2 = BeautifulSoup(r.text,\"html.parser\")\nscript = soup2.find(\"script\",{\"id\":\"__NEXT_DATA__\"})\netf_symbols = pd.DataFrame(json.loads(script.text)[\"props\"][\"pageProps\"][\"stocks\"]).s.to_list()\n \ndf = pd.DataFrame()\nfor etf in etf_symbols:\n try:\n r = requests.get(f\"https://stockanalysis.com/etf/{etf}\", headers={\"User-Agent\":\"Mozilla/5.0\"})\n soup = bs(r.text, \"html.parser\") # %%\n tables = soup.findAll(\"table\")\n texts = []\n for tab in tables[:2]:\n entries = tab.findAll(\"td\")\n for ent in entries:\n texts.append(ent.get_text())\n\n vars = [0, 2, 4, 6, 8, 10, 12, 18, 20, 22, 26, 28, 30, 32]\n vals = [idx + 1 for idx in vars]\n columns = [texts[idx] for idx in vars]\n data = [texts[idx] for idx in vals] \n df[etf] = data\n \n except Exception as e:\n print(etf)\n \ndf.index = columns\ndf = df.T\ndf.columns = ['Assets',\n 'NAV',\n 'Expense',\n 'PE',\n 'SharesOut',\n 'Div',\n 'DivYield',\n 'Volume',\n 'Open',\n 'PrevClose',\n 'YrLow',\n 'YrHigh',\n 'Beta',\n 'N_Hold']\n\ndf[\"Assets\"] = df[\"Assets\"].apply(lambda x: assets_to_num(x) if isinstance(x,str) else np.nan)\ndf[\"NAV\"] = df[\"NAV\"].apply(lambda x: float(x.strip(\"$\")) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"Expense\"] = df[\"Expense\"].apply(lambda x: float(x.strip(\"%\")) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"PE\"] = df[\"PE\"].apply(lambda x: float(x) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"SharesOut\"] = df[\"SharesOut\"].apply(lambda x: assets_to_num(x))\ndf[\"Div\"] = df[\"Div\"].apply(lambda x: float(x.strip(\"$\")) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"DivYield\"] = df[\"DivYield\"].apply(lambda x: float(x.strip(\"%\").replace(\",\",\"\")) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"Volume\"] = df[\"Volume\"].apply(lambda x: float(x.replace(\",\",\"\")) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"PrevClose\"] = df[\"PrevClose\"].apply(lambda x: float(x.strip(\"$\")) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"Open\"] = df[\"Open\"].apply(lambda x: float(x.strip(\"$\")) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"PrevClose\"] = df[\"PrevClose\"].apply(lambda x: float(x) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"YrLow\"] = df[\"YrLow\"].apply(lambda x: float(x) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"YrHigh\"] = df[\"YrHigh\"].apply(lambda x: float(x) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"Beta\"] = df[\"Beta\"].apply(lambda x: float(x) if x not in [\"n/a\",\"-\"] else np.nan)\ndf[\"N_Hold\"] = df[\"N_Hold\"].apply(lambda x: float(x) if x not in [\"n/a\",\"-\"] else np.nan)\n\ndf.to_csv(\"etf_overviews.csv\")\n"
] |
[
[
"pandas.DataFrame"
]
] |
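The assets_to_num helper in the scraper above normalizes suffixed dollar strings to millions; a self-contained check of the three suffix branches and the NaN fallthrough (the function body is repeated verbatim from the row above):

    import numpy as np

    def assets_to_num(x):  # repeated verbatim from the scraper above
        x = x.strip("$")
        if x.endswith("M"):
            return float(x.strip("M"))
        elif x.endswith("B"):
            return float(x.strip("B")) * 1000
        elif x.endswith("K"):
            return float(x.strip("K")) / 1000
        else:
            return np.nan

    for raw in ["$1.5B", "$250M", "$900K", "n/a"]:
        print(raw, '->', assets_to_num(raw))
    # $1.5B -> 1500.0, $250M -> 250.0, $900K -> 0.9, n/a -> nan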
peytondmurray/mx3tools
|
[
"0f367c0c1c0ebb1887bf105555bd2a1edb9fc654"
] |
[
"test_vis.py"
] |
[
"\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.collections as collections\nimport matplotlib.patches as patches\nimport matplotlib.animation as animation\nimport mx3tools.ovftools as ovftools\nimport mx3tools.statutil as statutil\nimport mx3tools.datautil as datautil\nimport mx3tools.plotutil as plotutil\nimport mx3tools.util as util\nimport tqdm\nimport cmocean\nimport pprint\nplt.style.use('dark_background')\nnp.set_printoptions(linewidth=120, precision=3)\n\n\ndata = datautil.SimRun('/home/pdmurray/Desktop/Workspace/dmidw/barkhausen/D_0.0e-3/2019-06-24/')\n\n# azbins, azhist, _, _ = statutil.event_hists(data, 40, key='Az')\n# print('Done!')\n\n\nd = data.get_sim(1)\n\ny = util.diff(d.t(), d.table['ext_exactdwposavg (m)'].values, 1, 1)\n\nplt.plot(d.t(), d.vdw(), '-w')\nplt.plot(d.t(), y, '-r')\nplt.tight_layout()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.use",
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use"
]
] |
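The plotting script above overlays a stored domain-wall velocity (d.vdw()) against a numerical derivative of the wall position computed by mx3tools.util.diff. A self-contained sketch of the same sanity check on synthetic data, with numpy.gradient standing in for util.diff (whose exact stencil and arguments are not shown in the row above):

    import numpy as np
    import matplotlib
    matplotlib.use('Agg')  # headless backend so the sketch runs anywhere
    import matplotlib.pyplot as plt

    t = np.linspace(0.0, 1e-9, 500)            # time axis, seconds
    pos = 1e-6 * np.sin(2 * np.pi * t / 1e-9)  # synthetic wall position, metres
    vel = np.gradient(pos, t)                  # numerical dx/dt

    plt.plot(t, vel, '-r')
    plt.tight_layout()
    plt.savefig('velocity_check.png')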
MDAnalysis/pytng
|
[
"65b4c0c915fe60525d9b2dacb83c2c5d73b3f13f"
] |
[
"tests/conftest.py"
] |
[
"from collections import namedtuple\nimport numpy as np\nimport os\nimport pytest\n\nHERE = os.path.dirname(__file__)\n\n\n@pytest.fixture()\ndef CORRUPT_FILEPATH():\n # actually just an ascii file\n return os.path.join(HERE, \"reference_files\", \"badtngfile.tng\")\n\n\n@pytest.fixture()\ndef MISSING_FILEPATH():\n # return a file that doesn't exist\n return \"nonexistant.tng\"\n\n\n@pytest.fixture()\ndef TNG_EXAMPLE():\n return os.path.join(HERE, \"reference_files\", \"tng_example.tng\")\n\n\n@pytest.fixture()\ndef TNG_UTF8_EXAMPLE():\n return os.path.join(HERE, \"reference_files\", \"tng_😝_emoji.tng\")\n\n\n@pytest.fixture\ndef TNG_EXAMPLE_DATA():\n # reference data for GMX_REF from Gromacs/tng library\n TNG = namedtuple(\n \"TNGData\",\n [\"length\", \"natoms\", \"first_frame\", \"last_frame\", \"time\", \"box\"],\n )\n\n # reference data determined via `gmx dump`\n # 5 water molecules, chain W resname WAT\n # names O, HO1, HO2\n\n time = [None] * 10\n\n first_frame = np.array(\n [\n [1.00000e00, 1.00000e00, 1.00000e00],\n [2.00000e00, 2.00000e00, 2.00000e00],\n [3.00000e00, 3.00000e00, 3.00000e00],\n [1.10000e01, 1.10000e01, 1.10000e01],\n [1.20000e01, 1.20000e01, 1.20000e01],\n [1.30000e01, 1.30000e01, 1.30000e01],\n [2.10000e01, 2.10000e01, 2.10000e01],\n [2.20000e01, 2.20000e01, 2.20000e01],\n [2.30000e01, 2.30000e01, 2.30000e01],\n [8.25000e00, 3.30000e01, 3.30000e01],\n [8.25000e00, 3.40000e01, 3.30000e01],\n [8.50000e00, 3.30000e01, 3.40000e01],\n [5.00000e01, 5.00000e01, 5.00000e01],\n [5.10000e01, 5.10000e01, 5.10000e01],\n [1.00000e02, 1.00000e02, 1.00000e02],\n ],\n dtype=np.float64,\n )\n\n last_frame = np.array(\n [\n [1.015625e00, 1.015625e00, 1.015625e00],\n [2.00000e00, 2.00000e00, 2.00000e00],\n [3.00000e00, 3.00000e00, 3.00000e00],\n [1.10000e01, 1.10000e01, 1.10000e01],\n [1.20000e01, 1.20000e01, 1.20000e01],\n [1.30000e01, 1.30000e01, 1.30000e01],\n [2.10000e01, 2.10000e01, 2.10000e01],\n [2.20000e01, 2.20000e01, 2.20000e01],\n [2.30000e01, 2.30000e01, 2.30000e01],\n [8.25000e00, 3.30000e01, 3.30000e01],\n [8.25000e00, 3.40000e01, 3.30000e01],\n [8.50000e00, 3.30000e01, 3.40000e01],\n [5.00000e01, 5.00000e01, 5.00000e01],\n [5.10000e01, 5.10000e01, 5.10000e01],\n [1.00000e02, 1.00000e02, 1.00000e02],\n ],\n dtype=np.float64,\n )\n\n return TNG(\n length=10, # number of frames\n natoms=15,\n first_frame=first_frame,\n last_frame=last_frame,\n time=time,\n box=np.eye(3) * 50,\n )\n\n\n@pytest.fixture()\ndef ARGON_NPT_COMPRESSED():\n return os.path.join(HERE, \"reference_files\", \"argon_npt_compressed.tng\")\n\n\n@pytest.fixture\ndef ARGON_NPT_COMPRESSED_DATA():\n # reference data for Argon NPT COMPRESSED\n TNG = namedtuple(\n \"TNGData\",\n [\n \"length\",\n \"natoms\",\n \"first_frame_first_10_pos\",\n \"last_frame_last_10_pos\",\n \"first_box\",\n \"last_box\",\n \"time\",\n ],\n )\n\n time = [None] * 10\n\n first_frame_first_10_pos = np.array(\n [\n [2.53300e00, 1.24400e00, 3.50600e00],\n [8.30000e-01, 2.54400e00, 3.44800e00],\n [1.09100e00, 1.10000e-01, 3.12900e00],\n [2.45500e00, 5.00000e-03, 3.01200e00],\n [2.71400e00, 1.35300e00, 5.53000e-01],\n [3.05100e00, 2.89300e00, 2.69100e00],\n [1.42200e00, 2.77000e00, 1.46000e-01],\n [2.22300e00, 1.21100e00, 3.26800e00],\n [2.81100e00, 2.78900e00, 2.38500e00],\n [4.87000e-01, 1.15900e00, 1.17100e00],\n ],\n dtype=np.float64,\n )\n\n last_frame_last_10_pos = np.array(\n [\n [7.76000e-01, 1.19600e00, 7.73000e-01],\n [6.27000e-01, 3.34000e-01, 2.04900e00],\n [6.09000e-01, 3.46300e00, 2.57000e-01],\n [3.02000e00, 
3.18400e00, 2.97600e00],\n [2.64700e00, 7.74000e-01, 1.81500e00],\n [1.56000e-01, 1.28300e00, 3.28100e00],\n [6.58000e-01, 3.03300e00, 2.90800e00],\n [2.08500e00, 3.55100e00, 1.43600e00],\n [1.56000e-01, 3.50200e00, 3.14000e-01],\n [1.28900e00, 9.98000e-01, 1.64500e00],\n ],\n dtype=np.float64,\n )\n\n first_box = np.array(\n [\n [3.60140, 0.00000, 0.000000],\n [0.000000, 3.60140, 0.000000],\n [0.000000, 0.000000, 3.60140],\n ]\n )\n\n last_box = np.array(\n [\n [3.589650, 0.000000, 0.000000],\n [0.000000, 3.589650, 0.000000],\n [0.000000, 0.000000, 3.589650],\n ]\n )\n\n return TNG(\n length=500001, # number of frames\n natoms=1000,\n first_frame_first_10_pos=first_frame_first_10_pos,\n last_frame_last_10_pos=last_frame_last_10_pos,\n first_box=first_box,\n last_box=last_box,\n time=time,\n )\n\n\n@pytest.fixture()\ndef WATER_NPT_COMPRESSED_TRJCONV():\n return os.path.join(\n HERE, \"reference_files\", \"water_npt_compressed_trjconv.tng\"\n )\n\n\n@pytest.fixture()\ndef WATER_NPT_UNCOMPRESSED_VELS_FORCES():\n return os.path.join(\n HERE, \"reference_files\", \"water_uncompressed_vels_forces.tng\"\n )\n\n\n@pytest.fixture\ndef WATER_NPT_UNCOMPRESSED_VELS_FORCES_DATA():\n # reference data for Argon NPT COMPRESSED\n TNG = namedtuple(\n \"TNGData\",\n [\n \"length\",\n \"natoms\",\n \"first_frame_first_10_pos\",\n \"last_frame_last_10_pos\",\n \"first_box\",\n \"last_box\",\n \"first_frame_first_10_vels\",\n \"last_frame_last_10_vels\",\n \"first_frame_first_10_frc\",\n \"last_frame_last_10_frc\",\n \"time\",\n ],\n )\n\n time = [None] * 10\n\n first_frame_first_10_pos = np.array(\n [\n [2.52700e00, 2.61101e00, 2.45398e00, ],\n [2.50319e00, 2.59390e00, 2.54510e00, ],\n [2.61687e00, 2.57898e00, 2.44623e00, ],\n [1.09097e00, 1.27301e00, 1.99202e00, ],\n [1.01457e00, 1.23310e00, 2.03366e00, ],\n [1.13694e00, 1.19976e00, 1.95100e00, ],\n [2.20399e00, 1.37297e00, 8.83017e-01, ],\n [2.13535e00, 1.38523e00, 9.48592e-01, ],\n [2.21780e00, 1.46022e00, 8.46139e-01, ],\n [1.10605e00, 2.11799e00, 5.61040e-01],\n ],\n dtype=np.float64,\n )\n\n last_frame_last_10_pos = np.array(\n [\n [7.98970e-01, 2.15481e00, 2.75854e00, ],\n [6.32804e-01, 6.59262e-01, 1.12701e00, ],\n [5.47739e-01, 6.89158e-01, 1.09488e00, ],\n [6.16521e-01, 5.70554e-01, 1.15907e00, ],\n [5.33961e-01, 2.20212e00, 6.22357e-02, ],\n [4.79836e-01, 2.17921e00, 1.37788e-01, ],\n [4.79169e-01, 2.18181e00, 2.88140e00, ],\n [5.76261e-01, 1.85258e00, 1.69974e00, ],\n [6.60233e-01, 1.87443e00, 1.74016e00, ],\n [5.79366e-01, 1.75766e00, 1.68776e00, ],\n ],\n dtype=np.float64,\n )\n\n first_box = np.array(\n [\n [2.87951e00, 0.00000e00, 0.00000e00],\n [0.00000e00, 2.87951e00, 0.00000e00],\n [0.00000e00, 0.00000e00, 2.87951e00],\n ]\n )\n\n last_box = np.array(\n [\n [2.89497e00, 0.00000e00, 0.00000e00],\n [0.00000e00, 2.89497e00, 0.00000e00],\n [0.00000e00, 0.00000e00, 2.89497e00],\n ]\n )\n\n first_frame_first_10_vels = np.array(\n [\n [3.51496e-01, 7.29674e-01, -5.33343e-02, ],\n [5.97873e-02, -1.00359e00, -4.19582e-01, ],\n [2.56209e-01, 5.52850e-01, -4.53435e-01, ],\n [-1.09184e-02, 3.66412e-01, -4.85018e-01, ],\n [9.26847e-01, -6.03737e-01, 3.67032e-01, ],\n [-9.85010e-02, 1.09447e00, -1.94833e00, ],\n [-4.60571e-02, 3.64507e-01, -2.01200e-01, ],\n [-1.23912e00, -3.46699e-01, -1.27041e00, ],\n [6.12738e-01, 7.64292e-01, 9.39986e-01, ],\n [-6.34257e-02, -3.96772e-02, -4.55601e-01, ],\n ],\n dtype=np.float64,\n )\n\n last_frame_last_10_vels = np.array(\n [\n [-1.29712e00, 1.89736e-01, -4.58020e-01, ],\n [-2.24550e-01, 1.98991e-01, -7.18228e-01, 
],\n [9.92350e-02, 1.55654e-01, -1.64584e00, ],\n [-6.58128e-01, 4.26997e-01, -2.94439e-01, ],\n [-2.47945e-01, -4.03298e-01, 2.42530e-01, ],\n [3.88940e-01, 2.55276e-01, 9.15576e-01, ],\n [-1.57709e00, 5.61387e-01, 9.03308e-01, ],\n [-5.50578e-01, -3.38237e-01, -9.82961e-02, ],\n [4.52938e-01, -7.97070e-01, -1.83071e00, ],\n [-7.36810e-01, -2.02619e-01, -1.35719e00, ],\n ],\n dtype=np.float64,\n )\n\n first_frame_first_10_frc = np.array(\n [\n [-4.35261e02, 3.36017e02, -9.38570e02, ],\n [-1.75984e01, -2.44064e02, 1.25406e03, ],\n [6.57882e02, -2.07715e02, 2.72886e02, ],\n [1.75474e01, 1.57273e03, 2.80544e01, ],\n [-5.30602e02, -8.79351e02, 2.76766e02, ],\n [7.45154e01, -5.15662e02, -3.61260e02, ],\n [4.70405e02, -1.26065e03, -2.68651e02, ],\n [-5.15954e02, 5.19739e02, 2.85984e02, ],\n [-3.90010e02, 4.82308e02, 2.96046e00, ],\n [1.23199e03, -7.51883e02, -6.58181e02, ],\n ],\n dtype=np.float64,\n )\n\n last_frame_last_10_frc = np.array(\n [\n [-4.49360e02, -5.46652e02, 5.24477e02, ],\n [1.27648e03, 8.27699e02, 2.98916e01, ],\n [-9.49143e02, -3.13201e02, -3.78830e02, ],\n [-5.04814e02, -5.57331e02, -6.48604e01, ],\n [1.24046e03, 1.05411e03, 4.06005e02, ],\n [-3.61442e02, -5.29395e02, 1.26982e02, ],\n [-4.76165e02, -5.24370e02, -3.48132e02, ],\n [-7.41153e02, 1.19924e01, -7.19316e02, ],\n [5.67011e02, 6.64948e01, 2.13465e02, ],\n [2.43871e02, -4.09309e02, 4.87609e01, ],\n ],\n dtype=np.float64,\n )\n\n return TNG(\n length=500001, # number of frames\n natoms=1000,\n first_frame_first_10_pos=first_frame_first_10_pos,\n last_frame_last_10_pos=last_frame_last_10_pos,\n first_box=first_box,\n last_box=last_box,\n first_frame_first_10_vels=first_frame_first_10_vels,\n last_frame_last_10_vels=last_frame_last_10_vels,\n first_frame_first_10_frc=first_frame_first_10_frc,\n last_frame_last_10_frc=last_frame_last_10_frc,\n time=time,\n )\n"
] |
[
[
"numpy.eye",
"numpy.array"
]
] |
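A minimal sketch of how a test could consume the TNG_EXAMPLE_DATA fixture defined in the conftest above; it exercises only the reference arrays themselves and assumes nothing about the pytng API:

    import numpy as np

    def test_tng_example_reference_shapes(TNG_EXAMPLE_DATA):
        ref = TNG_EXAMPLE_DATA
        assert ref.length == 10 and ref.natoms == 15
        # 15 atoms x 3 coordinates in both stored frames
        assert ref.first_frame.shape == (ref.natoms, 3)
        assert ref.last_frame.shape == (ref.natoms, 3)
        # cubic 50 x 50 x 50 reference box
        assert np.allclose(ref.box, np.eye(3) * 50)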
vluzko/652-project
|
[
"e8129a7e451998676ecdd4c2a7283bbd22074e72"
] |
[
"gail_and_bc/behavior_clone.py"
] |
[
"\"\"\"This code is based heavily on OpenAI's TRPO implementation.\nSee here for the original code: https://github.com/openai/baselines/tree/master/baselines/trpo_mpi/.\n\"\"\"\n\nimport argparse\nimport tempfile\nimport os.path as osp\nimport gym\nimport logging\nfrom tqdm import tqdm\n\nimport tensorflow as tf\n\nfrom baselines.gail import mlp_policy\nfrom baselines import bench\nfrom baselines import logger\nfrom baselines.common import set_global_seeds, tf_util as U\nfrom baselines.common.misc_util import boolean_flag\nfrom baselines.common.mpi_adam import MpiAdam\nfrom baselines.gail.dataset.mujoco_dset import Mujoco_Dset\n\nfrom run_mujoco import runner\nimport util\n\n\ndef argsparser():\n parser = argparse.ArgumentParser(\"Tensorflow Implementation of Behavior Cloning\")\n parser.add_argument('--env_id', help='environment ID', default='Hopper-v2')\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--expert_path', type=str, default='data/deterministic.trpo.Hopper.0.00.npz')\n parser.add_argument('--checkpoint_dir', help='the directory to save model', default='checkpoint')\n parser.add_argument('--log_dir', help='the directory to save log file', default='log')\n # Mujoco Dataset Configuration\n parser.add_argument('--traj_limitation', type=int, default=4)\n # Network Configuration (Using MLP Policy)\n parser.add_argument('--policy_hidden_size', type=int, default=100)\n # for evaluatation\n boolean_flag(parser, 'stochastic_policy', default=False, help='use stochastic/deterministic policy to evaluate')\n boolean_flag(parser, 'save_sample', default=False, help='save the trajectories or not')\n parser.add_argument('--BC_max_iter', help='Max iteration for training BC', type=int, default=1100)\n return parser.parse_args()\n\n\ndef learn(env, policy_func, dataset, optim_batch_size=1024, max_iters=1e4,\n adam_epsilon=1e-5, optim_stepsize=3e-4,\n ckpt_dir=None, log_dir=None, task_name=None,\n verbose=False):\n val_per_iter = int(max_iters / 10)\n ob_space = env.observation_space\n ac_space = env.action_space\n pi = policy_func(\"pi\", ob_space, ac_space) # Construct network for new policy\n # placeholder\n ob = U.get_placeholder_cached(name=\"ob\")\n ac = pi.pdtype.sample_placeholder([None])\n stochastic = U.get_placeholder_cached(name=\"stochastic\")\n loss = tf.reduce_mean(tf.square(ac - pi.ac))\n var_list = pi.get_trainable_variables()\n adam = MpiAdam(var_list, epsilon=adam_epsilon)\n lossandgrad = U.function([ob, ac, stochastic], [loss] + [U.flatgrad(loss, var_list)])\n\n U.initialize()\n adam.sync()\n # logger.log(\"Pretraining with Behavior Cloning...\")\n for iter_so_far in tqdm(range(int(max_iters))):\n ob_expert, ac_expert = dataset.get_next_batch(optim_batch_size, 'train')\n train_loss, g = lossandgrad(ob_expert, ac_expert, True)\n adam.update(g, optim_stepsize)\n if verbose and iter_so_far % val_per_iter == 0:\n ob_expert, ac_expert = dataset.get_next_batch(-1, 'val')\n val_loss, _ = lossandgrad(ob_expert, ac_expert, True)\n logger.log(\"Training loss: {}, Validation loss: {}\".format(train_loss, val_loss))\n if iter_so_far % 50 == 0:\n beh_name = osp.join(ckpt_dir, task_name + str(iter_so_far))\n util.save_variables(beh_name, variables=pi.get_variables())\n\n if ckpt_dir is None:\n savedir_fname = tempfile.TemporaryDirectory().name\n else:\n savedir_fname = osp.join(ckpt_dir, task_name)\n util.save_variables(savedir_fname, variables=pi.get_variables())\n return savedir_fname\n\n\ndef get_task_name(args):\n task_name = 'BC'\n task_name += 
'.{}'.format(args.env_id.split(\"-\")[0])\n task_name += '.traj_limitation_{}'.format(args.traj_limitation)\n task_name += \".seed_{}\".format(args.seed)\n return task_name\n\n\ndef main(args):\n U.make_session(num_cpu=1).__enter__()\n set_global_seeds(args.seed)\n env = gym.make(args.env_id)\n\n def policy_fn(name, ob_space, ac_space, reuse=False):\n return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,\n reuse=reuse, hid_size=args.policy_hidden_size, num_hid_layers=2)\n\n env = bench.Monitor(env, logger.get_dir() and\n osp.join(logger.get_dir(), \"monitor.json\"))\n env.seed(args.seed)\n gym.logger.setLevel(logging.WARN)\n task_name = get_task_name(args)\n args.checkpoint_dir = osp.join(args.checkpoint_dir, task_name)\n args.log_dir = osp.join(args.log_dir, task_name)\n dataset = Mujoco_Dset(expert_path=args.expert_path, traj_limitation=args.traj_limitation)\n savedir_fname = learn(env,\n policy_fn,\n dataset,\n max_iters=args.BC_max_iter,\n ckpt_dir=args.checkpoint_dir,\n log_dir=args.log_dir,\n task_name=task_name,\n verbose=True)\n avg_len, avg_ret, ret_std = runner(env,\n policy_fn,\n savedir_fname,\n timesteps_per_batch=1024,\n number_trajs=10,\n stochastic_policy=args.stochastic_policy,\n save=args.save_sample,\n reuse=True)\n\n\nif __name__ == '__main__':\n args = argsparser()\n main(args)\n"
] |
[
[
"tensorflow.square"
]
] |
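The learn() function above performs behavior cloning with TF1-era baselines plumbing: a mean-squared-error regression of policy actions onto expert actions, optimized with Adam. A minimal PyTorch sketch of that same update; the hidden width (100) and Adam settings mirror the script's defaults, while the dimensions and batches are illustrative stand-ins:

    import torch

    obs_dim, act_dim = 11, 3  # Hopper-like sizes, illustrative only
    policy = torch.nn.Sequential(
        torch.nn.Linear(obs_dim, 100), torch.nn.Tanh(),
        torch.nn.Linear(100, 100), torch.nn.Tanh(),
        torch.nn.Linear(100, act_dim),
    )
    opt = torch.optim.Adam(policy.parameters(), lr=3e-4, eps=1e-5)

    ob_expert = torch.randn(1024, obs_dim)  # stand-in expert observations
    ac_expert = torch.randn(1024, act_dim)  # stand-in expert actions

    loss = torch.mean((policy(ob_expert) - ac_expert) ** 2)  # same MSE as learn()
    opt.zero_grad()
    loss.backward()
    opt.step()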
itzsimpl/NeMo
|
[
"c03f87d47fc57abc89c0ebf859fccba397dd0f8e"
] |
[
"nemo/collections/nlp/data/language_modeling/megatron/retro_dataset.py"
] |
[
"# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"RETRO Style dataset.\"\"\"\n\nimport torch\n\nfrom nemo.collections.nlp.data.language_modeling.megatron.base_dataset_utils import get_train_valid_test_split_\nfrom nemo.utils import logging\n\ntry:\n from apex.transformer import parallel_state\n\n HAVE_APEX = True\nexcept (ImportError, ModuleNotFoundError):\n HAVE_APEX = False\n\n\nclass MockRETRODataset(torch.utils.data.Dataset):\n def __init__(self, cfg, trainer, tokenizer, name, size):\n super().__init__()\n self.name = name\n self.tokenizer = tokenizer\n self._cfg = cfg\n self.size = size\n seed_val = parallel_state.get_data_parallel_rank() * 131 + 97\n torch.manual_seed(seed_val)\n\n def __len__(self):\n return self.size\n\n def __getitem__(self, idx):\n vocab_size = self.tokenizer.vocab_size\n\n neighbors = self._cfg.data.neighbors\n input_length = self._cfg.data.seq_length\n chunks = input_length // self._cfg.chunk_size\n chunk_size = self._cfg.chunk_size\n pad_id = self.tokenizer.pad_id\n\n all_tokens = torch.randint(0, vocab_size, (input_length + 1,))\n # make sure the eod happens at the end of each chunk, can add paddings to it\n # e.g. [..., id, id, pad, pad, pad, eod] each has chunk_size, each sentence\n # has length of multiple of chunk_size\n hidden = all_tokens[:-1]\n labels = all_tokens[1:]\n\n hidden_mask = hidden != pad_id\n # to mask out the token ids [id, id, eod, id, pad, eod, id, id]\n # so attention is not across eod, mask should be:\n # [false, true, true, true, true, true, true, true]\n # [false, false, true, true, true, true, true, true]\n # [false, false, false,true, true, true, true, true]\n # [true, true, true, false, true, true, true, true]\n # [true, true, true, true, true, true, true, true]\n # [true, true, true, false, true, false, true, true]\n # [true, true, true, true, true, true, false, true]\n # [true, true, true, true, true, true, false, false]\n retrieved = torch.randint(0, vocab_size, (chunks, neighbors, 2 * chunk_size))\n\n context_mask = retrieved != pad_id\n\n return {\n 'tokens': hidden,\n 'labels': labels,\n 'tokens_mask': hidden_mask,\n 'loss_mask': hidden_mask,\n 'retrieved_emb_mask': context_mask,\n 'retrieved_ids': retrieved,\n }\n\n\ndef build_mock_train_valid_test_datasets(\n cfg, trainer, splits_string, tokenizer, mock_data_size,\n):\n \"\"\"Build train, valid, and test datasets.\"\"\"\n\n splits = get_train_valid_test_split_(splits_string, mock_data_size)\n\n # Print stats about the splits.\n logging.info(' > dataset split:')\n\n def print_split_stats(name, index):\n logging.info(' {}:'.format(name))\n logging.info(\n ' document indices in [{}, {}) total of {} '\n 'documents'.format(splits[index], splits[index + 1], splits[index + 1] - splits[index])\n )\n\n print_split_stats('train', 0)\n print_split_stats('validation', 1)\n print_split_stats('test', 2)\n\n def build_dataset(index, name):\n dataset = None\n if splits[index + 1] > splits[index]:\n dataset = 
MockRETRODataset(cfg, trainer, tokenizer, name, splits[index + 1] - splits[index],)\n return dataset\n\n train_dataset = build_dataset(0, 'train')\n valid_dataset = build_dataset(1, 'valid')\n test_dataset = build_dataset(2, 'test')\n\n return (train_dataset, valid_dataset, test_dataset)\n"
] |
[
[
"torch.manual_seed",
"torch.randint"
]
] |
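MockRETRODataset.__getitem__ above mainly establishes tensor shapes: tokens and labels are a one-token-shifted pair of length seq_length, and retrieved ids come out as (chunks, neighbors, 2 * chunk_size). A standalone sketch of just that shape logic, with made-up sizes in place of the cfg and tokenizer objects:

    import torch

    # made-up sizes standing in for cfg.data.seq_length, cfg.chunk_size,
    # cfg.data.neighbors and tokenizer.vocab_size
    seq_length, chunk_size, neighbors, vocab_size = 128, 64, 2, 32000
    chunks = seq_length // chunk_size

    all_tokens = torch.randint(0, vocab_size, (seq_length + 1,))
    tokens, labels = all_tokens[:-1], all_tokens[1:]  # one-token shift
    retrieved = torch.randint(0, vocab_size, (chunks, neighbors, 2 * chunk_size))

    print(tokens.shape)     # torch.Size([128])
    print(labels.shape)     # torch.Size([128])
    print(retrieved.shape)  # torch.Size([2, 2, 128])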
microsoft/HuRL
|
[
"c9d0710ff6fd67b3cdbd46fc031cdbc3b3738cd2"
] |
[
"hurl/rl_utils.py"
] |
[
"# Some helper functions for using garage\n\n\nimport numpy as np\nimport torch\n\nfrom garage.torch.policies import GaussianMLPPolicy, TanhGaussianMLPPolicy, DeterministicMLPPolicy\nfrom garage.torch.q_functions import ContinuousMLPQFunction\nfrom garage.torch.value_functions import GaussianMLPValueFunction\nfrom garage.sampler import FragmentWorker, LocalSampler, RaySampler\nfrom garage.torch.optimizers import OptimizerWrapper\n\n\ndef get_mlp_policy(*,\n env_spec,\n stochastic=True,\n clip_output=False,\n hidden_sizes=(64, 64),\n hidden_nonlinearity=torch.tanh,\n min_std=np.exp(-20.),\n max_std=np.exp(2.)):\n\n if stochastic and clip_output:\n return TanhGaussianMLPPolicy(\n env_spec=env_spec,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=None,\n min_std=min_std,\n max_std=max_std)\n\n if stochastic and not clip_output:\n return GaussianMLPPolicy(env_spec,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=None)\n\n if not stochastic:\n return DeterministicMLPPolicy(\n env_spec=env_spec,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=torch.tanh if use_tanh else None)\n\n\n\ndef get_mlp_value(form='Q',\n *,\n env_spec,\n hidden_sizes=(256, 128),\n hidden_nonlinearity=torch.tanh,\n ensemble_size=1,\n ensemble_mode='P'\n ):\n\n if form=='Q':\n return ContinuousMLPQFunction(\n env_spec=env_spec,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=None)\n if form=='V':\n return GaussianMLPValueFunction(\n env_spec=env_spec,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=None,\n learn_std=False)\n\n\n\ndef collect_episode_batch(policy, *,\n env,\n batch_size,\n n_workers=4):\n \"\"\"Obtain one batch of episodes.\"\"\"\n sampler = get_sampler(policy, env=env, n_workers=n_workers)\n agent_update = policy.get_param_values()\n episodes = sampler.obtain_samples(0, batch_size, agent_update)\n return episodes\n\nfrom garage.sampler import Sampler\nimport copy\nfrom garage._dtypes import EpisodeBatch\nclass BatchSampler(Sampler):\n\n def __init__(self, episode_batch, randomize=True):\n self.episode_batch = episode_batch\n self.randomize = randomize\n self._counter = 0\n\n def obtain_samples(self, itr, num_samples, agent_update, env_update=None):\n\n ns = self.episode_batch.lengths\n if num_samples<np.sum(ns):\n if self.randomize:\n # Sample num_samples from episode_batch\n ns = self.episode_batch.lengths\n ind = np.random.permutation(len(ns))\n cumsum_permuted_ns = np.cumsum(ns[ind])\n itemindex = np.where(cumsum_permuted_ns>=num_samples)[0]\n if len(itemindex)>0:\n ld = self.episode_batch.to_list()\n j_max = min(len(ld), itemindex[0]+1)\n ld = [ld[i] for i in ind[:j_max].tolist()]\n sampled_eb = EpisodeBatch.from_list(self.episode_batch.env_spec,ld)\n else:\n sampled_eb = None\n else:\n ns = self.episode_batch.lengths\n ind = np.arange(len(ns))\n cumsum_permuted_ns = np.cumsum(ns[ind])\n counter = int(self._counter)\n itemindex = np.where(cumsum_permuted_ns>=num_samples*(counter+1))[0]\n itemindex0 = np.where(cumsum_permuted_ns>num_samples*counter)[0]\n if len(itemindex)>0:\n ld = self.episode_batch.to_list()\n j_max = min(len(ld), itemindex[0]+1)\n j_min = itemindex0[0]\n ld = [ld[i] for i in ind[j_min:j_max].tolist()]\n sampled_eb = EpisodeBatch.from_list(self.episode_batch.env_spec,ld)\n self._counter+=1\n else:\n sampled_eb = None\n else:\n sampled_eb = 
self.episode_batch\n\n return sampled_eb\n\n def shutdown_worker(self):\n pass\n\n\ndef get_sampler(policy,\n *,\n env,\n n_workers=4,\n **kwargs): # other kwargs for the sampler\n\n if n_workers==1:\n return LocalSampler(agents=policy,\n envs=env,\n max_episode_length=env.spec.max_episode_length,\n worker_class=FragmentWorker,\n **kwargs)\n else:\n return RaySampler(agents=policy,\n envs=env,\n max_episode_length=env.spec.max_episode_length,\n n_workers=n_workers,\n **kwargs)\n\n\n\nfrom garage.replay_buffer import PathBuffer\n\ndef get_replay_buferr(capacity=int(1e6)):\n return PathBuffer(capacity_in_transitions=capacity)\n\ndef get_optimizer(obj, lr,\n *,\n max_optimization_epochs=1,\n minibatch_size=128):\n\n return OptimizerWrapper((torch.optim.Adam, dict(lr=lr)),\n obj,\n max_optimization_epochs=max_optimization_epochs,\n minibatch_size=minibatch_size)"
] |
[
[
"numpy.cumsum",
"numpy.exp",
"numpy.where",
"numpy.sum"
]
] |
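The randomized branch of BatchSampler.obtain_samples above selects just enough whole episodes, in shuffled order, to cover num_samples transitions, using a permutation/cumsum/where chain. The same indexing trick in isolation, with made-up episode lengths in place of a real EpisodeBatch:

    import numpy as np

    ns = np.array([50, 120, 80, 200, 40])  # made-up transitions per episode
    num_samples = 300

    ind = np.random.permutation(len(ns))     # visit episodes in random order
    cumsum_permuted_ns = np.cumsum(ns[ind])  # running total of transitions
    itemindex = np.where(cumsum_permuted_ns >= num_samples)[0]
    j_max = itemindex[0] + 1 if len(itemindex) > 0 else len(ns)
    chosen = ind[:j_max]                     # first episodes covering the budget
    print(chosen, ns[chosen].sum())          # total is always >= num_samples here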
WildbookOrg/wbia-plugin-deepsense
|
[
"d452da23673e4bf97eb5e8e9b4844e729e6c2706"
] |
[
"wbia_deepsense/_plugin.py"
] |
[
"# -*- coding: utf-8 -*-\nimport logging\nfrom os.path import abspath, exists, join, dirname, split, splitext\nimport wbia\nfrom wbia.control import controller_inject, docker_control\nfrom wbia.constants import ANNOTATION_TABLE\nfrom wbia.web.apis_engine import ensure_uuid_list\nimport wbia.constants as const\nimport utool as ut\nimport wbia.dtool as dt\nimport vtool as vt\nimport numpy as np\nimport base64\nimport requests\nfrom PIL import Image, ImageDraw\nfrom io import BytesIO\n\n\nlogger = logging.getLogger()\n\n_, register_ibs_method = controller_inject.make_ibs_register_decorator(__name__)\nregister_api = controller_inject.get_wbia_flask_api(__name__)\nregister_preproc_annot = controller_inject.register_preprocs['annot']\n\n\nDIM_SIZE = 2000\n\nCONTAINER_ASSET_MAP = {\n 'flukebook_deepsense': {\n 'backend_url': None,\n 'individual_map_fpath': 'https://wildbookiarepository.azureedge.net/random/deepsense.flukebook.v0.csv',\n 'id_map': None,\n },\n 'deepsense_SRW_v1': {\n 'backend_url': None,\n 'individual_map_fpath': 'https://wildbookiarepository.azureedge.net/random/deepsense.australis.v1.csv',\n 'id_map': None,\n },\n 'original_deepsense': {\n 'backend_url': None,\n 'individual_map_fpath': 'https://wildbookiarepository.azureedge.net/random/deepsense.flukebook.v0.csv',\n 'id_map': None,\n },\n}\n\n\ndef _wbia_plugin_deepsense_check_container(url):\n endpoints = {\n 'api/alignment': ['POST'],\n 'api/keypoints': ['POST'],\n 'api/classify': ['POST'],\n }\n flag_list = []\n endpoint_list = list(endpoints.keys())\n for endpoint in endpoint_list:\n logger.info('Checking endpoint %r against url %r' % (endpoint, url))\n flag = False\n required_methods = set(endpoints[endpoint])\n supported_methods = None\n url_ = 'http://%s/%s' % (url, endpoint)\n\n try:\n response = requests.options(url_, timeout=1)\n except Exception:\n response = None\n\n if response is not None and response.status_code:\n headers = response.headers\n allow = headers.get('Allow', '')\n supported_methods_ = [method.strip().upper() for method in allow.split(',')]\n supported_methods = set(supported_methods_)\n if len(required_methods - supported_methods) == 0:\n flag = True\n if not flag:\n args = (endpoint,)\n logger.info(\n '[wbia_deepsense - FAILED CONTAINER ENSURE CHECK] Endpoint %r failed the check'\n % args\n )\n logger.info('\\tRequired Methods: %r' % (required_methods,))\n logger.info('\\tSupported Methods: %r' % (supported_methods,))\n logger.info('\\tFlag: %r' % (flag,))\n flag_list.append(flag)\n supported = np.all(flag_list)\n return supported\n\n\ndocker_control.docker_register_config(\n None,\n 'flukebook_deepsense',\n 'wildme/wbia-plugin-deepsense:latest',\n run_args={'_internal_port': 5000, '_external_suggested_port': 5000},\n container_check_func=_wbia_plugin_deepsense_check_container,\n)\n# next two lines for comparing containers side-by-side\ndocker_control.docker_register_config(\n None,\n 'flukebook_deepsense2',\n 'wildme/wbia-plugin-deepsense:app2',\n run_args={'_internal_port': 5000, '_external_suggested_port': 5000},\n container_check_func=_wbia_plugin_deepsense_check_container,\n)\ndocker_control.docker_register_config(\n None,\n 'flukebook_deepsense5',\n 'wildme/wbia-plugin-deepsense:app5',\n run_args={'_internal_port': 5000, '_external_suggested_port': 5000},\n container_check_func=_wbia_plugin_deepsense_check_container,\n)\ndocker_control.docker_register_config(\n None,\n 'deepsense_SRW_v1',\n 'wildme/wbia-plugin-deepsense:srw',\n run_args={'_internal_port': 5000, '_external_suggested_port': 
5000},\n container_check_func=_wbia_plugin_deepsense_check_container,\n)\ndocker_control.docker_register_config(\n None,\n 'original_deepsense',\n 'wildme/wbia-plugin-deepsense:original',\n run_args={'_internal_port': 5000, '_external_suggested_port': 5000},\n container_check_func=_wbia_plugin_deepsense_check_container,\n)\n\n\n# This might need to be updated as part of extending the plugin in the future\ndef _deepsense_container_selector(ibs, aid):\n species = ibs.get_annot_species(aid)\n container_name = 'original_deepsense'\n if species == 'eubalaena_australis':\n container_name = 'deepsense_SRW_v1'\n return container_name\n\n\ndef _deepsense_url_selector(ibs, aid):\n container_name = _deepsense_container_selector(ibs, aid)\n return ibs.wbia_plugin_deepsense_ensure_backend(container_name)\n\n\n@register_ibs_method\ndef _wbia_plugin_deepsense_init_testdb(ibs):\n local_path = dirname(abspath(__file__))\n image_path = abspath(join(local_path, '..', 'example-images'))\n assert exists(image_path)\n gid_list = ibs.import_folder(image_path, ensure_loadable=False, ensure_exif=False)\n uri_list = ibs.get_image_uris_original(gid_list)\n annot_name_list = [splitext(split(uri)[1])[0] for uri in uri_list]\n aid_list = ibs.use_images_as_annotations(gid_list)\n ibs.set_annot_names(aid_list, annot_name_list)\n return gid_list, aid_list\n\n\n@register_ibs_method\ndef _wbia_plugin_deepsense_rank(ibs, response_json, desired_name):\n ids = response_json['identification']\n for index, result in enumerate(ids):\n whale_id = result['whale_id']\n flukebook_id = result.get('flukebook_id', whale_id)\n probability = result['probability']\n name = str(flukebook_id)\n if name == desired_name:\n return (index, probability)\n return (-1, -1)\n\n\n# This method converts from the ibeis/Flukebook individual UUIDs to the Deepsense/\n# NEAQ IDs used by the deepsense container.\n@register_ibs_method\ndef wbia_plugin_deepsense_id_to_flukebook(ibs, deepsense_id, container_name):\n id_dict = ibs.wbia_plugin_deepsense_ensure_id_map(container_name)\n if deepsense_id not in id_dict:\n # print warning bc we're missing a deepsense_id from our deepsense-flukebook map\n # logger.info('[WARNING]: deepsense id %s is missing from the deepsense-flukebook ID map .csv' % deepsense_id)\n return str(deepsense_id)\n ans = id_dict[deepsense_id]\n return ans\n\n\n@register_ibs_method\ndef wbia_plugin_deepsense_ensure_backend(\n ibs, container_name='flukebook_deepsense', **kwargs\n):\n global CONTAINER_ASSET_MAP\n assert container_name in CONTAINER_ASSET_MAP, (\n 'CONTAINER_ASSET_MAP has no entry for container %s' % container_name\n )\n # make sure that the container is online using docker_control functions\n if CONTAINER_ASSET_MAP[container_name]['backend_url'] is None:\n # Register depc blacklist\n prop_list = [None, 'theta', 'verts', 'species', 'name', 'yaws']\n for prop in prop_list:\n ibs.depc_annot.register_delete_table_exclusion(\n 'DeepsenseIdentification', prop\n )\n ibs.depc_annot.register_delete_table_exclusion('DeepsenseAlignment', prop)\n ibs.depc_annot.register_delete_table_exclusion('DeepsenseKeypoint', prop)\n\n BACKEND_URLS = ibs.docker_ensure(container_name)\n if len(BACKEND_URLS) == 0:\n raise RuntimeError('Could not ensure container')\n elif len(BACKEND_URLS) == 1:\n CONTAINER_ASSET_MAP[container_name]['backend_url'] = BACKEND_URLS[0]\n else:\n CONTAINER_ASSET_MAP[container_name]['backend_url'] = BACKEND_URLS[0]\n args = (\n BACKEND_URLS,\n container_name,\n )\n logger.info(\n '[WARNING] Multiple BACKEND_URLS:\\n\\tFound: 
%r\\n\\tUsing: %r' % args\n )\n return CONTAINER_ASSET_MAP[container_name]['backend_url']\n\n\n@register_ibs_method\ndef wbia_plugin_deepsense_ensure_id_map(ibs, container_name='flukebook_deepsense'):\n global CONTAINER_ASSET_MAP\n # make sure that the container is online using docker_control functions\n if CONTAINER_ASSET_MAP[container_name]['id_map'] is None:\n fpath = CONTAINER_ASSET_MAP[container_name]['individual_map_fpath']\n fpath = ut.grab_file_url(fpath, appname='wbia_deepsense', check_hash=True)\n csv_obj = ut.CSV.from_fpath(fpath, binary=False)\n CONTAINER_ASSET_MAP[container_name]['id_map'] = dict_from_csv(csv_obj)\n return CONTAINER_ASSET_MAP[container_name]['id_map']\n\n\n# I changed this to not be dependent on ints; warning untested\ndef dict_from_csv(csv_obj):\n import uuid\n\n id_dict = {}\n row_list = csv_obj.row_data\n row_list = row_list[1:] # skip header row\n for row in row_list:\n deepsense_id = row[0]\n if deepsense_id.isdigit():\n deepsense_id = int(deepsense_id)\n\n assert deepsense_id not in id_dict, (\n 'Deepsense-to-Flukebook id map contains two entries for deepsense ID %s'\n % deepsense_id\n )\n\n flukebook_id = row[1]\n try:\n uuid.UUID(flukebook_id)\n except Exception:\n raise ValueError(\n 'Unable to cast provided Flukebook id %s to a UUID' % flukebook_id\n )\n id_dict[deepsense_id] = flukebook_id\n return id_dict\n\n\n@register_ibs_method\n@register_api('/api/plugin/deepsense/identify/', methods=['GET'])\ndef wbia_plugin_deepsense_identify(ibs, annot_uuid, use_depc=True, config={}, **kwargs):\n r\"\"\"\n Run the Kaggle winning Right-whale deepsense.ai ID algorithm\n\n Args:\n ibs (IBEISController): IBEIS controller object\n annot_uuid (uuid): Annotation for ID\n\n CommandLine:\n python -m wbia_deepsense._plugin --test-wbia_plugin_deepsense_identify\n python -m wbia_deepsense._plugin --test-wbia_plugin_deepsense_identify:0\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> import wbia_deepsense\n >>> import wbia\n >>> import utool as ut\n >>> from wbia.init import sysres\n >>> import numpy as np\n >>> container_name = ut.get_argval('--container', default='flukebook_deepsense')\n >>> print('Using container %s' % container_name)\n >>> dbdir = sysres.ensure_testdb_identification_example()\n >>> ibs = wbia.opendb(dbdir=dbdir)\n >>> gid_list, aid_list = ibs._wbia_plugin_deepsense_init_testdb()\n >>> annot_uuid_list = ibs.get_annot_uuids(aid_list)\n >>> annot_name_list = ibs.get_annot_names(aid_list)\n >>> rank_list = []\n >>> score_list = []\n >>> for annot_uuid, annot_name in zip(annot_uuid_list, annot_name_list):\n >>> resp_json = ibs.wbia_plugin_deepsense_identify(annot_uuid, use_depc=False, container_name=container_name)\n >>> rank, score = ibs._wbia_plugin_deepsense_rank(resp_json, annot_name)\n >>> print('[instant] for whale id = %s, got rank %d with score %0.04f' % (annot_name, rank, score, ))\n >>> rank_list.append(rank)\n >>> score_list.append('%0.04f' % score)\n >>> response_list = ibs.depc_annot.get('DeepsenseIdentification', aid_list, 'response')\n >>> rank_list_cache = []\n >>> score_list_cache = []\n >>> for annot_name, resp_json in zip(annot_name_list, response_list):\n >>> rank, score = ibs._wbia_plugin_deepsense_rank(resp_json, annot_name)\n >>> print('[cache] for whale id = %s, got rank %d with score %0.04f' % (annot_name, rank, score, ))\n >>> rank_list_cache.append(rank)\n >>> score_list_cache.append('%0.04f' % score)\n >>> assert rank_list == rank_list_cache\n >>> # assert score_list == score_list_cache\n >>> result = (rank_list, score_list)\n >>> 
print(result)\n ([0, -1, -1, 0], ['0.9052', '-1.0000', '-1.0000', '0.6986'])\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> import wbia_deepsense\n >>> import wbia\n >>> import utool as ut\n >>> from wbia.init import sysres\n >>> import numpy as np\n >>> container_name = ut.get_argval('--container', default='flukebook_deepsense')\n >>> print('Using container %s' % container_name)\n >>> dbdir = sysres.ensure_testdb_identification_example()\n >>> ibs = wbia.opendb(dbdir=dbdir)\n >>> gid_list, aid_list_ = ibs._wbia_plugin_deepsense_init_testdb()\n >>> aid = aid_list_[3]\n >>> aid_list = [aid] * 10\n >>> annot_uuid_list = ibs.get_annot_uuids(aid_list)\n >>> annot_name_list = ibs.get_annot_names(aid_list)\n >>> rank_list = []\n >>> score_list = []\n >>> for annot_uuid, annot_name in zip(annot_uuid_list, annot_name_list):\n >>> resp_json = ibs.wbia_plugin_deepsense_identify(annot_uuid, use_depc=False, container_name=container_name)\n >>> rank, score = ibs._wbia_plugin_deepsense_rank(resp_json, annot_name)\n >>> print('[instant] for whale id = %s, got rank %d with score %0.04f' % (annot_name, rank, score, ))\n >>> rank_list.append(rank)\n >>> score_list.append(score)\n >>> rank_list = np.array(rank_list)\n >>> score_list = np.array(score_list)\n >>> print(np.min(rank_list))\n >>> print(np.max(rank_list))\n >>> print(np.mean(rank_list))\n >>> print(np.std(rank_list))\n >>> print(np.min(score_list))\n >>> print(np.max(score_list))\n >>> print(np.mean(score_list))\n >>> print(np.std(score_list))\n >>> result = (rank_list, score_list)\n print(result)\n ([0, -1, -1, 0], ['0.9052', '-1.0000', '-1.0000', '0.6986'])\n \"\"\"\n aid = aid_from_annot_uuid(ibs, annot_uuid)\n\n if use_depc:\n response_list = ibs.depc_annot.get(\n 'DeepsenseIdentification', [aid], 'response', config=config\n )\n response = response_list[0]\n else:\n response = ibs.wbia_plugin_deepsense_identify_aid(aid, config=config, **kwargs)\n return response\n\n\ndef aid_from_annot_uuid(ibs, annot_uuid):\n annot_uuid_list = [annot_uuid]\n ibs.web_check_uuids(qannot_uuid_list=annot_uuid_list)\n annot_uuid_list = ensure_uuid_list(annot_uuid_list)\n # Ensure annotations\n aid_list = ibs.get_annot_aids_from_uuid(annot_uuid_list)\n aid = aid_list[0]\n return aid\n\n\n@register_ibs_method\ndef get_b64_image(ibs, aid, training_config=False, **kwargs):\n if not training_config:\n image_path = ibs.deepsense_annot_chip_fpath(aid, **kwargs)\n else:\n image_path = deepsense_annot_training_chip_fpath(ibs, aid)\n pil_image = Image.open(image_path)\n byte_buffer = BytesIO()\n pil_image.save(byte_buffer, format='JPEG')\n b64_image = base64.b64encode(byte_buffer.getvalue()).decode('utf-8')\n return b64_image\n\n\n@register_ibs_method\ndef wbia_plugin_deepsense_identify_aid(ibs, aid, config={}, **kwargs):\n url = _deepsense_url_selector(ibs, aid)\n b64_image = ibs.get_b64_image(aid, **config)\n data = {\n 'image': b64_image,\n 'configuration': {'top_n': 100, 'threshold': 0.0},\n }\n url = 'http://%s/api/classify' % (url)\n logger.info('Sending identify to %s' % url)\n response = requests.post(url, json=data, timeout=120)\n assert response.status_code == 200\n response = response.json()\n container_name = _deepsense_container_selector(ibs, aid)\n response = update_response_with_flukebook_ids(ibs, response, container_name)\n return response\n\n\n@register_ibs_method\ndef wbia_plugin_deepsense_align_aid(ibs, aid, config={}, training_config=False, **kwargs):\n url = _deepsense_url_selector(ibs, aid)\n b64_image = get_b64_image(ibs, aid, 
training_config=training_config, **config)\n data = {\n 'image': b64_image,\n }\n url = 'http://%s/api/alignment' % (url)\n logger.info('Sending alignment to %s' % url)\n response = requests.post(url, json=data, timeout=120)\n assert response.status_code == 200\n return response.json()\n\n\n@register_ibs_method\ndef wbia_plugin_deepsense_keypoint_aid(\n ibs, aid, alignment_result, config={}, training_config=False, **kwargs\n):\n url = _deepsense_url_selector(ibs, aid)\n b64_image = get_b64_image(ibs, aid, training_config=training_config, **config)\n data = alignment_result.copy()\n data['image'] = b64_image\n url = 'http://%s/api/keypoints' % (url)\n logger.info('Sending keypoints to %s' % url)\n response = requests.post(url, json=data, timeout=120)\n assert response.status_code == 200\n return response.json()\n\n\n@register_ibs_method\n@register_api('/api/plugin/deepsense/align/', methods=['GET'])\ndef wbia_plugin_deepsense_align(ibs, annot_uuid, use_depc=True, config={}, **kwargs):\n r\"\"\"\n Run the Kaggle winning Right-whale deepsense.ai ID algorithm\n\n Args:\n ibs (IBEISController): IBEIS controller object\n annot_uuid (uuid): Annotation for ID\n\n CommandLine:\n python -m wbia_deepsense._plugin --test-wbia_plugin_deepsense_align\n python -m wbia_deepsense._plugin --test-wbia_plugin_deepsense_align:0\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> import wbia_deepsense\n >>> import wbia\n >>> import utool as ut\n >>> from wbia.init import sysres\n >>> import numpy as np\n >>> container_name = ut.get_argval('--container', default='flukebook_deepsense')\n >>> print('Using container %s' % container_name)\n >>> dbdir = sysres.ensure_testdb_identification_example()\n >>> ibs = wbia.opendb(dbdir=dbdir)\n >>> gid_list, aid_list = ibs._wbia_plugin_deepsense_init_testdb()\n >>> annot_uuid_list = ibs.get_annot_uuids(aid_list)\n >>> aligns_list = []\n >>> for annot_uuid in annot_uuid_list:\n >>> resp_json = ibs.wbia_plugin_deepsense_align(annot_uuid, use_depc=False, container_name=container_name)\n >>> aligns_list.append(resp_json)\n >>> aligns_list_cache = ibs.depc_annot.get('DeepsenseAlignment', aid_list, 'response')\n >>> assert aligns_list == aligns_list_cache\n >>> aligns_list_cache\n >>> print(result)\n [{'localization': {'bbox1': {'x': 994, 'y': 612}, 'bbox2': {'x': 1511, 'y': 1160}}}, {'localization': {'bbox1': {'x': 0, 'y': 408}, 'bbox2': {'x': 1128, 'y': 727}}}, {'localization': {'bbox1': {'x': 2376, 'y': 404}, 'bbox2': {'x': 3681, 'y': 1069}}}, {'localization': {'bbox1': {'x': 822, 'y': 408}, 'bbox2': {'x': 1358, 'y': 956}}}]\n \"\"\"\n aid = aid_from_annot_uuid(ibs, annot_uuid)\n\n if use_depc:\n response_list = ibs.depc_annot.get(\n 'DeepsenseAlignment', [aid], 'response', config=config\n )\n response = response_list[0]\n else:\n response = ibs.wbia_plugin_deepsense_align_aid(aid, config=config, **kwargs)\n return response\n\n\n@register_ibs_method\n@register_api('/api/plugin/deepsense/keypoint/', methods=['GET'])\ndef wbia_plugin_deepsense_keypoint(ibs, annot_uuid, use_depc=True, config={}, **kwargs):\n r\"\"\"\n Run the Kaggle winning Right-whale deepsense.ai ID algorithm\n\n Args:\n ibs (IBEISController): IBEIS controller object\n annot_uuid (uuid): Annotation for ID\n\n CommandLine:\n python -m wbia_deepsense._plugin --test-wbia_plugin_deepsense_keypoint\n python -m wbia_deepsense._plugin --test-wbia_plugin_deepsense_keypoint:0\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> import wbia_deepsense\n >>> import wbia\n >>> import utool as ut\n >>> from wbia.init import sysres\n >>> import 
numpy as np\n >>> container_name = ut.get_argval('--container', default='flukebook_deepsense')\n >>> print('Using container %s' % container_name)\n >>> dbdir = sysres.ensure_testdb_identification_example()\n >>> ibs = wbia.opendb(dbdir=dbdir)\n >>> gid_list, aid_list = ibs._wbia_plugin_deepsense_init_testdb()\n >>> annot_uuid_list = ibs.get_annot_uuids(aid_list)\n >>> viewpoint_list = []\n >>> for annot_uuid in annot_uuid_list:\n >>> resp_json = ibs.wbia_plugin_deepsense_keypoint(annot_uuid, use_depc=False, container_name=container_name)\n >>> viewpoint_list.append(resp_json)\n >>> viewpoint_list_cache = ibs.depc_annot.get('DeepsenseKeypoint', aid_list, 'response')\n >>> assert viewpoint_list == viewpoint_list_cache\n >>> result = viewpoint_list_cache\n >>> print(result)\n [{'keypoints': {'blowhead': {'x': 1357, 'y': 963}, 'bonnet': {'x': 1151, 'y': 804}, 'angle': -142.33743653326957}}, {'keypoints': {'blowhead': {'x': 0, 'y': 724}, 'bonnet': {'x': 757, 'y': 477}, 'angle': -18.070882049942213}}, {'keypoints': {'blowhead': {'x': 3497, 'y': 404}, 'bonnet': {'x': 2875, 'y': 518}, 'angle': -190.38588712124752}}, {'keypoints': {'blowhead': {'x': 1098, 'y': 784}, 'bonnet': {'x': 1115, 'y': 523}, 'angle': -86.27335507676072}}]\n\n \"\"\"\n aid = aid_from_annot_uuid(ibs, annot_uuid)\n\n if use_depc:\n # TODO: depc version\n response_list = ibs.depc_annot.get('DeepsenseKeypoint', [aid], 'response')\n response = response_list[0]\n else:\n alignment = ibs.wbia_plugin_deepsense_align_aid(aid, config=config, **kwargs)\n response = ibs.wbia_plugin_deepsense_keypoint_aid(\n aid, alignment, config=config, **kwargs\n )\n return response\n\n\n@register_ibs_method\ndef deepsense_annot_chip_fpath(ibs, aid, dim_size=DIM_SIZE, **kwargs):\n\n gid = ibs.get_annot_gids(aid)\n w, h = ibs.get_image_sizes(gid)\n xtl, ytl, w_, h_ = ibs.get_annot_bboxes(aid)\n image_area = w * h\n if image_area <= 1:\n image_area = -1\n annot_area = w_ * h_\n coverage = annot_area / image_area\n trivial = coverage >= 0.99\n logger.info(\n '[Deepsense] Trivial config?: %r (area percentage = %0.02f)' % (trivial, coverage)\n )\n\n if trivial:\n config = {\n 'dim_size': dim_size,\n 'resize_dim': 'area',\n 'ext': '.jpg',\n }\n else:\n config = {\n 'dim_size': dim_size // 2,\n 'resize_dim': 'area',\n 'pad': 0.99,\n 'ext': '.jpg',\n }\n logger.info('[Deepsense] Using chip_fpath config = %s' % (ut.repr3(config),))\n\n fpath = ibs.get_annot_chip_fpath(aid, ensure=True, config2_=config)\n return fpath\n\n\n@register_ibs_method\ndef deepsense_annot_training_chip_fpath(ibs, aid, **kwargs):\n\n config = {\n 'dim_size': (256, 256),\n 'resize_dim': 'wh',\n 'ext': '.jpg',\n }\n fpath = ibs.get_annot_chip_fpath(aid, ensure=True, config2_=config)\n return fpath\n\n\n@register_ibs_method\ndef wbia_plugin_deepsense_illustration(\n ibs, annot_uuid, output=False, config={}, **kwargs\n):\n r\"\"\"\n Run the illustration examples\n\n Args:\n ibs (IBEISController): IBEIS controller object\n annot_uuid (uuid): Annotation for ID\n\n CommandLine:\n python -m wbia_deepsense._plugin --test-wbia_plugin_deepsense_illustration\n python -m wbia_deepsense._plugin --test-wbia_plugin_deepsense_illustration:0\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> import wbia_deepsense\n >>> import wbia\n >>> import utool as ut\n >>> from wbia.init import sysres\n >>> import numpy as np\n >>> container_name = ut.get_argval('--container', default='flukebook_deepsense')\n >>> print('Using container %s' % container_name)\n >>> dbdir = sysres.ensure_testdb_identification_example()\n >>> 
ibs = wbia.opendb(dbdir=dbdir)\n >>> gid_list, aid_list = ibs._wbia_plugin_deepsense_init_testdb()\n >>> annot_uuid_list = ibs.get_annot_uuids(aid_list)\n >>> for annot_uuid in annot_uuid_list:\n >>> output_filepath_list = ibs.wbia_plugin_deepsense_illustration(annot_uuid)\n \"\"\"\n alignment = ibs.wbia_plugin_deepsense_align(annot_uuid, config=config)\n keypoints = ibs.wbia_plugin_deepsense_keypoint(annot_uuid, config=config)\n aid = aid_from_annot_uuid(ibs, annot_uuid)\n image_path = ibs.deepsense_annot_chip_fpath(aid, **config)\n # TODO write this func\n # image_path = ibs.get_deepsense_chip_fpath(aid)\n pil_img = Image.open(image_path)\n # draw a red box based on alignment on pil_image\n draw = ImageDraw.Draw(pil_img)\n # draw.rectangle(((0, 00), (100, 100)), fill=\"black\")\n draw.rectangle(\n (\n (\n alignment['localization']['bbox1']['x'],\n alignment['localization']['bbox1']['y'],\n ),\n (\n alignment['localization']['bbox2']['x'],\n alignment['localization']['bbox2']['y'],\n ),\n ),\n outline='red',\n width=5,\n )\n\n blowhead = (\n keypoints['keypoints']['blowhead']['x'],\n keypoints['keypoints']['blowhead']['y'],\n )\n blowhead_btm, blowhead_top = bounding_box_at_centerpoint(blowhead)\n draw.ellipse((blowhead_btm, blowhead_top), outline='green', width=5)\n\n bonnet = (\n keypoints['keypoints']['bonnet']['x'],\n keypoints['keypoints']['bonnet']['y'],\n )\n bonnet_btm, bonnet_top = bounding_box_at_centerpoint(bonnet)\n draw.ellipse((bonnet_btm, bonnet_top), outline='blue', width=5)\n\n if output:\n local_path = dirname(abspath(__file__))\n output_path = abspath(join(local_path, '..', '_output'))\n ut.ensuredir(output_path)\n output_filepath_fmtstr = join(output_path, 'illustration-%s.jpg')\n output_filepath = output_filepath_fmtstr % (annot_uuid,)\n logger.info('Writing to %s' % (output_filepath,))\n pil_img.save(output_filepath)\n\n return pil_img\n\n\n@register_ibs_method\ndef wbia_plugin_deepsense_passport(ibs, annot_uuid, output=False, config={}, **kwargs):\n keypoints = ibs.wbia_plugin_deepsense_keypoint(annot_uuid, config=config)\n aid = aid_from_annot_uuid(ibs, annot_uuid)\n image_path = ibs.deepsense_annot_chip_fpath(aid, **config)\n # TODO write this func\n # image_path = ibs.get_deepsense_chip_fpath(aid)\n pil_img = Image.open(image_path)\n\n # add padding on all sides of the image to prevent cutoff\n orig_size_np = np.array(pil_img.size)\n new_size = tuple(orig_size_np * 3)\n canvas = Image.new('RGB', new_size)\n canvas.paste(pil_img, pil_img.size)\n\n # get new coords of the blowhead and bonnet to use for rotation\n blowhead_np = np.array(\n (keypoints['keypoints']['blowhead']['x'], keypoints['keypoints']['blowhead']['y'])\n )\n blowhead_np += orig_size_np\n bonnet_np = np.array(\n (keypoints['keypoints']['bonnet']['x'], keypoints['keypoints']['bonnet']['y'])\n )\n bonnet_np += orig_size_np\n bonnet = tuple(bonnet_np)\n\n # rotate along the whale's axis\n angle = keypoints['keypoints']['angle']\n angle -= 90.0 # deepsense is left-aligned by default, we prefer top-aligned\n # translate coords are the difference from the blowhold to the center of the image\n blowhole = bonnet_np\n center = orig_size_np * 1.5\n translate = tuple(center - blowhole)\n canvas = canvas.rotate(\n angle, center=bonnet, translate=translate, resample=Image.NEAREST\n )\n\n # crop down to a square around the keypoints\n axis_line = blowhead_np - bonnet_np\n unit_size = np.hypot(axis_line[0], axis_line[1])\n crop_1 = center - np.array((unit_size, 1.5 * unit_size))\n crop_2 = center + 
np.array((unit_size, 0.5 * unit_size))\n # PIL.Image.crop needs a 4-tuple of ints for the crop function\n crop_box = tuple(np.concatenate((crop_1, crop_2)).astype(int))\n canvas = canvas.crop(crop_box)\n\n # resize the image to standard\n square_size = 256 # TODO this was 1000\n canvas = canvas.resize((square_size, square_size), resample=Image.LANCZOS)\n # now draw ellipses on the blowhole and bonnet.\n # because of the rotation, centering, and now resizing, we know these will always be in the exact same pixel location\n # draw = ImageDraw.Draw(canvas)\n # bonnet_coords = bounding_box_at_centerpoint((square_size / 2, square_size / 4))\n # # draw.ellipse( bonnet_coords, outline=\"green\", width=2) # TODO this was not commented\n # blowhole_coords = bounding_box_at_centerpoint((square_size / 2, square_size * 3 / 4))\n # draw.ellipse( blowhole_coords, outline=\"blue\", width=2) # TODO this was not commented\n\n if output:\n local_path = dirname(abspath(__file__))\n output_path = abspath(join(local_path, '..', '_output'))\n ut.ensuredir(output_path)\n output_filepath_fmtstr = join(output_path, 'passport-%s.jpg')\n output_filepath = output_filepath_fmtstr % (annot_uuid,)\n logger.info('Writing to %s' % (output_filepath,))\n canvas.save(output_filepath)\n\n return canvas\n\n\ndef bounding_box_at_centerpoint(point, radius=15):\n point_less = tuple(coord - radius for coord in point)\n point_more = tuple(coord + radius for coord in point)\n return (point_less, point_more)\n\n\ndef update_response_with_flukebook_ids(ibs, response, container_name):\n for score_dict in response['identification']:\n deepsense_id = score_dict['whale_id']\n # below method needs to be updated to be species-sensitive\n flukebook_id = ibs.wbia_plugin_deepsense_id_to_flukebook(\n deepsense_id, container_name\n )\n score_dict['flukebook_id'] = flukebook_id\n return response\n\n\nclass DeepsenseIdentificationConfig(dt.Config): # NOQA\n _param_info_list = [\n ut.ParamInfo('dim_size', DIM_SIZE),\n ]\n\n\n@register_preproc_annot(\n tablename='DeepsenseIdentification',\n parents=[ANNOTATION_TABLE],\n colnames=['response'],\n coltypes=[dict],\n configclass=DeepsenseIdentificationConfig,\n fname='deepsense',\n chunksize=4,\n)\ndef wbia_plugin_deepsense_identify_deepsense_ids_depc(depc, aid_list, config):\n # The doctest for wbia_plugin_deepsense_identify_deepsense_ids also covers this func\n ibs = depc.controller\n for aid in aid_list:\n response = ibs.wbia_plugin_deepsense_identify_aid(aid, config=config)\n yield (response,)\n\n\nclass DeepsenseAlignmentConfig(dt.Config): # NOQA\n _param_info_list = [\n ut.ParamInfo('dim_size', DIM_SIZE),\n ]\n\n\n@register_preproc_annot(\n tablename='DeepsenseAlignment',\n parents=[ANNOTATION_TABLE],\n colnames=['response'],\n coltypes=[dict],\n configclass=DeepsenseAlignmentConfig,\n fname='deepsense',\n chunksize=128,\n)\ndef wbia_plugin_deepsense_align_deepsense_ids_depc(depc, aid_list, config):\n # The doctest for wbia_plugin_deepsense_identify_deepsense_ids also covers this func\n ibs = depc.controller\n for aid in aid_list:\n response = ibs.wbia_plugin_deepsense_align_aid(aid, config=config)\n yield (response,)\n\n\nclass DeepsenseKeypointsConfig(dt.Config): # NOQA\n _param_info_list = [\n ut.ParamInfo('dim_size', DIM_SIZE),\n ]\n\n\n@register_preproc_annot(\n tablename='DeepsenseKeypoint',\n parents=['DeepsenseAlignment'],\n colnames=['response'],\n coltypes=[dict],\n configclass=DeepsenseKeypointsConfig,\n fname='deepsense',\n chunksize=128,\n)\ndef 
wbia_plugin_deepsense_keypoint_deepsense_ids_depc(depc, alignment_rowids, config):\n # The doctest for wbia_plugin_deepsense_identify_deepsense_ids also covers this func\n ibs = depc.controller\n alignments = depc.get_native('DeepsenseAlignment', alignment_rowids, 'response')\n aid_list = depc.get_ancestor_rowids('DeepsenseAlignment', alignment_rowids)\n for alignment, aid in zip(alignments, aid_list):\n response = ibs.wbia_plugin_deepsense_keypoint_aid(aid, alignment, config=config)\n yield (response,)\n\n\nclass DeepsenseTrainingConfig(dt.Config): # NOQA\n _param_info_list = [ut.ParamInfo('dim_size', (256, 256))]\n\n\n@register_preproc_annot(\n tablename='DeepsenseTraining',\n parents=[ANNOTATION_TABLE],\n colnames=['response'],\n coltypes=[dict],\n configclass=DeepsenseTrainingConfig,\n fname='deepsense',\n chunksize=128,\n)\ndef wbia_plugin_deepsense_training_keypoints(depc, aid_list, config):\n # The doctest for wbia_plugin_deepsense_identify_deepsense_ids also covers this func\n ibs = depc.controller\n for aid in aid_list:\n alignment = ibs.wbia_plugin_deepsense_align_aid(aid, training_config=True)\n response = ibs.wbia_plugin_deepsense_keypoint_aid(\n aid, alignment, training_config=True\n )\n yield (response,)\n\n\nclass DeepsenseIllustrationConfig(dt.Config): # NOQA\n _param_info_list = [ut.ParamInfo('dim_size', DIM_SIZE), ut.ParamInfo('ext', '.jpg')]\n\n\ndef pil_image_load(absolute_path):\n pil_img = Image.open(absolute_path)\n return pil_img\n\n\ndef pil_image_write(absolute_path, pil_img):\n pil_img.save(absolute_path)\n\n\n@register_preproc_annot(\n tablename='DeepsenseIllustration',\n parents=[ANNOTATION_TABLE],\n colnames=['image'],\n coltypes=[('extern', pil_image_load, pil_image_write)],\n configclass=DeepsenseIllustrationConfig,\n fname='deepsense',\n chunksize=128,\n)\ndef wbia_plugin_deepsense_illustrate_deepsense_ids_depc(depc, aid_list, config):\n # The doctest for wbia_plugin_deepsense_identify_deepsense_ids also covers this func\n ibs = depc.controller\n annot_uuid_list = ibs.get_annot_uuids(aid_list)\n for annot_uuid in annot_uuid_list:\n response = ibs.wbia_plugin_deepsense_illustration(annot_uuid, config=config)\n yield (response,)\n\n\nclass DeepsensePassportConfig(dt.Config): # NOQA\n _param_info_list = [ut.ParamInfo('dim_size', DIM_SIZE), ut.ParamInfo('ext', '.jpg')]\n\n\n@register_preproc_annot(\n tablename='DeepsensePassport',\n parents=[ANNOTATION_TABLE],\n colnames=['image'],\n coltypes=[('extern', pil_image_load, pil_image_write)],\n configclass=DeepsensePassportConfig,\n fname='deepsense',\n chunksize=128,\n)\ndef wbia_plugin_deepsense_passport_deepsense_ids_depc(depc, aid_list, config):\n # The doctest for wbia_plugin_deepsense_identify_deepsense_ids also covers this func\n ibs = depc.controller\n annot_uuid_list = ibs.get_annot_uuids(aid_list)\n for annot_uuid in annot_uuid_list:\n response = ibs.wbia_plugin_deepsense_passport(annot_uuid, config=config)\n yield (response,)\n\n\ndef get_match_results(depc, qaid_list, daid_list, score_list, config):\n \"\"\"converts table results into format for ipython notebook\"\"\"\n # qaid_list, daid_list = request.get_parent_rowids()\n # score_list = request.score_list\n # config = request.config\n\n unique_qaids, groupxs = ut.group_indices(qaid_list)\n # grouped_qaids_list = ut.apply_grouping(qaid_list, groupxs)\n grouped_daids = ut.apply_grouping(daid_list, groupxs)\n grouped_scores = ut.apply_grouping(score_list, groupxs)\n\n ibs = depc.controller\n unique_qnids = ibs.get_annot_nids(unique_qaids)\n\n # scores\n 
_iter = zip(unique_qaids, unique_qnids, grouped_daids, grouped_scores)\n for qaid, qnid, daids, scores in _iter:\n dnids = ibs.get_annot_nids(daids)\n\n # Remove distance to self\n annot_scores = np.array(scores)\n daid_list_ = np.array(daids)\n dnid_list_ = np.array(dnids)\n\n is_valid = daid_list_ != qaid\n daid_list_ = daid_list_.compress(is_valid)\n dnid_list_ = dnid_list_.compress(is_valid)\n annot_scores = annot_scores.compress(is_valid)\n\n # Hacked in version of creating an annot match object\n match_result = wbia.AnnotMatch()\n match_result.qaid = qaid\n match_result.qnid = qnid\n match_result.daid_list = daid_list_\n match_result.dnid_list = dnid_list_\n match_result._update_daid_index()\n match_result._update_unique_nid_index()\n\n grouped_annot_scores = vt.apply_grouping(annot_scores, match_result.name_groupxs)\n name_scores = np.array([np.sum(dists) for dists in grouped_annot_scores])\n match_result.set_cannonical_name_score(annot_scores, name_scores)\n yield match_result\n\n\nclass DeepsenseConfig(dt.Config): # NOQA\n \"\"\"\n CommandLine:\n python -m wbia_deepsense._plugin --test-DeepsenseConfig\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia_deepsense._plugin import * # NOQA\n >>> config = DeepsenseConfig()\n >>> result = config.get_cfgstr()\n >>> print(result)\n Deepsense(dim_size=2000)\n \"\"\"\n\n def get_param_info_list(self):\n return [\n ut.ParamInfo('dim_size', DIM_SIZE),\n ]\n\n\nclass DeepsenseRequest(dt.base.VsOneSimilarityRequest):\n _symmetric = False\n _tablename = 'Deepsense'\n\n @ut.accepts_scalar_input\n def get_fmatch_overlayed_chip(request, aid_list, config=None):\n depc = request.depc\n ibs = depc.controller\n passport_paths = ibs.depc_annot.get(\n 'DeepsensePassport',\n aid_list,\n 'image',\n config=config,\n read_extern=False,\n ensure=True,\n )\n passports = list(map(vt.imread, passport_paths))\n return passports\n\n def render_single_result(request, cm, aid, **kwargs):\n # HACK FOR WEB VIEWER\n chips = request.get_fmatch_overlayed_chip([cm.qaid, aid], config=request.config)\n import vtool as vt\n\n out_img = vt.stack_image_list(chips)\n return out_img\n\n def postprocess_execute(request, table, parent_rowids, rowids, result_list):\n qaid_list, daid_list = list(zip(*parent_rowids))\n score_list = ut.take_column(result_list, 0)\n depc = request.depc\n config = request.config\n cm_list = list(get_match_results(depc, qaid_list, daid_list, score_list, config))\n table.delete_rows(rowids)\n return cm_list\n\n def execute(request, *args, **kwargs):\n # kwargs['use_cache'] = False\n result_list = super(DeepsenseRequest, request).execute(*args, **kwargs)\n qaids = kwargs.pop('qaids', None)\n if qaids is not None:\n result_list = [result for result in result_list if result.qaid in qaids]\n return result_list\n\n\n@register_preproc_annot(\n tablename='Deepsense',\n parents=[ANNOTATION_TABLE, ANNOTATION_TABLE],\n colnames=['score'],\n coltypes=[float],\n configclass=DeepsenseConfig,\n requestclass=DeepsenseRequest,\n fname='deepsense',\n rm_extern_on_delete=True,\n chunksize=None,\n)\ndef wbia_plugin_deepsense(depc, qaid_list, daid_list, config):\n r\"\"\"\n CommandLine:\n python -m wbia_deepsense._plugin --exec-wbia_plugin_deepsense\n python -m wbia_deepsense._plugin --exec-wbia_plugin_deepsense:0\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from wbia_deepsense._plugin import *\n >>> import wbia\n >>> import itertools as it\n >>> import utool as ut\n >>> from wbia.init import sysres\n >>> import numpy as np\n >>> dbdir = 
sysres.ensure_testdb_identification_example()\n >>> ibs = wbia.opendb(dbdir=dbdir)\n >>> depc = ibs.depc_annot\n >>> gid_list, aid_list = ibs._wbia_plugin_deepsense_init_testdb()\n >>> # For tests, make a (0, 0, 1, 1) bbox with the same name in the same image for matching\n >>> annot_uuid_list = ibs.get_annot_uuids(aid_list)\n >>> annot_name_list = ibs.get_annot_names(aid_list)\n >>> aid_list_ = ibs.add_annots(gid_list, [(0, 0, 1, 1)] * len(gid_list), name_list=annot_name_list)\n >>> qaid = aid_list[0]\n >>> qannot_name = annot_name_list[0]\n >>> qaid_list = [qaid]\n >>> daid_list = aid_list + aid_list_\n >>> root_rowids = tuple(zip(*it.product(qaid_list, daid_list)))\n >>> config = DeepsenseConfig()\n >>> # Call function via request\n >>> request = DeepsenseRequest.new(depc, qaid_list, daid_list)\n >>> result = request.execute()\n >>> am = result[0]\n >>> unique_nids = am.unique_nids\n >>> name_score_list = am.name_score_list\n >>> unique_name_text_list = ibs.get_name_texts(unique_nids)\n >>> name_score_list_ = ['%0.04f' % (score, ) for score in am.name_score_list]\n >>> name_score_dict = dict(zip(unique_name_text_list, name_score_list_))\n >>> print('Queried Deepsense algorithm for ground-truth ID = %s' % (qannot_name, ))\n >>> result = ut.repr3(name_score_dict)\n >>> print(result)\n {\n '64edec9a-b998-4f96-a9d6-6dddcb8f8c0a': '0.8082',\n '825c5de0-d764-464c-91b6-9e507c5502fd': '0.0000',\n 'bf017955-9ed9-4311-96c9-eed4556cdfdf': '0.0000',\n 'e36c9f90-6065-4354-822d-c0fef25441ad': '0.0001',\n }\n \"\"\"\n ibs = depc.controller\n\n qaids = list(set(qaid_list))\n daids = list(set(daid_list))\n\n assert len(qaids) == 1\n qaid = qaids[0]\n annot_uuid = ibs.get_annot_uuids(qaid)\n resp_json = ibs.wbia_plugin_deepsense_identify(\n annot_uuid, use_depc=True, config=config\n )\n # update response_json to use flukebook names instead of deepsense\n\n dnames = ibs.get_annot_name_texts(daids)\n name_counter_dict = {}\n for daid, dname in zip(daids, dnames):\n if dname in [None, const.UNKNOWN]:\n continue\n if dname not in name_counter_dict:\n name_counter_dict[dname] = 0\n name_counter_dict[dname] += 1\n\n ids = resp_json['identification']\n name_score_dict = {}\n for rank, result in enumerate(ids):\n name = result['flukebook_id']\n name_score = result['probability']\n name_counter = name_counter_dict.get(name, 0)\n if name_counter <= 0:\n if name_score > 0.01:\n args = (\n name,\n rank,\n name_score,\n len(daids),\n )\n logger.info(\n 'Suggested match name = %r (rank %d) with score = %0.04f is not in the daids (total %d)'\n % args\n )\n continue\n assert name_counter >= 1\n annot_score = name_score / name_counter\n\n assert (\n name not in name_score_dict\n ), 'Deepsense API response had multiple scores for name = %r' % (name,)\n name_score_dict[name] = annot_score\n\n dname_list = ibs.get_annot_name_texts(daid_list)\n for qaid, daid, dname in zip(qaid_list, daid_list, dname_list):\n value = name_score_dict.get(dname, 0)\n yield (value,)\n\n\n# @register_ibs_method\n# def deepsense_embed(ibs):\n# ut.embed()\n\n\n# Metadata schema:\n# ● Image : str column with image names\n# ● whaleId : int id of whale present on image\n# ● callosity : int\n# ● blowhead_x : int x coordinate of whales blowhead\n# ● blowhead_y : int y coordinate of whales blowhead\n# ● bonnet_x : int x coordinate of whales bonnet\n# ● bonnet_y : int y coordinate of whales bonnet\n# ● height : int image height\n# ● width : int image width\n# ● bbox1_x : float x coordinate of whales bbox top left corner\n# ● bbox1_y : float y coordinate 
of whales bbox top left corner\n# ● bbox2_x : float x coordinate of whales bbox bottom right corner\n# ● bbox2_y : float y coordinate of whales bbox bottom right corner\n# example rows:\n# Image,whaleID,bbox1_x,bbox1_y,bbox2_x,bbox2_y,height,width,callosity,bonnet_x,bonnet_y,blowhead_x,blowhead_y\n# 10000.jpg,1950,757,593,1009,839,1360,2048,2,898,656,804,754\n@register_ibs_method\ndef deepsense_retraining_metadata(ibs, species='Eubalaena australis'):\n aid_list = ibs.get_valid_aids(species=species)\n return ibs.deepsense_retraining_metadata_list(aid_list)\n\n\n@register_ibs_method\ndef deepsense_retraining_metadata_rotated(ibs, species='Eubalaena australis'):\n logger.info('getting aids')\n aid_list = ibs.get_valid_aids(species=species)\n logger.info('generating metadata')\n csv_str = ibs.deepsense_retraining_metadata_list(aid_list)\n logger.info('converting metadata to dicts')\n csv_dict = csv_string_to_dicts(csv_str)\n logger.info('rotating those dicts')\n rotated_dicts = [rotate_row(row) for row in csv_dict]\n logger.info('back to a string')\n rotated_str = array_of_dicts_to_csv(rotated_dicts)\n return rotated_str\n\n\n@register_ibs_method\ndef deepsense_retraining_metadata_list(ibs, aid_list):\n num_annots = len(aid_list)\n fpaths = [ibs.deepsense_annot_training_chip_fpath(aid) for aid in aid_list]\n fpaths = [ibs.deepsense_annot_training_chip_fpath(aid) for aid in aid_list]\n assert len(fpaths) == num_annots\n names = ibs.get_annot_nids(aid_list)\n assert len(names) == num_annots\n keypoints = ibs.depc_annot.get('DeepsenseTraining', aid_list, 'response')\n # contains keypoint['blowhead']['x'] and keypoint['bonnet']['y'] etc\n # check that keypoints are relative the chip_fpath or the image_fpath\n # keypoints = [keypoint['keypoints'] for keypoint in keypoints]\n # assert len(keypoints) == num_annots\n\n # is this worth it to only list-traverse once? lol. 
seems likely over-optimization.\n # THIS MUST BE WHERE BAD THINGS HAPPEN\n blow_xs = [row['keypoints']['blowhead']['x'] for row in keypoints]\n blow_ys = [row['keypoints']['blowhead']['y'] for row in keypoints]\n bonn_xs = [row['keypoints']['bonnet']['x'] for row in keypoints]\n bonn_ys = [row['keypoints']['bonnet']['y'] for row in keypoints]\n\n # blowx_blowy_bonx_bony = [\n # [keypoint['blowhead']['x'], keypoint['blowhead']['y'],\n # keypoint['bonnet']['x'], keypoint['bonnet']['y']]\n # for keypoint in keypoints\n # ]\n # blow_xs, blow_ys, bonn_xs, bonn_ys = np.transpose(blowx_blowy_bonx_bony)\n # for feat_list in (blow_xs, blow_ys, bonn_xs, bonn_ys):\n # assert len(feat_list) == num_annots\n\n # TODO: optimize this so it doesn't have to actually load all the images\n gid_list = ibs.get_annot_gids(aid_list)\n wh_list = ibs.get_image_sizes(gid_list)\n assert len(wh_list) == num_annots\n widths = [wh[0] for wh in wh_list]\n heights = [wh[1] for wh in wh_list]\n\n bboxes = ibs.get_annot_bboxes(aid_list)\n assert len(bboxes) == num_annots\n bbox1_xs = [bbox[0] for bbox in bboxes]\n bbox1_ys = [bbox[1] for bbox in bboxes]\n bbox2_xs = [bbox[2] for bbox in bboxes]\n bbox2_ys = [bbox[3] for bbox in bboxes]\n\n # trying this bc don't trust the widths and heights above\n widths = [x2 - x1 for (x1, x2) in zip(bbox1_xs, bbox2_xs)]\n logger.info('10 widths: %s' % widths[:10])\n heights = [y2 - y1 for (y1, y2) in zip(bbox1_ys, bbox2_ys)]\n\n callosities = [0] * num_annots\n\n header_row = [\n 'Image',\n 'whaleID',\n 'callosity',\n 'blowhead_x',\n 'blowhead_y',\n 'bonnet_x',\n 'bonnet_y',\n 'height',\n 'width',\n 'bbox1_x',\n 'bbox1_y',\n 'bbox2_x',\n 'bbox2_y',\n ]\n\n # we could skip zipping below by using ut.make_standard_csv\n full_ans = np.array(\n [\n fpaths,\n names,\n callosities,\n blow_xs,\n blow_ys,\n bonn_xs,\n bonn_ys,\n heights,\n widths,\n bbox1_xs,\n bbox1_ys,\n bbox2_xs,\n bbox2_ys,\n ]\n )\n\n # cleaned_ans = ibs.heuristically_clean_trainingset(full_ans)\n # csv_str = ut.make_standard_csv(cleaned_ans, header_row)\n\n csv_str = ut.make_standard_csv(full_ans, header_row)\n return csv_str\n\n\n@register_ibs_method\ndef deepsense_retraining_metadata_end_to_end(ibs, aid_list):\n num_annots = len(aid_list)\n fpaths = [ibs.deepsense_annot_chip_fpath(aid) for aid in aid_list]\n assert len(fpaths) == num_annots\n names = ibs.get_annot_nids(aid_list)\n assert len(names) == num_annots\n keypoints = ibs.depc_annot.get('DeepsenseKeypoint', aid_list, 'response')\n blow_xs = [row['keypoints']['blowhead']['x'] for row in keypoints]\n blow_ys = [row['keypoints']['blowhead']['y'] for row in keypoints]\n bonn_xs = [row['keypoints']['bonnet']['x'] for row in keypoints]\n bonn_ys = [row['keypoints']['bonnet']['y'] for row in keypoints]\n\n # TODO: optimize this so it doesn't have to actually load all the images\n gid_list = ibs.get_annot_gids(aid_list)\n wh_list = ibs.get_image_sizes(gid_list)\n assert len(wh_list) == num_annots\n widths = [wh[0] for wh in wh_list]\n heights = [wh[1] for wh in wh_list]\n\n alignments = ibs.depc_annot.get('DeepsenseAlignment', aid_list, 'response')\n alignments = [a['localization'] for a in alignments]\n assert len(alignments) == num_annots\n bbox1_xs = [ali['bbox1']['x'] for ali in alignments]\n bbox1_ys = [ali['bbox1']['y'] for ali in alignments]\n bbox2_xs = [ali['bbox2']['x'] for ali in alignments]\n bbox2_ys = [ali['bbox2']['y'] for ali in alignments]\n\n sizes = [get_imagesize(f) for f in fpaths]\n widths = [size[0] for size in sizes]\n heights = [size[1] for size 
in sizes]\n\n callosities = [0] * num_annots\n\n header_row = [\n 'Image',\n 'whaleID',\n 'callosity',\n 'blowhead_x',\n 'blowhead_y',\n 'bonnet_x',\n 'bonnet_y',\n 'height',\n 'width',\n 'bbox1_x',\n 'bbox1_y',\n 'bbox2_x',\n 'bbox2_y',\n ]\n\n full_ans = np.array(\n [\n fpaths,\n names,\n callosities,\n blow_xs,\n blow_ys,\n bonn_xs,\n bonn_ys,\n heights,\n widths,\n bbox1_xs,\n bbox1_ys,\n bbox2_xs,\n bbox2_ys,\n ]\n )\n\n # cleaned_ans = ibs.heuristically_clean_trainingset(full_ans)\n # csv_str = ut.make_standard_csv(cleaned_ans, header_row)\n\n csv_str = ut.make_standard_csv(full_ans, header_row)\n return csv_str\n\n\ndef get_imagesize(fpath):\n im = Image.open(fpath)\n return im.size\n\n\n@register_ibs_method\ndef deepsense_retraining_metadata_passports(\n ibs, aid_list, passport_paths=None, chip_size=256\n):\n num_annots = len(aid_list)\n if passport_paths is None:\n passport_paths = ibs.depc_annot.get(\n 'DeepsensePassport',\n aid_list,\n 'image',\n config={},\n read_extern=False,\n ensure=True,\n )\n fpaths = passport_paths\n assert len(fpaths) == num_annots\n names = ibs.get_annot_nids(aid_list)\n names = ibs.get_name_texts(names)\n names = ibs.deepsense_name_texts_to_neaq_ids(names)\n assert len(names) == num_annots\n\n # construct keypoints\n # Here we're using the same fixed keypoints that are used to make the passport\n bonn_xs = [int(chip_size / 2)] * num_annots\n bonn_ys = [int(chip_size / 4)] * num_annots\n blow_xs = [int(chip_size / 2)] * num_annots\n blow_ys = [int(chip_size * 3 / 4)] * num_annots\n\n bbox1_xs = [0] * num_annots\n bbox1_ys = [0] * num_annots\n bbox2_xs = [chip_size] * num_annots\n bbox2_ys = [chip_size] * num_annots\n widths = [chip_size] * num_annots\n heights = [chip_size] * num_annots\n\n callosities = [0] * num_annots\n\n header_row = [\n 'Image',\n 'whaleID',\n 'callosity',\n 'blowhead_x',\n 'blowhead_y',\n 'bonnet_x',\n 'bonnet_y',\n 'height',\n 'width',\n 'bbox1_x',\n 'bbox1_y',\n 'bbox2_x',\n 'bbox2_y',\n ]\n\n # we could skip zipping below by using ut.make_standard_csv\n full_ans = np.array(\n [\n fpaths,\n names,\n callosities,\n blow_xs,\n blow_ys,\n bonn_xs,\n bonn_ys,\n heights,\n widths,\n bbox1_xs,\n bbox1_ys,\n bbox2_xs,\n bbox2_ys,\n ]\n )\n\n # cleaned_ans = ibs.heuristically_clean_trainingset(full_ans)\n # csv_str = ut.make_standard_csv(cleaned_ans, header_row)\n\n csv_str = ut.make_standard_csv(full_ans, header_row)\n logger.info('converting metadata to dicts')\n csv_dict = ibs.csv_string_to_dicts(csv_str)\n # TODO: want to clean this here or solve nameless things somewhere else?\n # csv_dict = ibs.deepsense_clean_metadata_dict(csv_dict)\n logger.info('rotating those dicts')\n rotated_dicts = [rotate_row(row) for row in csv_dict]\n logger.info('back to a string')\n rotated_str = ibs.array_of_dicts_to_csv(rotated_dicts)\n\n return rotated_str\n\n\n@register_ibs_method\ndef deepsense_name_texts_to_neaq_ids(ibs, name_texts, container_name):\n neaq_to_name_text = ibs.wbia_plugin_deepsense_ensure_id_map()\n name_text_to_neaq = {neaq_to_name_text[val]: val for val in neaq_to_name_text}\n ans = name_texts.copy()\n for i in range(len(name_texts)):\n name = name_texts[i]\n if name in name_text_to_neaq:\n ans[i] = name_text_to_neaq[name]\n return ans\n\n\n@register_ibs_method\ndef deepsense_clean_csv_metadata_dict(ibs, csv_dict):\n # removes rows with an unknown name\n ans = [row for row in csv_dict if row['whaleID'] != '____']\n return ans\n\n\n@register_ibs_method\ndef heuristically_clean_trainingset(ibs, metadata_dicts):\n\n 
logger.info('heuristically_clean_trainingset called on %s rows' % len(metadata_dicts))\n\n clean_rows = [row for row in metadata_dicts if good_row_heuristic(row)]\n logger.info('heuristically_clean_trainingset now has %s rows' % len(clean_rows))\n diff = len(metadata_dicts) - len(clean_rows)\n percent = 100 * diff / len(metadata_dicts)\n logger.info(' we removed %s rows, %s%%' % (diff, percent))\n return clean_rows\n\n\ndef filter_only_resights(metadata_dicts, min_resights=2):\n ids = [row['whaleID'] for row in metadata_dicts]\n counts = [ids.count(i) for i in ids]\n filtered = [\n row for (row, count) in zip(metadata_dicts, counts) if count >= min_resights\n ]\n return filtered\n\n\ndef good_row_heuristic(dict_row):\n blowhead = (int(dict_row['blowhead_x']), int(dict_row['blowhead_y']))\n bonnet = (int(dict_row['bonnet_x']), int(dict_row['bonnet_y']))\n return (\n point_in_middle_half_by_height(blowhead)\n and point_in_middle_half_by_height(bonnet)\n and p1_is_left_of_p2(bonnet, blowhead)\n )\n\n\n# because sometimes our keypoints don't fall in the central square\ndef point_within_aoi(x, y, width, height, delta=10):\n # box_height = height 3\n # box_width = width / 3\n # here assuming height/width refer to the subset\n return (\n x > width - delta\n and x < 2 * width + delta\n and y > height - delta\n and y < 2 * height + delta\n )\n\n\ndef point_in_middle_half_by_height(p, w=256, h=256):\n py = p[1]\n return py > h / 4 and py < 3 * h / 4\n\n\ndef p1_is_left_of_p2(p1, p2):\n p1x, p2x = p1[0], p2[0]\n return p1x < p2x\n\n\n# goal is to overlay the bbox, blowhole and bonnet from the deepsense metadata\n@register_ibs_method\ndef deepsense_illustrate_metadata(\n ibs,\n species,\n limit=10,\n imgdir='/home/wildme/code/ibeis-deepsense-module/retraining/check_trainingset/',\n):\n aid_list = ibs.get_valid_aids(species=species)\n aid_list = aid_list[:limit]\n\n metadata = ibs.deepsense_retraining_metadata_list(aid_list)\n dicts = csv_string_to_dicts(metadata)\n\n for i in range(len(dicts)):\n illustrate_metadata_helper(dicts[i], i, imgdir)\n\n return dicts\n\n\ndef illustrate_metadata_helper(row, i, imgdir):\n pil_img = Image.open(row['Image'])\n canvas = Image.new('RGB', pil_img.size)\n canvas.paste(pil_img)\n draw = ImageDraw.Draw(canvas)\n\n blowhead_point = (int(row['blowhead_x']), int(row['blowhead_y']))\n blowhead_coords = bounding_box_at_centerpoint(blowhead_point)\n draw.ellipse(blowhead_coords, outline='green', width=2)\n\n bonnet_point = (int(row['bonnet_x']), int(row['bonnet_y']))\n bonnet_coords = bounding_box_at_centerpoint(bonnet_point)\n draw.ellipse(bonnet_coords, outline='red', width=2)\n\n ut.ensuredir(imgdir)\n output_filepath = join(imgdir, (str(i) + '.jpg'))\n logger.info('saving to %s' % output_filepath)\n canvas.save(output_filepath)\n return canvas\n\n\n@register_ibs_method\ndef csv_string_to_dicts(ibs, csvstring):\n csvstring = csvstring.replace('\\r', '')\n rows = csvstring.split('\\n')\n rows = [row.split(',') for row in rows]\n header = rows[0]\n rows = rows[1:-1] # -1 bc of a trailing empty string from initial split\n dicts = [{header[i]: row[i] for i in range(len(header))} for row in rows]\n return dicts\n\n\n# assumes every dict has same keys as the first one\n@register_ibs_method\ndef array_of_dicts_to_csv(ibs, dicts):\n headers = list(dicts[0].keys())\n values = [[d[header] for header in headers] for d in dicts]\n # transpose to work with ut.make_standard_csv\n values = np.array(values).T\n csv_str = ut.make_standard_csv(values, headers)\n return csv_str\n\n\ndef 
rotate_row(csv_row):\n\n fpath = csv_row['Image']\n np_img = load_image_np(fpath)\n # np.rot90 is counterclockwise\n np_img = np.rot90(np_img)\n imgname = fpath.split('/')[-1]\n new_path = '/home/wildme/code/ibeis-deepsense-module/retraining/rotated_passports/'\n new_path = new_path + imgname\n im = Image.fromarray(np_img)\n im.save(new_path)\n csv_row['Image'] = new_path\n\n bonnet = (csv_row['bonnet_x'], csv_row['bonnet_y'])\n rotated_bonn = rotate_90(bonnet)\n csv_row['bonnet_x'] = rotated_bonn[0]\n csv_row['bonnet_y'] = rotated_bonn[1]\n\n blow = (csv_row['blowhead_x'], csv_row['blowhead_y'])\n rotated_blow = rotate_90(blow)\n csv_row['blowhead_x'] = rotated_blow[0]\n csv_row['blowhead_y'] = rotated_blow[1]\n\n bbox1 = (csv_row['bbox1_x'], csv_row['bbox1_y'])\n rotated_bbox1 = rotate_90(bbox1)\n csv_row['bbox1_x'] = rotated_bbox1[0]\n csv_row['bbox1_y'] = rotated_bbox1[1]\n\n bbox2 = (csv_row['bbox2_x'], csv_row['bbox2_y'])\n rotated_bbox2 = rotate_90(bbox2)\n csv_row['bbox2_x'] = rotated_bbox2[0]\n csv_row['bbox2_y'] = rotated_bbox2[1]\n\n csv_row['width'], csv_row['height'] = csv_row['height'], csv_row['width']\n\n return csv_row\n\n\n@register_ibs_method\ndef subsample_matching_distribution_from_file(ibs, src_fpath, target_fpath):\n with open(src_fpath, 'r') as file:\n src_csv = file.read()\n with open(target_fpath, 'r') as file:\n target_csv = file.read()\n\n source_metadata = ibs.csv_string_to_dicts(src_csv)\n target_metadata = ibs.csv_string_to_dicts(target_csv)\n\n return subsample_matching_distribution(source_metadata, target_metadata)\n\n\n# resample source_metadata (a csv dict) to match the sightings/individual distribution of target_metadata\ndef subsample_matching_distribution(source_metadata, target_metadata):\n from random import sample\n\n src_names = [row['whaleID'] for row in source_metadata]\n tgt_names = [row['whaleID'] for row in target_metadata]\n src_name_lookup = get_lookup_dict(src_names)\n tgt_name_lookup = get_lookup_dict(tgt_names)\n\n src_hist = [\n {'name': name, 'count': len(src_name_lookup[name])} for name in set(src_names)\n ]\n tgt_hist = [\n {'name': name, 'count': len(tgt_name_lookup[name])} for name in set(tgt_names)\n ]\n\n src_hist = sorted(src_hist, key=lambda i: i['count'])\n tgt_hist = sorted(tgt_hist, key=lambda i: i['count'])\n initial_target_sighting_dist = [row['count'] for row in tgt_hist]\n\n # remove singletons\n src_hist = remove_singletons(src_hist)\n tgt_hist = remove_singletons(tgt_hist)\n\n # we now need to subsample tgt_hist so that it has the same number of rows (names) as src_hist\n if len(tgt_hist) > len(src_hist):\n tgt_hist = sample(tgt_hist, len(src_hist))\n tgt_hist = sorted(tgt_hist, key=lambda i: i['count'])\n\n target_sighting_dist = [row['count'] for row in tgt_hist]\n\n # sort the histograms\n\n subsampled_src = []\n already_sampled_rows = []\n\n for i, row in zip(range(len(tgt_hist)), tgt_hist):\n tgt_count = row['count']\n src_row = get_next_row_for_subsampling(tgt_count, src_hist, already_sampled_rows)\n if src_row > len(src_hist):\n break\n name = src_hist[src_row]['name']\n name_rows = src_name_lookup[name]\n assert (\n len(name_rows) >= tgt_count\n ), 'We messed up subsampling: not enough sightings for this name'\n if len(name_rows) > tgt_count:\n name_rows = sample(name_rows, tgt_count)\n for src_row in name_rows:\n subsampled_src.append(source_metadata[src_row])\n\n # now we validate the distribution of sightings per name\n final_names = [row['whaleID'] for row in subsampled_src]\n final_name_lookup = 
get_lookup_dict(final_names)\n final_hist = [\n {'name': n, 'count': len(final_name_lookup[n])} for n in set(final_name_lookup)\n ]\n final_sighting_dist = [row['count'] for row in final_hist]\n\n initial_target_mean = np.mean(initial_target_sighting_dist)\n initial_target_std = np.std(initial_target_sighting_dist)\n logger.info(\n 'Initial Target sighting dist: mean=%2f, std=%2f'\n % (initial_target_mean, initial_target_std)\n )\n\n target_mean = np.mean(target_sighting_dist)\n target_std = np.std(target_sighting_dist)\n logger.info(\n 'Target sighting distribution: mean=%2f, std=%2f' % (target_mean, target_std)\n )\n\n final_mean = np.mean(final_sighting_dist)\n final_std = np.std(final_sighting_dist)\n logger.info(\n 'Final sighting distribution: mean=%2f, std=%2f' % (final_mean, final_std)\n )\n\n csv_str = array_of_dicts_to_csv(None, subsampled_src)\n\n return csv_str\n\n\n# generates the whale_ids.csv file that deepsense uses internally to map whale IDs\n@register_ibs_method\ndef deepsense_internal_mapping_csv(ibs, csv_dict):\n names = [row['whaleID'] for row in csv_dict]\n sorted_names = list(set(names))\n sorted_names.sort()\n name_dict = [\n {'indexID': i, 'whaleID': sorted_names[i]} for i in range(len(sorted_names))\n ]\n name_str = ibs.array_of_dicts_to_csv(name_dict)\n return name_str\n\n\ndef remove_singletons(sorted_name_histogram):\n cutoff = 0\n while sorted_name_histogram[cutoff]['count'] < 2:\n cutoff += 1\n return sorted_name_histogram[cutoff:]\n\n\ndef get_next_row_for_subsampling(tgt_count, sorted_histogram, already_sampled_rows):\n if len(already_sampled_rows) == 0:\n next_row = 0\n else:\n next_row = already_sampled_rows[-1] + 1\n while (\n next_row < len(sorted_histogram)\n and sorted_histogram[next_row]['count'] < tgt_count\n ):\n next_row += 1\n # now next_row is the first row in sorted_histogram with count at least tgt_count\n already_sampled_rows.append(next_row)\n return next_row\n\n\n# given a list, returns a dict (multimap) where the keys are the listvalues and the values are the indices\ndef get_lookup_dict(val_list):\n lookup_dict = {}\n for value, i in zip(val_list, range(len(val_list))):\n add_to_multimap(lookup_dict, value, i)\n return lookup_dict\n\n\ndef add_to_multimap(multimap, key, value):\n if key in multimap:\n multimap[key] += [value]\n else:\n multimap[key] = [value]\n return multimap\n\n\ndef rotate_90(xy, img_radius=128):\n # move center of image to origin\n translated = (int(xy[0]) - img_radius, int(xy[1]) - img_radius)\n # rotate 90 degrees counterclockwise around center\n rotated_translated = (-translated[1], translated[0])\n # translate back to original position\n rotated = (rotated_translated[0] + img_radius, rotated_translated[1] + img_radius)\n return rotated\n\n\ndef load_image_np(infilename):\n img = Image.open(infilename)\n data = np.array(img)\n return data\n\n\nRETRAINING_DIR = '/home/wildme/code/ibeis-deepsense-module/retraining/code/whales/'\nNUM_CLASSES_TAG = \"'num_classes':\"\n\n\n# TODO: complete this method\n@register_ibs_method\ndef update_deepsense_training_configs(ibs, metadata_fpath, retraining_dir=RETRAINING_DIR):\n\n assert exists(metadata_fpath), 'No metadata file at %s' % metadata_fpath\n\n # exp_name name is the name of the file (in between last slash and .csv)\n # exp_name = metadata_fpath.split('/')[-1].split('.csv')[0]\n\n # now find neptune.yaml and pipeline_config.py\n neptune_yaml_fpath = retraining_dir + 'neptune.yaml'\n assert exists(neptune_yaml_fpath), (\n 'Could not find neptune.yaml at %s' % 
neptune_yaml_fpath\n )\n pipeline_config_fpath = retraining_dir + 'pipeline_config.py'\n assert exists(pipeline_config_fpath), (\n 'Could not find pipeline_config.py at %s' % pipeline_config_fpath\n )\n\n # update pipeline_config.py so that it has the correct num_classes\n with open(metadata_fpath, 'r') as f:\n csv_str = f.read()\n csv_dict = ibs.csv_string_to_dicts(csv_str)\n names = [row['whaleID'] for row in csv_dict]\n num_classes = len(set(names))\n # we need to find the _first_ row that says 'num_classes': X and replace X with correct num_classes\n with open(pipeline_config_fpath, 'r') as f:\n pipeline_config = f.read()\n pipeline_config_rows = pipeline_config.split('\\n')\n num_classes_row_i = first_row_with_substr(pipeline_config, NUM_CLASSES_TAG)\n num_classes_row_str = pipeline_config_rows[num_classes_row_i]\n new_num_classes_row_str = update_num_classes_row(num_classes_row_str, num_classes)\n pipeline_config_rows[num_classes_row_i] = new_num_classes_row_str\n new_pipeline_config = '\\n'.join(pipeline_config_rows)\n\n return new_pipeline_config\n\n # now save new_pipeline_config\n\n # also save old pipeline_config in a cruft directory?\n\n # then do the same for neptune.yaml\n\n\ndef first_row_with_substr(string, substring):\n rows = string.split('\\n')\n for i in range(len(rows)):\n if substring in rows[i]:\n return i\n return None\n\n\ndef update_num_classes_row(rowstr, new_num_classes):\n before = rowstr.split(\"'num_classes':\")[0]\n return before + NUM_CLASSES_TAG + ' ' + str(new_num_classes) + ','\n\n\nif __name__ == '__main__':\n r\"\"\"\n CommandLine:\n python -m wbia_deepsense._plugin --allexamples\n \"\"\"\n import multiprocessing\n\n multiprocessing.freeze_support() # for win32\n import utool as ut # NOQA\n\n ut.doctest_funcs()\n"
] |
[
[
"numpy.rot90",
"numpy.all",
"numpy.concatenate",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.sum",
"numpy.hypot"
]
] |
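The `rotate_90` helper in the row above rotates passport keypoints a quarter turn counterclockwise about the chip center, matching `np.rot90` applied to the pixels. A minimal, self-contained sketch of that coordinate math (the function body is copied from the row; the sample point and the four-turn identity check are my own):

```python
# Standalone check of the 90-degree CCW keypoint rotation used by rotate_90 above.
# The chip is assumed square with side 2 * img_radius (the 256 px passports).
def rotate_90(xy, img_radius=128):
    tx, ty = int(xy[0]) - img_radius, int(xy[1]) - img_radius  # center -> origin
    rx, ry = -ty, tx                                           # CCW quarter turn
    return (rx + img_radius, ry + img_radius)                  # origin -> center

p = (256, 128)                      # midpoint of the right edge of a 256x256 chip
assert rotate_90(p) == (128, 256)   # lands on the midpoint of the bottom edge
q = p
for _ in range(4):                  # four quarter turns are the identity
    q = rotate_90(q)
assert q == p
```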
sunutf/TSM
|
[
"fe42b612e0d39a61dbebf8aa6a0b93e62ec4a858"
] |
[
"ops/channel_non_local.py"
] |
[
"# Non-local block using embedded gaussian\n# Code from\n# https://github.com/AlexHex7/Non-local_pytorch/blob/master/Non-Local_pytorch_0.3.1/lib/non_local_embedded_gaussian.py\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass _NonLocalBlockND(nn.Module):\n def __init__(self, in_channels, inter_channels_q=None, inter_channels_kv=None, dimension=3, sub_sample=True, bn_layer=True):\n super(_NonLocalBlockND, self).__init__()\n\n assert dimension in [1, 2, 3]\n\n self.dimension = dimension\n self.sub_sample = sub_sample\n\n self.in_channels = in_channels\n self.inter_channels_q = inter_channels_q\n self.inter_channels_kv = inter_channels_kv\n\n if self.inter_channels_q is None:\n self.inter_channels_q = in_channels // 2\n if self.inter_channels_q == 0:\n self.inter_channels_q = 1\n \n if self.inter_channels_kv is None:\n self.inter_channels_kv = in_channels // 2\n if self.inter_channels_kv == 0:\n self.inter_channels_kv = 1\n\n if dimension == 3:\n conv_nd = nn.Conv3d\n max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))\n bn = nn.BatchNorm3d\n elif dimension == 2:\n conv_nd = nn.Conv2d\n max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))\n bn = nn.BatchNorm2d\n else:\n conv_nd = nn.Conv1d\n max_pool_layer = nn.MaxPool1d(kernel_size=(2))\n bn = nn.BatchNorm1d\n\n self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels_kv,\n kernel_size=1, stride=1, padding=0)\n\n if bn_layer:\n self.W = nn.Sequential(\n conv_nd(in_channels=self.inter_channels_q, out_channels=self.in_channels,\n kernel_size=1, stride=1, padding=0),\n bn(self.in_channels)\n )\n nn.init.constant_(self.W[1].weight, 0)\n nn.init.constant_(self.W[1].bias, 0)\n else:\n self.W = conv_nd(in_channels=self.inter_channels_q, out_channels=self.in_channels,\n kernel_size=1, stride=1, padding=0)\n nn.init.constant_(self.W.weight, 0)\n nn.init.constant_(self.W.bias, 0)\n\n self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels_q,\n kernel_size=1, stride=1, padding=0)\n self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels_kv,\n kernel_size=1, stride=1, padding=0)\n\n if sub_sample:\n self.g = nn.Sequential(self.g, max_pool_layer)\n self.phi = nn.Sequential(self.phi, max_pool_layer)\n\n def forward(self, x):\n '''\n :param x: (b, c, t, h, w)\n :return:\n '''\n\n batch_size = x.size(0)\n\n g_x = self.g(x).view(batch_size, self.inter_channels_kv, -1)\n g_x = g_x.permute(0, 2, 1)#B,C,HW => B,HW,C''\n\n theta_x = self.theta(x).view(batch_size, self.inter_channels_q, -1)#B,C,HW => B,C',HW\n #theta_x = theta_x.permute(0, 2, 1)\n phi_x = self.phi(x).view(batch_size, self.inter_channels_kv, -1)\n phi_x = phi_x.permute(0,2,1) #B,C,HW => B,HW,C''\n f = torch.matmul(theta_x, phi_x)\n f_div_C = F.softmax(f, dim=-1)\n f_dif_C = f_div_C.permute(0,2,1) #B,C'',C'\n\n #y = torch.matmul(f_div_C, g_x)\n y = torch.matmul(g_x, f_dif_C) #B,HW,C'' x B,C'',C' => B,HW,C'\n y = y.permute(0, 2, 1).contiguous() # => B,C',HW \n y = y.view(batch_size, self.inter_channels_kv, *x.size()[2:])\n W_y = self.W(y) #=>B,C,H,W\n z = W_y + x\n\n return z\n\n\nclass CNONLocalBlock1D(_NonLocalBlockND):\n def __init__(self, in_channels, inter_channels_q=None, inter_channels_kv=None, sub_sample=True, bn_layer=True):\n super(CNONLocalBlock1D, self).__init__(in_channels,\n inter_channels_q = inter_channels_q,\n inter_channels_kv = inter_channels_kv,\n dimension=1, sub_sample=sub_sample,\n bn_layer=bn_layer)\n\n\nclass CNONLocalBlock2D(_NonLocalBlockND):\n def __init__(self, 
in_channels, inter_channels_q=None, inter_channels_kv=None, sub_sample=True, bn_layer=True):\n        super(CNONLocalBlock2D, self).__init__(in_channels,\n                                               inter_channels_q=inter_channels_q,\n                                               inter_channels_kv=inter_channels_kv,\n                                               dimension=2, sub_sample=sub_sample,\n                                               bn_layer=bn_layer)\n\n\nclass CNONLocalBlock3D(_NonLocalBlockND):\n    def __init__(self, in_channels, inter_channels_q=None, inter_channels_kv=None, sub_sample=False, bn_layer=True):\n        super(CNONLocalBlock3D, self).__init__(in_channels,\n                                               inter_channels_q=inter_channels_q,\n                                               inter_channels_kv=inter_channels_kv,\n                                               dimension=3, sub_sample=sub_sample,\n                                               bn_layer=bn_layer)\n\n\nclass CNL3DWrapper(nn.Module):\n    def __init__(self, block, n_segment):\n        super(CNL3DWrapper, self).__init__()\n        self.block = block\n        self.cnl = CNONLocalBlock3D(block.bn3.num_features)\n        self.n_segment = n_segment\n\n    def forward(self, x):\n        x = self.block(x)\n\n        nt, c, h, w = x.size()\n        x = x.view(nt // self.n_segment, self.n_segment, c, h, w).transpose(1, 2)  # n, c, t, h, w\n        x = self.cnl(x)\n        x = x.transpose(1, 2).contiguous().view(nt, c, h, w)\n        return x\n\n\ndef make_c_non_local(net, n_segment):\n    import torchvision\n    import archs\n    if isinstance(net, torchvision.models.ResNet):\n        '''\n        net.layer2 = nn.Sequential(\n            CNL3DWrapper(net.layer2[0], n_segment),\n            net.layer2[1],\n            CNL3DWrapper(net.layer2[2], n_segment),\n            net.layer2[3],\n        )\n        net.layer3 = nn.Sequential(\n            CNL3DWrapper(net.layer3[0], n_segment),\n            net.layer3[1],\n            CNL3DWrapper(net.layer3[2], n_segment),\n            net.layer3[3],\n            CNL3DWrapper(net.layer3[4], n_segment),\n            net.layer3[5],\n        )\n        '''\n        net.layer4 = nn.Sequential(\n            CNL3DWrapper(net.layer4[0], n_segment),\n            net.layer4[1],\n            CNL3DWrapper(net.layer4[2], n_segment),\n        )\n\n    else:\n        raise NotImplementedError\n\n\nif __name__ == '__main__':\n    from torch.autograd import Variable\n    import torch\n\n    # sub_sample must be False for this channel variant: theta is never pooled,\n    # so pooling phi/g would break the spatial contraction in torch.matmul.\n    sub_sample = False\n    bn_layer = True\n\n    # fixed: the classes defined in this file are the CNONLocalBlock* variants\n    img = Variable(torch.zeros(2, 3, 20))\n    net = CNONLocalBlock1D(3, sub_sample=sub_sample, bn_layer=bn_layer)\n    out = net(img)\n    print(out.size())\n\n    img = Variable(torch.zeros(2, 3, 20, 20))\n    net = CNONLocalBlock2D(3, sub_sample=sub_sample, bn_layer=bn_layer)\n    out = net(img)\n    print(out.size())\n\n    img = Variable(torch.randn(2, 3, 10, 20, 20))\n    net = CNONLocalBlock3D(3, sub_sample=sub_sample, bn_layer=bn_layer)\n    out = net(img)\n    print(out.size())\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.functional.softmax",
"torch.zeros",
"torch.nn.init.constant_",
"torch.randn",
"torch.matmul",
"torch.nn.MaxPool3d",
"torch.nn.MaxPool2d",
"torch.nn.MaxPool1d"
]
] |
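A hedged smoke-test sketch for the channel non-local block defined in the row above. As far as I can tell from the forward pass, `sub_sample=False` is required (only `phi` and `g` are pooled, so with pooling enabled the spatial sizes of `theta_x` and `phi_x` disagree in the first `matmul`), and the final `view` assumes `inter_channels_q == inter_channels_kv`, which the defaults provide. The import path follows this row's `file_path`:

```python
# Minimal shape check of the channel non-local block (a sketch, not from the repo).
import torch
from ops.channel_non_local import CNONLocalBlock2D  # path as listed in this row

x = torch.randn(2, 16, 20, 20)                       # (B, C, H, W)
block = CNONLocalBlock2D(in_channels=16, sub_sample=False, bn_layer=True)
y = block(x)
assert y.shape == x.shape  # residual connection z = W_y + x preserves the shape
```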
CVanchieri/forty-jekyll-theme
|
[
"454941578bb3e0d0f5d4db75907ba7df036b925a"
] |
[
"posts/DecisionTreeFromScratchPost/dt.py"
] |
[
"# necessary imports\r\nimport numpy as np\r\n\r\n\"\"\"\r\n### Decision Tree Class ###\r\nDecision trees are one way to display an algorithm that only contains conditional control statements,\r\ncommonly used in operations research, specifically in decision analysis, to help identify a strategy\r\nmost likely to reach a goal, but are also a popular tool in machine learning.\r\n\"\"\"\r\nclass DecisionTree(object): # create a decision tree class 'CART'\r\n def __init__(self, max_depth, min_splits): # init constructor method\r\n self.max_depth = max_depth # set the self.max_depth equal to the max_depth value\r\n self.min_splits = min_splits # set the self.mon_splits equal to _min_splits value\r\n \"\"\"\r\n ### Fit: feature, label ###\r\n Model fitting is a measure of how well a machine learning model generalizes to similar data to that\r\n on which it was trained. A model that is well-fitted produces more accurate outcomes. A model that is\r\n overfitted matches the data too closely. A model that is underfitted doesn't match closely enough.\r\n \"\"\"\r\n def fit(self, feature, label): # fit method\r\n self.feature = feature # set the self.feature equal to the feature value\r\n self.label = label # set the self.label equal to the label value\r\n self.train_data = np.column_stack((self.feature,self.label)) # set the self.train_data with column stack from numpy on the self.feature & self.label\r\n self.build_tree() # build the tree\r\n \"\"\"\r\n ### Gini Impurity: groups, class labels ###\r\n Gini Impurity tells us what is the probability of misclassifying an observation and is used in calculation\r\n of the split, the lower the gini score the better the split is.\r\n \"\"\"\r\n def gini(self, groups, class_labels): # compute gini similiarity method\r\n number_sample = sum([len(group) for group in groups]) # set the num_sample equal to the sum of the length of group in groups\r\n gini_score = 0 # set the gini_score equal to 0\r\n\r\n for group in groups: # for loop, group in groups\r\n size = float(len(group)) # set the size equal to the length of group as a float\r\n\r\n if size == 0: # if the size is equal to 0\r\n continue # continue\r\n score = 0.0 # set the score equal to 0.0\r\n\r\n for label in class_labels: # for loop, label in class_labels\r\n porportion = (group[:,-1] == label).sum() / size # set the proprotion equal to the all but the last item in the group labels sum divided by the size\r\n score += porportion * porportion # add proprotion times proprotion to the score\r\n gini_score += (1.0 - score) * (size/number_sample) # add 1 minus the score times the size divided by the num_sample\r\n\r\n return gini_score # return the gini_score\r\n \"\"\"\r\n ### Terminal Node: _group ###\r\n Terminal nodes (leaf nodes) are the final nodes that do not split further.\r\n \"\"\"\r\n def term_node(self, group): # terminal node method\r\n class_labels, count = np.unique(group[:,-1], return_counts= True) # set a class_labels count equal to the unique count of all class_labels but the last\r\n return class_labels[np.argmax(count)] # return the class_labels count\r\n \"\"\"\r\n ### Split: index, val, data ###\r\n Splitting a node into two sub-nodes is called splitting. 
It happens at all nodes except leaf nodes (terminal nodes).\r\n \"\"\"\r\n def split(self, index, val, data): # split method\r\n data_l = np.array([]).reshape(0,self.train_data.shape[1]) # set the data_l equal the reshaped train data array\r\n data_r = np.array([]).reshape(0, self.train_data.shape[1]) # set the data_r equal the reshaped train data array\r\n\r\n for row in data: # for loop, row in data\r\n if row[index] <= val: # if the row index value is less than or equal to the val\r\n data_l = np.vstack((data_l,row)) # set the data_left equal to the vertial stack of the data_l and row\r\n\r\n if row[index] > val: # if the row index value is greater than the val\r\n data_r = np.vstack((data_r, row)) # set the data_right equal to the vertial stack of the data_r and row\r\n\r\n return data_l, data_r # return the data_l value and data_r value\r\n \"\"\"\r\n ### Best Split: data ###\r\n Best split uses the gini score and initial split to check all the values of each attribute and calculates the cost\r\n of the split to find the best possible split.\r\n \"\"\"\r\n def best_split(self, data): # best split method\r\n class_labels = np.unique(data[:,-1]) # set the class_labels equal to all the unique values of data but the last\r\n best_index = 999 # set the best_index to equal 999\r\n best_val = 999 # set the best_val to equal 999\r\n best_score = 999 # set the best_score equal to 999\r\n best_groups = None # set the best_groups equal to None\r\n\r\n for i in range(data.shape[1]-1): # for loop, i in the range of the data reshaped\r\n for row in data: # for loop, row in data\r\n groups = self.split(i, row[i], data) # set groups equal to the split function on i , row[i], and data\r\n gini_score = self.gini(groups,class_labels) # set the gini_score equal the the gini function on groups and class labels\r\n\r\n if gini_score < best_score: # if gini_score is less than the best_score\r\n best_index = i # set the best_index equal to i value\r\n best_val = row[i] # set the best_val equal to row[i] value\r\n best_score = gini_score # set the best_score equal to gini_score\r\n best_groups = groups # set the best_groups equal to groups\r\n result = {} # create an empty dictionary\r\n result['index'] = best_index # set the result index equal to the best_index\r\n result['val'] = best_val # set the result val equal to the best_val\r\n result['groups'] = best_groups # set the result groups equal to the best_groups\r\n return result # return the result\r\n \"\"\"\r\n ### Recursive Split: node, depth ###\r\n Recursively split the data and check for early stop arguments to create terminal node.\r\n \"\"\"\r\n def rec_split(self, node, depth): # split branch method\r\n l_node , r_node = node['groups'] # split node groups into l_node and r_node\r\n del(node['groups']) # deleted the node groups\r\n\r\n if not isinstance(l_node,np.ndarray) or not isinstance(r_node,np.ndarray): # if its not in the left_node or right_node ndoe array\r\n node['left'] = self.term_node(l_node + r_node) # set the left node equal to the terminal_node on the left_node and the right_node\r\n node['right'] = self.term_node(l_node + r_node) # set the right node equal to the terminal_node on the left_node and the right_node\r\n return\r\n\r\n if depth >= self.max_depth: # if the depth is greater than or equal to the max_depth\r\n node['left'] = self.term_node(l_node) # set the left node equal to the terminal_node on the left_node\r\n node['right'] = self.term_node(r_node) # set the right node equal to the terminal_node ob the right_node\r\n 
return\r\n\r\n        if len(l_node) <= self.min_splits: # if the length of the left_node is less than or equal to the min_splits\r\n            node['left'] = self.term_node(l_node) # set the left node equal to the term_node on the left_node\r\n        else: # else\r\n            node['left'] = self.best_split(l_node) # set the left node equal to the best_split on the left_node\r\n            self.rec_split(node['left'], depth + 1) # rec_split on the left node with depth + 1\r\n\r\n        if len(r_node) <= self.min_splits: # if the length of the right_node is less than or equal to the min_splits\r\n            node['right'] = self.term_node(r_node) # set the right node equal to the term_node on the right_node\r\n        else:\r\n            node['right'] = self.best_split(r_node) # set the right node equal to the best_split of the right_node\r\n            self.rec_split(node['right'], depth + 1) # rec_split on the right node with depth + 1\r\n    \"\"\"\r\n    ### Build Tree: ###\r\n    Building the tree starts at the root node, which then uses the best split on itself recursively to construct the entire tree.\r\n    \"\"\"\r\n    def build_tree(self): # build tree method\r\n        self.root = self.best_split(self.train_data) # set the root equal to the best_split on the train_data\r\n        self.rec_split(self.root, 1) # rec_split on the root with depth 1\r\n        return self.root # return the root\r\n    \"\"\"\r\n    ### Predict: node, row ###\r\n    Node prediction checks if the node is either a terminal value to be returned as the prediction, or if it is a dictionary\r\n    node containing another level to be checked.\r\n    \"\"\"\r\n    def pred_(self, node, row): # predict method\r\n        if row[node['index']] < node['val']: # if the row node index is less than the node val\r\n            if isinstance(node['left'], dict): # if the node left is a dictionary\r\n                return self.pred_(node['left'], row) # return the pred_ of the node left and row\r\n            else: # else\r\n                return node['left'] # return the node left\r\n\r\n        else: # else\r\n            if isinstance(node['right'], dict): # if the node right is a dictionary\r\n                return self.pred_(node['right'], row) # return the pred_ of the node right and row\r\n            else: # else\r\n                return node['right'] # return the node right\r\n\r\n    def pred(self, test_data): # predict method\r\n        self.pred_label = np.array([]) # set the pred_label to an empty array\r\n        for i in test_data: # for loop, i in test_data\r\n            self.pred_label = np.append(self.pred_label, self.pred_(self.root, i)) # append the pred_ of the root on row i to the pred_label\r\n\r\n        return self.pred_label # return the pred_label\r\n"
] |
[
[
"numpy.unique",
"numpy.argmax",
"numpy.column_stack",
"numpy.array",
"numpy.vstack"
]
] |
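A worked numeric check of the weighted Gini impurity that `DecisionTree.gini` in the row above computes: score = sum over groups of (1 - sum over labels of p^2) * (group size / total samples). The toy groups are my own, and the loop mirrors the method directly rather than importing the post's file:

```python
# Worked Gini example matching DecisionTree.gini above: two groups, labels in last column.
import numpy as np

left = np.array([[0.0, 0], [1.0, 0]])   # pure group: labels {0, 0}
right = np.array([[2.0, 1], [3.0, 0]])  # mixed group: labels {1, 0}
# left:  1 - (2/2)^2           = 0.0
# right: 1 - (1/2)^2 - (1/2)^2 = 0.5
# weighted: 0.0 * (2/4) + 0.5 * (2/4) = 0.25

n = len(left) + len(right)
score = 0.0
for group in (left, right):
    size = len(group)
    p = [(group[:, -1] == lab).sum() / size for lab in (0, 1)]
    score += (1.0 - sum(x * x for x in p)) * (size / n)
assert abs(score - 0.25) < 1e-12
```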
Healthedata1/pubsub-endpoint
|
[
"89169850b0f05c92498061a7efe2c696cbd35029"
] |
[
"app.py"
] |
[
"# A very simple Flask app to get started with using\n# FHIR Subscriptions\n# This is a receiver for the FHIR R4 Server URL (https://subscriptions.argo.run/)\n# with an endpoint = \"http://healthedatainc2.pythonanywhere.com/webhook\"\n# It just saves the subscription notification data to a flat csv file \"data.csv\"\n# to initialize the data.csv:\n#\n# data = dict(\n#     timestamp = [], # Bundle['timestamp']\n#     type = [], # Bundle['entry'][0]['resource']['parameter'][5]['valueCode']\n#     status = [], # Bundle['entry'][0]['resource']['parameter'][4]['valueCode']\n#     topic = [], # Bundle['entry'][0]['resource']['parameter'][1]['valueUri']\n#     event_id = [], # Bundle['entry'][1]['fullUrl']\n# )\n# df = pd.DataFrame(data=data)\n# df\n#\n# file_name = 'data.csv'\n# df.to_csv(file_name)\n# print(f'saving {file_name} as csv ...')\n#\n# my_csv = pd.read_csv(file_name, index_col=0)\n# my_csv\n#\n# and display the subscription notification data.\n# the csv file \"data.csv\" is constantly appended to and not created each time\n\nfrom flask import Flask, request, Response, render_template, session\nimport os\nimport logging\nfrom datetime import datetime\nfrom json import dumps, loads\nimport pandas as pd\n\nlogging.basicConfig(\n    filename='demo.log',\n    level=logging.DEBUG,\n    format='[%(asctime)s] %(levelname)s in %(module)s %(lineno)d: %(message)s')\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\napp.secret_key = 'my_secret_key'\n\nfile_name = 'data.csv'\n\nempty_table = dict(\n    timestamp=[],  # Bundle['timestamp']\n    type=[],  # Bundle['entry'][0]['resource']['parameter'][5]['valueCode']\n    status=[],  # Bundle['entry'][0]['resource']['parameter'][4]['valueCode']\n    topic=[],  # Bundle['entry'][0]['resource']['parameter'][1]['valueUri']\n    event_id=[],  # Bundle['entry'][1]['fullUrl']\n)\n\n# see add_url_rule to conditionally open the rest hook, e.g. after subscribing\n\n@app.route('/webhook', methods=['POST'])\ndef respond():\n    # webhook logic to do something\n    app.logger.info(request.headers)\n    app.logger.info(request.json)\n    try:  # sometimes is empty\n        bundle_event_id = request.json['entry'][1]['fullUrl']\n    except IndexError:  # if there is no event entry, that is OK\n        #app.logger.exception(e)\n        bundle_event_id = None\n    except KeyError:  # if there is no fullUrl, that is no good\n        #app.logger.exception(e)\n        return Response(status=400)\n    try:  # if these are empty then fail\n        bundle_ts = request.json['timestamp']\n        params = request.json['entry'][0]['resource']['parameter']\n        bundle_type = [param['valueCode'] for param in params if param['name'] == 'type'][0]\n        bundle_status = [param['valueCode'] for param in params if param['name'] == 'status'][0]\n        bundle_topic = [param['valueUri'] for param in params if param['name'] == 'topic'][0]\n    except Exception as e:  # work on python 3.x\n        #app.logger.exception(e)\n        return Response(status=400)\n    else:\n        df = pd.read_csv(file_name, index_col=0)\n        my_row = pd.Series(\n            data=[bundle_ts, bundle_type, bundle_status, bundle_topic, bundle_event_id],\n            index=df.columns,\n        )\n        #app.logger.info(f'{df.shape[0]} rows')\n        df = df.append(my_row, ignore_index=True)\n        df.to_csv(file_name)\n        #app.logger.info(f'saving {file_name} as csv ...')\n    return Response(status=200)\n\n@app.route('/', methods=['POST', 'GET'])\ndef html_table():\n    #app.logger.info(f\"request.method = {request.method}\")\n    if \"clear_button\" in request.form:\n        #app.logger.info(\"clear table\")\n        df = pd.DataFrame(data=empty_table)\n        df.to_csv(file_name)\n    df = pd.read_csv(file_name, index_col=0, keep_default_na=False)\n    
#app.logger.info(\"update table\")\n return render_template('index.html',\n tables=[df.to_html(classes='data')],\n titles = df.columns.values,)\n\nif __name__ == '__main__':\n app.run(debug=True)\n"
] |
[
[
"pandas.read_csv",
"pandas.Series",
"pandas.DataFrame"
]
] |
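A hypothetical client-side check for the `/webhook` handler in the row above, assuming the app is running locally (`python app.py`) and `data.csv` has been initialized with the five columns the header comment describes; the bundle shape mirrors exactly the fields `respond()` reads, and all values here are invented for illustration:

```python
# Hypothetical client sketch: POST a minimal notification bundle to the local handler.
import requests

bundle = {
    "timestamp": "2021-01-01T00:00:00Z",
    "entry": [
        {"resource": {"parameter": [
            {"name": "topic", "valueUri": "http://example.org/topic/admission"},
            {"name": "type", "valueCode": "event-notification"},
            {"name": "status", "valueCode": "active"},
        ]}},
        {"fullUrl": "Encounter/123"},  # becomes bundle_event_id via entry[1]['fullUrl']
    ],
}
r = requests.post("http://127.0.0.1:5000/webhook", json=bundle)
print(r.status_code)  # 200 if parsing succeeded and a row was appended to data.csv
```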
dbddqy/robot_kinematics
|
[
"55cb956869ac805b0caf629c40216e3149b3fd1a"
] |
[
"examples/trajectory.py"
] |
[
"#!/usr/bin/env python3\n\nfrom visual_kinematics.RobotSerial import *\nfrom visual_kinematics.RobotTrajectory import *\nimport numpy as np\nfrom math import pi\n\n\ndef main():\n np.set_printoptions(precision=3, suppress=True)\n\n dh_params = np.array([[0.163, 0., 0.5 * pi, 0.],\n [0., 0.632, pi, 0.5 * pi],\n [0., 0.6005, pi, 0.],\n [0.2013, 0., -0.5 * pi, -0.5 * pi],\n [0.1025, 0., 0.5 * pi, 0.],\n [0.094, 0., 0., 0.]])\n\n robot = RobotSerial(dh_params)\n\n # =====================================\n # trajectory\n # =====================================\n\n frames = [Frame.from_euler_3(np.array([0.5 * pi, 0., pi]), np.array([[0.28127], [0.], [1.13182]])),\n Frame.from_euler_3(np.array([0.25 * pi, 0., 0.75 * pi]), np.array([[0.48127], [0.], [1.13182]])),\n Frame.from_euler_3(np.array([0.5 * pi, 0., pi]), np.array([[0.48127], [0.], [0.63182]]))]\n time_points = np.array([0., 6., 10.])\n trajectory = RobotTrajectory(robot, frames, time_points)\n trajectory.show(motion=\"p2p\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.set_printoptions",
"numpy.array"
]
] |
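The `dh_params` rows in the trajectory example above appear to follow the `[d, a, alpha, theta_offset]` column order used by visual_kinematics (an assumption; check the library's docs). A small numpy sketch of the classic Denavit-Hartenberg link transform (my own helper, not part of the library) shows what one row encodes:

```python
# Sketch of the classic DH link transform one dh_params row encodes,
# assuming the [d, a, alpha, theta_offset] column order used by visual_kinematics.
import numpy as np
from math import pi

def dh_transform(d, a, alpha, theta):
    ct, st = np.cos(theta), np.sin(theta)
    ca, sa = np.cos(alpha), np.sin(alpha)
    return np.array([
        [ct, -st * ca,  st * sa, a * ct],
        [st,  ct * ca, -ct * sa, a * st],
        [0.,       sa,       ca,      d],
        [0.,       0.,       0.,     1.],
    ])

# First link of the arm above: d=0.163, a=0, alpha=pi/2, at joint angle theta=0.
T1 = dh_transform(0.163, 0., 0.5 * pi, 0.)
print(T1.round(3))  # a pure z-offset of 0.163 m plus the alpha twist
```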
elbeejay/pyvista
|
[
"ad469624e19cfa7c94a551475708e547d1341fa1"
] |
[
"tests/test_utilities.py"
] |
[
"\"\"\" test pyvista.utilities \"\"\"\nimport os\n\nimport numpy as np\nimport pytest\nimport vtk\n\nimport pyvista\nfrom pyvista import examples as ex\nfrom pyvista.utilities import errors\nfrom pyvista.utilities import fileio\nfrom pyvista.utilities import helpers\n\n# Only set this here just the once.\npyvista.set_error_output_file(os.path.join(os.path.dirname(__file__), 'ERROR_OUTPUT.txt'))\n\n\ndef test_createvectorpolydata_error():\n orig = np.random.random((3, 1))\n vec = np.random.random((3, 1))\n with pytest.raises(ValueError):\n helpers.vector_poly_data(orig, vec)\n\n\ndef test_createvectorpolydata_1D():\n orig = np.random.random(3)\n vec = np.random.random(3)\n vdata = helpers.vector_poly_data(orig, vec)\n assert np.any(vdata.points)\n assert np.any(vdata.point_arrays['vectors'])\n\n\ndef test_createvectorpolydata():\n orig = np.random.random((100, 3))\n vec = np.random.random((100, 3))\n vdata = helpers.vector_poly_data(orig, vec)\n assert np.any(vdata.points)\n assert np.any(vdata.point_arrays['vectors'])\n\n\ndef test_read(tmpdir):\n fnames = (ex.antfile, ex.planefile, ex.hexbeamfile, ex.spherefile,\n ex.uniformfile, ex.rectfile)\n types = (pyvista.PolyData, pyvista.PolyData, pyvista.UnstructuredGrid,\n pyvista.PolyData, pyvista.UniformGrid, pyvista.RectilinearGrid)\n for i, filename in enumerate(fnames):\n obj = fileio.read(filename)\n assert isinstance(obj, types[i])\n # Now test the standard_reader_routine\n for i, filename in enumerate(fnames):\n # Pass attrs to for the standard_reader_routine to be used\n obj = fileio.read(filename, attrs={'DebugOn': None})\n assert isinstance(obj, types[i])\n # this is also tested for each mesh types init from file tests\n filename = str(tmpdir.mkdir(\"tmpdir\").join('tmp.%s' % 'npy'))\n arr = np.random.rand(10, 10)\n np.save(filename, arr)\n with pytest.raises(IOError):\n _ = pyvista.read(filename)\n # read non existing file\n with pytest.raises(IOError):\n _ = pyvista.read('this_file_totally_does_not_exist.vtk')\n # Now test reading lists of files as multi blocks\n multi = pyvista.read(fnames)\n assert isinstance(multi, pyvista.MultiBlock)\n assert multi.n_blocks == len(fnames)\n nested = [ex.planefile,\n [ex.hexbeamfile, ex.uniformfile]]\n\n multi = pyvista.read(nested)\n assert isinstance(multi, pyvista.MultiBlock)\n assert multi.n_blocks == 2\n assert isinstance(multi[1], pyvista.MultiBlock)\n assert multi[1].n_blocks == 2\n\n\ndef test_get_array():\n grid = pyvista.UnstructuredGrid(ex.hexbeamfile)\n # add array to both point/cell data with same name\n carr = np.random.rand(grid.n_cells)\n grid._add_cell_array(carr, 'test_data')\n parr = np.random.rand(grid.n_points)\n grid._add_point_array(parr, 'test_data')\n # add other data\n oarr = np.random.rand(grid.n_points)\n grid._add_point_array(oarr, 'other')\n farr = np.random.rand(grid.n_points * grid.n_cells)\n grid._add_field_array(farr, 'field_data')\n assert np.allclose(carr, helpers.get_array(grid, 'test_data', preference='cell'))\n assert np.allclose(parr, helpers.get_array(grid, 'test_data', preference='point'))\n assert np.allclose(oarr, helpers.get_array(grid, 'other'))\n assert helpers.get_array(grid, 'foo') is None\n assert helpers.get_array(grid, 'test_data', preference='field') is None\n assert np.allclose(farr, helpers.get_array(grid, 'field_data', preference='field'))\n\n\ndef test_is_inside_bounds():\n data = ex.load_uniform()\n bnds = data.bounds\n assert helpers.is_inside_bounds((0.5, 0.5, 0.5), bnds)\n assert not helpers.is_inside_bounds((12, 5, 5), bnds)\n assert 
not helpers.is_inside_bounds((5, 12, 5), bnds)\n assert not helpers.is_inside_bounds((5, 5, 12), bnds)\n assert not helpers.is_inside_bounds((12, 12, 12), bnds)\n\n\ndef test_get_sg_image_scraper():\n scraper = pyvista._get_sg_image_scraper()\n assert isinstance(scraper, pyvista.Scraper)\n assert callable(scraper)\n\n\ndef test_voxelize():\n mesh = pyvista.PolyData(ex.load_uniform().points)\n vox = pyvista.voxelize(mesh, 0.5)\n assert vox.n_cells\n\n\ndef test_report():\n report = pyvista.Report(gpu=True)\n assert report is not None\n report = pyvista.Report(gpu=False)\n assert report is not None\n\n\ndef test_line_segments_from_points():\n points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])\n poly = pyvista.line_segments_from_points(points)\n assert poly.n_cells == 2\n assert poly.n_points == 4\n cells = poly.lines\n assert np.allclose(cells[:3], [2, 0, 1])\n assert np.allclose(cells[3:], [2, 2, 3])\n\n\ndef test_lines_from_points():\n points = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])\n poly = pyvista.lines_from_points(points)\n assert poly.n_cells == 2\n assert poly.n_points == 3\n cells = poly.lines\n assert np.allclose(cells[:3], [2, 0, 1])\n assert np.allclose(cells[3:], [2, 1, 2])\n\n\ndef test_grid_from_sph_coords():\n x = np.arange(0.0, 360.0, 40.0) # longitude\n y = np.arange(0.0, 181.0, 60.0) # colatitude\n z = [1] # elevation (radius)\n g = pyvista.grid_from_sph_coords(x, y, z)\n assert g.n_cells == 24\n assert g.n_points == 36\n assert np.allclose(\n g.bounds,\n [\n -0.8137976813493738,\n 0.8660254037844387,\n -0.8528685319524434,\n 0.8528685319524433,\n -1.0,\n 1.0,\n ],\n )\n assert np.allclose(g.points[1], [0.8660254037844386, 0.0, 0.5])\n z = np.linspace(10, 30, 3)\n g = pyvista.grid_from_sph_coords(x, y, z)\n assert g.n_cells == 48\n assert g.n_points == 108\n assert np.allclose(g.points[0], [0.0, 0.0, 10.0])\n\n\ndef test_transform_vectors_sph_to_cart():\n lon = np.arange(0.0, 360.0, 40.0) # longitude\n lat = np.arange(0.0, 181.0, 60.0) # colatitude\n lev = [1] # elevation (radius)\n u, v = np.meshgrid(lon, lat, indexing=\"ij\")\n w = u ** 2 - v ** 2\n uu, vv, ww = pyvista.transform_vectors_sph_to_cart(lon, lat, lev, u, v, w)\n assert np.allclose(\n [uu[-1, -1], vv[-1, -1], ww[-1, -1]],\n [67.80403533828323, 360.8359915416445, -70000.0],\n )\n\n\ndef test_assert_empty_kwargs():\n kwargs = {}\n assert errors.assert_empty_kwargs(**kwargs)\n with pytest.raises(TypeError):\n kwargs = {\"foo\":6}\n errors.assert_empty_kwargs(**kwargs)\n with pytest.raises(TypeError):\n kwargs = {\"foo\":6, \"goo\":\"bad\"}\n errors.assert_empty_kwargs(**kwargs)\n\n\ndef test_convert_id_list():\n ids = np.array([4, 5, 8])\n id_list = vtk.vtkIdList()\n id_list.SetNumberOfIds(len(ids))\n for i, v in enumerate(ids):\n id_list.SetId(i, v)\n converted = helpers.vtk_id_list_to_array(id_list)\n assert np.allclose(converted, ids)\n\n\ndef test_progress_monitor():\n mesh = pyvista.Sphere()\n ugrid = mesh.delaunay_3d(progress_bar=True)\n assert isinstance(ugrid, pyvista.UnstructuredGrid)\n"
] |
[
[
"numpy.random.random",
"numpy.allclose",
"numpy.linspace",
"numpy.meshgrid",
"numpy.arange",
"numpy.save",
"numpy.random.rand",
"numpy.any",
"numpy.array"
]
] |
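For reference, a short sketch unpacking the flat VTK cell layout that `test_lines_from_points` in the row above asserts: each line cell is stored as `[n_points, id_0, id_1]`, here always with `n_points == 2`:

```python
# Unpack the flat VTK line-cell array the tests above assert against.
import numpy as np
import pyvista

points = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=float)
poly = pyvista.lines_from_points(points)  # consecutive points become segments
cells = poly.lines.reshape(-1, 3)         # rows: [2, start_id, end_id]
print(cells)                              # [[2 0 1]
                                          #  [2 1 2]]
```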
Sahar2/qiskit-terra
|
[
"19fbaeb68f2b279c9748384e919e1d1b006860f2"
] |
[
"qiskit/visualization/state_visualization.py"
] |
[
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name,ungrouped-imports,import-error\n\n\"\"\"\nVisualization functions for quantum states.\n\"\"\"\n\nfrom functools import reduce\nimport colorsys\nimport numpy as np\nfrom scipy import linalg\nfrom qiskit.quantum_info.operators.pauli import pauli_group, Pauli\nfrom .matplotlib import HAS_MATPLOTLIB\n\nif HAS_MATPLOTLIB:\n from matplotlib.ticker import MaxNLocator\n from matplotlib import pyplot as plt\n from matplotlib.patches import FancyArrowPatch\n from matplotlib.patches import Circle\n import matplotlib.colors as mcolors\n from matplotlib.colors import Normalize, LightSource\n import matplotlib.gridspec as gridspec\n from mpl_toolkits.mplot3d import proj3d\n from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n from qiskit.visualization.exceptions import VisualizationError\n from qiskit.visualization.bloch import Bloch\n from qiskit.visualization.utils import _validate_input_state\n\n\nif HAS_MATPLOTLIB:\n class Arrow3D(FancyArrowPatch):\n \"\"\"Standard 3D arrow.\"\"\"\n\n def __init__(self, xs, ys, zs, *args, **kwargs):\n \"\"\"Create arrow.\"\"\"\n FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n \"\"\"Draw the arrow.\"\"\"\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, _ = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\n FancyArrowPatch.draw(self, renderer)\n\n\ndef plot_state_hinton(rho, title='', figsize=None):\n \"\"\"Plot a hinton diagram for the quanum state.\n\n Args:\n rho (ndarray): Numpy array for state vector or density matrix.\n title (str): a string that represents the plot title\n figsize (tuple): Figure size in inches.\n Returns:\n matplotlib.Figure: The matplotlib.Figure of the visualization\n\n Raises:\n ImportError: Requires matplotlib.\n \"\"\"\n if not HAS_MATPLOTLIB:\n raise ImportError('Must have Matplotlib installed.')\n rho = _validate_input_state(rho)\n if figsize is None:\n figsize = (8, 5)\n num = int(np.log2(len(rho)))\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)\n max_weight = 2 ** np.ceil(np.log(np.abs(rho).max()) / np.log(2))\n datareal = np.real(rho)\n dataimag = np.imag(rho)\n column_names = [bin(i)[2:].zfill(num) for i in range(2**num)]\n row_names = [bin(i)[2:].zfill(num) for i in range(2**num)]\n lx = len(datareal[0]) # Work out matrix dimensions\n ly = len(datareal[:, 0])\n # Real\n ax1.patch.set_facecolor('gray')\n ax1.set_aspect('equal', 'box')\n ax1.xaxis.set_major_locator(plt.NullLocator())\n ax1.yaxis.set_major_locator(plt.NullLocator())\n\n for (x, y), w in np.ndenumerate(datareal):\n color = 'white' if w > 0 else 'black'\n size = np.sqrt(np.abs(w) / max_weight)\n rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,\n facecolor=color, edgecolor=color)\n ax1.add_patch(rect)\n\n ax1.set_xticks(np.arange(0, lx+0.5, 1))\n ax1.set_yticks(np.arange(0, ly+0.5, 1))\n ax1.set_yticklabels(row_names, fontsize=14)\n ax1.set_xticklabels(column_names, 
fontsize=14, rotation=90)\n ax1.autoscale_view()\n ax1.invert_yaxis()\n ax1.set_title('Real[rho]', fontsize=14)\n # Imaginary\n ax2.patch.set_facecolor('gray')\n ax2.set_aspect('equal', 'box')\n ax2.xaxis.set_major_locator(plt.NullLocator())\n ax2.yaxis.set_major_locator(plt.NullLocator())\n\n for (x, y), w in np.ndenumerate(dataimag):\n color = 'white' if w > 0 else 'black'\n size = np.sqrt(np.abs(w) / max_weight)\n rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,\n facecolor=color, edgecolor=color)\n ax2.add_patch(rect)\n if np.any(dataimag != 0):\n ax2.set_xticks(np.arange(0, lx+0.5, 1))\n ax2.set_yticks(np.arange(0, ly+0.5, 1))\n ax2.set_yticklabels(row_names, fontsize=14)\n ax2.set_xticklabels(column_names, fontsize=14, rotation=90)\n ax2.autoscale_view()\n ax2.invert_yaxis()\n ax2.set_title('Imag[rho]', fontsize=14)\n if title:\n fig.suptitle(title, fontsize=16)\n plt.tight_layout()\n plt.close(fig)\n return fig\n\n\ndef plot_bloch_vector(bloch, title=\"\", ax=None, figsize=None):\n \"\"\"Plot the Bloch sphere.\n\n Plot a sphere, axes, the Bloch vector, and its projections onto each axis.\n\n Args:\n bloch (list[double]): array of three elements where [<x>, <y>, <z>]\n title (str): a string that represents the plot title\n ax (matplotlib.Axes): An Axes to use for rendering the bloch sphere\n figsize (tuple): Figure size in inches. Has no effect is passing `ax`.\n\n Returns:\n Figure: A matplotlib figure instance if `ax = None`.\n\n Raises:\n ImportError: Requires matplotlib.\n \"\"\"\n if not HAS_MATPLOTLIB:\n raise ImportError('Must have Matplotlib installed.')\n if figsize is None:\n figsize = (5, 5)\n B = Bloch(axes=ax)\n B.add_vectors(bloch)\n B.render(title=title)\n if ax is None:\n fig = B.fig\n fig.set_size_inches(figsize[0], figsize[1])\n plt.close(fig)\n return fig\n return None\n\n\ndef plot_bloch_multivector(rho, title='', figsize=None):\n \"\"\"Plot the Bloch sphere.\n\n Plot a sphere, axes, the Bloch vector, and its projections onto each axis.\n\n Args:\n rho (ndarray): Numpy array for state vector or density matrix.\n title (str): a string that represents the plot title\n figsize (tuple): Has no effect, here for compatibility only.\n\n Returns:\n Figure: A matplotlib figure instance if `ax = None`.\n\n Raises:\n ImportError: Requires matplotlib.\n \"\"\"\n if not HAS_MATPLOTLIB:\n raise ImportError('Must have Matplotlib installed.')\n rho = _validate_input_state(rho)\n num = int(np.log2(len(rho)))\n width, height = plt.figaspect(1/num)\n fig = plt.figure(figsize=(width, height))\n for i in range(num):\n ax = fig.add_subplot(1, num, i + 1, projection='3d')\n pauli_singles = [\n Pauli.pauli_single(num, i, 'X'),\n Pauli.pauli_single(num, i, 'Y'),\n Pauli.pauli_single(num, i, 'Z')\n ]\n bloch_state = list(\n map(lambda x: np.real(np.trace(np.dot(x.to_matrix(), rho))),\n pauli_singles))\n plot_bloch_vector(bloch_state, \"qubit \" + str(i), ax=ax,\n figsize=figsize)\n fig.suptitle(title, fontsize=16)\n plt.close(fig)\n return fig\n\n\ndef plot_state_city(rho, title=\"\", figsize=None, color=None,\n alpha=1):\n \"\"\"Plot the cityscape of quantum state.\n\n Plot two 3d bar graphs (two dimensional) of the real and imaginary\n part of the density matrix rho.\n\n Args:\n rho (ndarray): Numpy array for state vector or density matrix.\n title (str): a string that represents the plot title\n figsize (tuple): Figure size in inches.\n color (list): A list of len=2 giving colors for real and\n imaginary components of matrix elements.\n alpha (float): Transparency value 
for bars\n Returns:\n matplotlib.Figure: The matplotlib.Figure of the visualization\n\n Raises:\n ImportError: Requires matplotlib.\n ValueError: When 'color' is not a list of len=2.\n \"\"\"\n if not HAS_MATPLOTLIB:\n raise ImportError('Must have Matplotlib installed.')\n rho = _validate_input_state(rho)\n\n num = int(np.log2(len(rho)))\n # get the real and imag parts of rho\n datareal = np.real(rho)\n dataimag = np.imag(rho)\n\n # get the labels\n column_names = [bin(i)[2:].zfill(num) for i in range(2**num)]\n row_names = [bin(i)[2:].zfill(num) for i in range(2**num)]\n\n lx = len(datareal[0]) # Work out matrix dimensions\n ly = len(datareal[:, 0])\n xpos = np.arange(0, lx, 1) # Set up a mesh of positions\n ypos = np.arange(0, ly, 1)\n xpos, ypos = np.meshgrid(xpos+0.25, ypos+0.25)\n\n xpos = xpos.flatten()\n ypos = ypos.flatten()\n zpos = np.zeros(lx*ly)\n\n dx = 0.5 * np.ones_like(zpos) # width of bars\n dy = dx.copy()\n dzr = datareal.flatten()\n dzi = dataimag.flatten()\n\n if color is None:\n color = [\"#648fff\", \"#648fff\"]\n else:\n if len(color) != 2:\n raise ValueError(\"'color' must be a list of len=2.\")\n if color[0] is None:\n color[0] = \"#648fff\"\n if color[1] is None:\n color[1] = \"#648fff\"\n\n # set default figure size\n if figsize is None:\n figsize = (15, 5)\n\n fig = plt.figure(figsize=figsize)\n ax1 = fig.add_subplot(1, 2, 1, projection='3d')\n\n x = [0, max(xpos)+0.5, max(xpos)+0.5, 0]\n y = [0, 0, max(ypos)+0.5, max(ypos)+0.5]\n z = [0, 0, 0, 0]\n verts = [list(zip(x, y, z))]\n\n fc1 = generate_facecolors(xpos, ypos, zpos, dx, dy, dzr, color[0])\n for idx, cur_zpos in enumerate(zpos):\n if dzr[idx] > 0:\n zorder = 2\n else:\n zorder = 0\n b1 = ax1.bar3d(xpos[idx], ypos[idx], cur_zpos,\n dx[idx], dy[idx], dzr[idx],\n alpha=alpha, zorder=zorder)\n b1.set_facecolors(fc1[6*idx:6*idx+6])\n\n pc1 = Poly3DCollection(verts, alpha=0.15, facecolor='k',\n linewidths=1, zorder=1)\n\n if min(dzr) < 0 < max(dzr):\n ax1.add_collection3d(pc1)\n\n ax2 = fig.add_subplot(1, 2, 2, projection='3d')\n fc2 = generate_facecolors(xpos, ypos, zpos, dx, dy, dzi, color[1])\n for idx, cur_zpos in enumerate(zpos):\n if dzi[idx] > 0:\n zorder = 2\n else:\n zorder = 0\n b2 = ax2.bar3d(xpos[idx], ypos[idx], cur_zpos,\n dx[idx], dy[idx], dzi[idx],\n alpha=alpha, zorder=zorder)\n b2.set_facecolors(fc2[6*idx:6*idx+6])\n\n pc2 = Poly3DCollection(verts, alpha=0.2, facecolor='k',\n linewidths=1, zorder=1)\n\n if min(dzi) < 0 < max(dzi):\n ax2.add_collection3d(pc2)\n ax1.set_xticks(np.arange(0.5, lx+0.5, 1))\n ax1.set_yticks(np.arange(0.5, ly+0.5, 1))\n max_dzr = max(dzr)\n min_dzr = min(dzr)\n if max_dzr != min_dzr:\n ax1.axes.set_zlim3d(np.min(dzr), np.max(dzr)+1e-9)\n else:\n if min_dzr == 0:\n ax1.axes.set_zlim3d(np.min(dzr), np.max(dzr)+1e-9)\n else:\n ax1.axes.set_zlim3d(auto=True)\n ax1.zaxis.set_major_locator(MaxNLocator(5))\n ax1.w_xaxis.set_ticklabels(row_names, fontsize=14, rotation=45)\n ax1.w_yaxis.set_ticklabels(column_names, fontsize=14, rotation=-22.5)\n ax1.set_zlabel(\"Real[rho]\", fontsize=14)\n for tick in ax1.zaxis.get_major_ticks():\n tick.label.set_fontsize(14)\n\n ax2.set_xticks(np.arange(0.5, lx+0.5, 1))\n ax2.set_yticks(np.arange(0.5, ly+0.5, 1))\n min_dzi = np.min(dzi)\n max_dzi = np.max(dzi)\n if min_dzi != max_dzi:\n eps = 0\n ax2.zaxis.set_major_locator(MaxNLocator(5))\n ax2.axes.set_zlim3d(np.min(dzi), np.max(dzi)+eps)\n else:\n if min_dzi == 0:\n ax2.set_zticks([0])\n eps = 1e-9\n ax2.axes.set_zlim3d(np.min(dzi), np.max(dzi)+eps)\n else:\n 
ax2.axes.set_zlim3d(auto=True)\n ax2.w_xaxis.set_ticklabels(row_names, fontsize=14, rotation=45)\n ax2.w_yaxis.set_ticklabels(column_names, fontsize=14, rotation=-22.5)\n ax2.set_zlabel(\"Imag[rho]\", fontsize=14)\n for tick in ax2.zaxis.get_major_ticks():\n tick.label.set_fontsize(14)\n plt.suptitle(title, fontsize=16)\n plt.tight_layout()\n plt.close(fig)\n return fig\n\n\ndef plot_state_paulivec(rho, title=\"\", figsize=None, color=None):\n \"\"\"Plot the paulivec representation of a quantum state.\n\n Plot a bargraph of the mixed state rho over the pauli matrices\n\n Args:\n rho (ndarray): Numpy array for state vector or density matrix\n title (str): a string that represents the plot title\n figsize (tuple): Figure size in inches.\n color (list or str): Color of the expectation value bars.\n Returns:\n matplotlib.Figure: The matplotlib.Figure of the visualization\n Raises:\n ImportError: Requires matplotlib.\n \"\"\"\n if not HAS_MATPLOTLIB:\n raise ImportError('Must have Matplotlib installed.')\n rho = _validate_input_state(rho)\n if figsize is None:\n figsize = (7, 5)\n num = int(np.log2(len(rho)))\n labels = list(map(lambda x: x.to_label(), pauli_group(num)))\n values = list(map(lambda x: np.real(np.trace(np.dot(x.to_matrix(), rho))),\n pauli_group(num)))\n numelem = len(values)\n if color is None:\n color = \"#648fff\"\n\n ind = np.arange(numelem) # the x locations for the groups\n width = 0.5 # the width of the bars\n fig, ax = plt.subplots(figsize=figsize)\n ax.grid(zorder=0, linewidth=1, linestyle='--')\n ax.bar(ind, values, width, color=color, zorder=2)\n ax.axhline(linewidth=1, color='k')\n # add some text for labels, title, and axes ticks\n ax.set_ylabel('Expectation value', fontsize=14)\n ax.set_xticks(ind)\n ax.set_yticks([-1, -0.5, 0, 0.5, 1])\n ax.set_xticklabels(labels, fontsize=14, rotation=70)\n ax.set_xlabel('Pauli', fontsize=14)\n ax.set_ylim([-1, 1])\n ax.set_facecolor('#eeeeee')\n for tick in ax.xaxis.get_major_ticks()+ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(14)\n ax.set_title(title, fontsize=16)\n plt.close(fig)\n return fig\n\n\ndef n_choose_k(n, k):\n \"\"\"Return the number of combinations for n choose k.\n\n Args:\n n (int): the total number of options .\n k (int): The number of elements.\n\n Returns:\n int: returns the binomial coefficient\n \"\"\"\n if n == 0:\n return 0\n return reduce(lambda x, y: x * y[0] / y[1],\n zip(range(n - k + 1, n + 1),\n range(1, k + 1)), 1)\n\n\ndef lex_index(n, k, lst):\n \"\"\"Return the lex index of a combination..\n\n Args:\n n (int): the total number of options .\n k (int): The number of elements.\n lst (list): list\n\n Returns:\n int: returns int index for lex order\n\n Raises:\n VisualizationError: if length of list is not equal to k\n \"\"\"\n if len(lst) != k:\n raise VisualizationError(\"list should have length k\")\n comb = list(map(lambda x: n - 1 - x, lst))\n dualm = sum([n_choose_k(comb[k - 1 - i], i + 1) for i in range(k)])\n return int(dualm)\n\n\ndef bit_string_index(s):\n \"\"\"Return the index of a string of 0s and 1s.\"\"\"\n n = len(s)\n k = s.count(\"1\")\n if s.count(\"0\") != n - k:\n raise VisualizationError(\"s must be a string of 0 and 1\")\n ones = [pos for pos, char in enumerate(s) if char == \"1\"]\n return lex_index(n, k, ones)\n\n\ndef phase_to_rgb(complex_number):\n \"\"\"Map a phase of a complexnumber to a color in (r,g,b).\n\n complex_number is phase is first mapped to angle in the range\n [0, 2pi] and then to the HSL color wheel\n \"\"\"\n angles = (np.angle(complex_number) + 
(np.pi * 4)) % (np.pi * 2)\n rgb = colorsys.hls_to_rgb(angles / (np.pi * 2), 0.5, 0.5)\n return rgb\n\n\ndef plot_state_qsphere(rho, figsize=None):\n \"\"\"Plot the qsphere representation of a quantum state.\n Here, the size of the points is proportional to the probability\n of the corresponding term in the state and the color represents\n the phase.\n\n Args:\n rho (ndarray): State vector or density matrix representation.\n of quantum state.\n figsize (tuple): Figure size in inches.\n\n Returns:\n Figure: A matplotlib figure instance.\n\n Raises:\n ImportError: Requires matplotlib.\n \"\"\"\n if not HAS_MATPLOTLIB:\n raise ImportError('Must have Matplotlib installed.')\n try:\n import seaborn as sns\n except ImportError:\n raise ImportError('Must have seaborn installed to use '\n 'plot_state_qsphere')\n rho = _validate_input_state(rho)\n if figsize is None:\n figsize = (7, 7)\n num = int(np.log2(len(rho)))\n\n # get the eigenvectors and eigenvalues\n we, stateall = linalg.eigh(rho)\n\n fig = plt.figure(figsize=figsize)\n gs = gridspec.GridSpec(nrows=3, ncols=3)\n\n ax = fig.add_subplot(gs[0:3, 0:3], projection='3d')\n ax.axes.set_xlim3d(-1.0, 1.0)\n ax.axes.set_ylim3d(-1.0, 1.0)\n ax.axes.set_zlim3d(-1.0, 1.0)\n ax.axes.grid(False)\n ax.view_init(elev=5, azim=275)\n\n for _ in range(2 ** num):\n # start with the max\n probmix = we.max()\n prob_location = we.argmax()\n if probmix > 0.001:\n # get the max eigenvalue\n state = stateall[:, prob_location]\n loc = np.absolute(state).argmax()\n\n # get the element location closes to lowest bin representation.\n for j in range(2 ** num):\n test = np.absolute(np.absolute(state[j]) -\n np.absolute(state[loc]))\n if test < 0.001:\n loc = j\n break\n\n # remove the global phase\n angles = (np.angle(state[loc]) + 2 * np.pi) % (2 * np.pi)\n angleset = np.exp(-1j * angles)\n state = angleset * state\n state.flatten()\n\n # start the plotting\n # Plot semi-transparent sphere\n u = np.linspace(0, 2 * np.pi, 25)\n v = np.linspace(0, np.pi, 25)\n x = np.outer(np.cos(u), np.sin(v))\n y = np.outer(np.sin(u), np.sin(v))\n z = np.outer(np.ones(np.size(u)), np.cos(v))\n ax.plot_surface(x, y, z, rstride=1, cstride=1, color='k',\n alpha=0.05, linewidth=0)\n\n # Get rid of the panes\n ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n\n # Get rid of the spines\n ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n\n # Get rid of the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n\n d = num\n for i in range(2 ** num):\n\n # get x,y,z points\n element = bin(i)[2:].zfill(num)\n weight = element.count(\"1\")\n zvalue = -2 * weight / d + 1\n number_of_divisions = n_choose_k(d, weight)\n weight_order = bit_string_index(element)\n angle = (float(weight) / d) * (np.pi * 2) + \\\n (weight_order * 2 * (np.pi / number_of_divisions))\n\n if (weight > d / 2) or (((weight == d / 2) and\n (weight_order >= number_of_divisions / 2))):\n angle = np.pi - angle - (2 * np.pi / number_of_divisions)\n\n xvalue = np.sqrt(1 - zvalue ** 2) * np.cos(angle)\n yvalue = np.sqrt(1 - zvalue ** 2) * np.sin(angle)\n\n # get prob and angle - prob will be shade and angle color\n prob = np.real(np.dot(state[i], state[i].conj()))\n colorstate = phase_to_rgb(state[i])\n\n alfa = 1\n if yvalue >= 0.1:\n alfa = 1.0 - yvalue\n\n ax.plot([xvalue], [yvalue], [zvalue],\n 
markerfacecolor=colorstate,\n markeredgecolor=colorstate,\n marker='o', markersize=np.sqrt(prob) * 30, alpha=alfa)\n\n a = Arrow3D([0, xvalue], [0, yvalue], [0, zvalue],\n mutation_scale=20, alpha=prob, arrowstyle=\"-\",\n color=colorstate, lw=2)\n ax.add_artist(a)\n\n # add weight lines\n for weight in range(d + 1):\n theta = np.linspace(-2 * np.pi, 2 * np.pi, 100)\n z = -2 * weight / d + 1\n r = np.sqrt(1 - z ** 2)\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n ax.plot(x, y, z, color=(.5, .5, .5), lw=1, ls=':', alpha=.5)\n\n # add center point\n ax.plot([0], [0], [0], markerfacecolor=(.5, .5, .5),\n markeredgecolor=(.5, .5, .5), marker='o', markersize=3,\n alpha=1)\n we[prob_location] = 0\n else:\n break\n\n n = 32\n theta = np.ones(n)\n\n ax2 = fig.add_subplot(gs[2:, 2:])\n ax2.pie(theta, colors=sns.color_palette(\"hls\", n), radius=0.75)\n ax2.add_artist(Circle((0, 0), 0.5, color='white', zorder=1))\n ax2.text(0, 0, 'Phase', horizontalalignment='center',\n verticalalignment='center', fontsize=14)\n\n offset = 0.95 # since radius of sphere is one.\n\n ax2.text(offset, 0, r'$0$', horizontalalignment='center',\n verticalalignment='center', fontsize=14)\n ax2.text(0, offset, r'$\\pi/2$', horizontalalignment='center',\n verticalalignment='center', fontsize=14)\n\n ax2.text(-offset, 0, r'$\\pi$', horizontalalignment='center',\n verticalalignment='center', fontsize=14)\n\n ax2.text(0, -offset, r'$3\\pi/2$', horizontalalignment='center',\n verticalalignment='center', fontsize=14)\n\n fig.tight_layout()\n plt.close(fig)\n\n return fig\n\n\ndef generate_facecolors(x, y, z, dx, dy, dz, color):\n \"\"\"Generates shaded facecolors for shaded bars.\n This is here to work around a Matplotlib bug\n where alpha does not work in Bar3D.\n Args:\n x (array_like): The x- coordinates of the anchor point of the bars.\n y (array_like): The y- coordinates of the anchor point of the bars.\n z (array_like): The z- coordinates of the anchor point of the bars.\n dx (array_like): Width of bars.\n dy (array_like): Depth of bars.\n dz (array_like): Height of bars.\n color (array_like): sequence of valid color specifications, optional\n Returns:\n list: Shaded colors for bars.\n \"\"\"\n cuboid = np.array([\n # -z\n (\n (0, 0, 0),\n (0, 1, 0),\n (1, 1, 0),\n (1, 0, 0),\n ),\n # +z\n (\n (0, 0, 1),\n (1, 0, 1),\n (1, 1, 1),\n (0, 1, 1),\n ),\n # -y\n (\n (0, 0, 0),\n (1, 0, 0),\n (1, 0, 1),\n (0, 0, 1),\n ),\n # +y\n (\n (0, 1, 0),\n (0, 1, 1),\n (1, 1, 1),\n (1, 1, 0),\n ),\n # -x\n (\n (0, 0, 0),\n (0, 0, 1),\n (0, 1, 1),\n (0, 1, 0),\n ),\n # +x\n (\n (1, 0, 0),\n (1, 1, 0),\n (1, 1, 1),\n (1, 0, 1),\n ),\n ])\n\n # indexed by [bar, face, vertex, coord]\n polys = np.empty(x.shape + cuboid.shape)\n # handle each coordinate separately\n for i, p, dp in [(0, x, dx), (1, y, dy), (2, z, dz)]:\n p = p[..., np.newaxis, np.newaxis]\n dp = dp[..., np.newaxis, np.newaxis]\n polys[..., i] = p + dp * cuboid[..., i]\n\n # collapse the first two axes\n polys = polys.reshape((-1,) + polys.shape[2:])\n\n facecolors = []\n if len(color) == len(x):\n # bar colors specified, need to expand to number of faces\n for c in color:\n facecolors.extend([c] * 6)\n else:\n # a single color specified, or face colors specified explicitly\n facecolors = list(mcolors.to_rgba_array(color))\n if len(facecolors) < len(x):\n facecolors *= (6 * len(x))\n\n normals = _generate_normals(polys)\n return _shade_colors(facecolors, normals)\n\n\ndef _generate_normals(polygons):\n \"\"\"\n Takes a list of polygons and return an array of their normals.\n 
Normals point towards the viewer for a face with its vertices in\n counterclockwise order, following the right hand rule.\n Uses three points equally spaced around the polygon.\n This normal of course might not make sense for polygons with more than\n three points not lying in a plane, but it's a plausible and fast\n approximation.\n Args:\n polygons (list): list of (M_i, 3) array_like, or (..., M, 3) array_like\n A sequence of polygons to compute normals for, which can have\n varying numbers of vertices. If the polygons all have the same\n number of vertices and array is passed, then the operation will\n be vectorized.\n Returns:\n normals: (..., 3) array_like\n A normal vector estimated for the polygon.\n \"\"\"\n if isinstance(polygons, np.ndarray):\n # optimization: polygons all have the same number of points, so can\n # vectorize\n n = polygons.shape[-2]\n i1, i2, i3 = 0, n//3, 2*n//3\n v1 = polygons[..., i1, :] - polygons[..., i2, :]\n v2 = polygons[..., i2, :] - polygons[..., i3, :]\n else:\n # The subtraction doesn't vectorize because polygons is jagged.\n v1 = np.empty((len(polygons), 3))\n v2 = np.empty((len(polygons), 3))\n for poly_i, ps in enumerate(polygons):\n n = len(ps)\n i1, i2, i3 = 0, n//3, 2*n//3\n v1[poly_i, :] = ps[i1, :] - ps[i2, :]\n v2[poly_i, :] = ps[i2, :] - ps[i3, :]\n\n return np.cross(v1, v2)\n\n\ndef _shade_colors(color, normals, lightsource=None):\n \"\"\"\n Shade *color* using normal vectors given by *normals*.\n *color* can also be an array of the same length as *normals*.\n \"\"\"\n if lightsource is None:\n # chosen for backwards-compatibility\n lightsource = LightSource(azdeg=225, altdeg=19.4712)\n\n def mod(v):\n return np.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2)\n\n shade = np.array([np.dot(n / mod(n), lightsource.direction)\n if mod(n) else np.nan for n in normals])\n mask = ~np.isnan(shade)\n\n if mask.any():\n norm = Normalize(min(shade[mask]), max(shade[mask]))\n shade[~mask] = min(shade[mask])\n color = mcolors.to_rgba_array(color)\n # shape of color should be (M, 4) (where M is number of faces)\n # shape of shade should be (M,)\n # colors should have final shape of (M, 4)\n alpha = color[:, 3]\n colors = (0.5 + norm(shade)[:, np.newaxis] * 0.5) * color\n colors[:, 3] = alpha\n else:\n colors = np.asanyarray(color).copy()\n\n return colors\n"
] |
[
[
"numpy.imag",
"numpy.sqrt",
"numpy.linspace",
"numpy.max",
"numpy.any",
"numpy.cross",
"matplotlib.patches.FancyArrowPatch.__init__",
"numpy.exp",
"matplotlib.pyplot.tight_layout",
"numpy.ones_like",
"numpy.arange",
"numpy.sin",
"scipy.linalg.eigh",
"numpy.real",
"numpy.asanyarray",
"numpy.size",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.NullLocator",
"matplotlib.pyplot.figure",
"numpy.log",
"numpy.min",
"numpy.isnan",
"matplotlib.patches.Circle",
"matplotlib.colors.LightSource",
"numpy.ndenumerate",
"numpy.array",
"matplotlib.pyplot.suptitle",
"numpy.meshgrid",
"matplotlib.patches.FancyArrowPatch.draw",
"matplotlib.pyplot.Rectangle",
"matplotlib.pyplot.figaspect",
"numpy.absolute",
"numpy.abs",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.ones",
"matplotlib.ticker.MaxNLocator",
"numpy.angle",
"numpy.empty",
"matplotlib.colors.to_rgba_array"
]
] |
Huynhanh883/twitterBot
|
[
"6067d89abf65c6540254e7636d8f818f7f4ef08c"
] |
[
"analyze_stat.py"
] |
[
 "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 8 14:52:08 2019\r\n\r\n@author: edoardottt\r\n\r\nThis file contains code to analyze the database.\r\nIt uses the matplotlib library to display the results.\r\nIt shows a chart with likes, retweets, and followers per day.\r\n\r\nThis file is under MIT License.\r\n\r\n\"\"\"\r\n\r\n# all libraries required\r\nimport os\r\nimport sys\r\nimport usage\r\nimport sqlite3\r\nimport hashlib\r\n\r\ntry:\r\n    import matplotlib.pyplot as plt\r\n    plt.figure(num='twitterBot stats')\r\nexcept Exception as ex:\r\n    usage.print_usage(4)\r\n\r\ndb_filename = 'database.db'\r\ndb_is_new = not os.path.exists(db_filename)\r\nconn = sqlite3.connect(db_filename)\r\n\r\n\r\n# check the statistics for a user\r\ndef check_stat(username,password):\r\n\r\n    timestamps = [] # contains all the timestamps saved in the records\r\n    likes = [] # contains all the likes saved in the records\r\n    retweets = [] # contains all the retweets saved in the records\r\n    followers = [] # contains all the followers saved in the records\r\n    d_likes = {} # dictionary with keys = days & values = likes\r\n    d_retweets = {} # dictionary with keys = days & values = retweets\r\n    d_followers = {} # dictionary with keys = days & values = followers\r\n    if(db_is_new):\r\n        usage.print_usage(5)\r\n    else:\r\n        cursor = conn.cursor()\r\n        # check if that user is in the database\r\n        p = hashlib.sha256(password.encode('utf-8')).hexdigest()\r\n        cursor.execute(\"SELECT * FROM users WHERE username = ? and password = ?\", (username,p))\r\n        data = cursor.fetchone()\r\n        if(data==None):\r\n            print(\"There aren't data for this username.\")\r\n            sys.exit()\r\n        # if that user exists\r\n        cursor.execute(\"SELECT * FROM analytics WHERE username = ?\", (username,))\r\n        data=cursor.fetchall()\r\n        if (data!=None):\r\n            if(len(data)!=0):\r\n                for record in data:\r\n                    timestamps += [record[1]] # save the timestamp\r\n                    likes += [int(record[2])] # save the likes count\r\n                    retweets += [int(record[3])] # save the retweets count\r\n                    followers += [int(record[4])] # save the followers count\r\n                # In this for loop the arrays declared above become dictionaries:\r\n                # all the likes, retweets and followers counts are aggregated per day.\r\n                # Remember timestamps[i][:-16] means yyyy-mm-dd\r\n                for i in range(len(timestamps)):\r\n                    if (not(timestamps[i][:-16] in d_likes)):\r\n                        for j in range(len(timestamps)):\r\n                            if timestamps[i][:-16]==timestamps[j][:-16]:\r\n                                if timestamps[i][:-16] in d_likes:\r\n                                    d_likes[timestamps[i][:-16]] += likes[j]\r\n                                else:\r\n                                    d_likes[timestamps[i][:-16]] = likes[j]\r\n                                if timestamps[i][:-16] in d_retweets:\r\n                                    d_retweets[timestamps[i][:-16]] += retweets[j]\r\n                                else:\r\n                                    d_retweets[timestamps[i][:-16]] = retweets[j]\r\n                                # followers are a snapshot, not a sum: keep the day's latest value\r\n                                if timestamps[i][:-16] in d_followers:\r\n                                    d_followers[timestamps[i][:-16]] -= d_followers[timestamps[i][:-16]]\r\n                                    d_followers[timestamps[i][:-16]] = followers[j]\r\n                                else:\r\n                                    d_followers[timestamps[i][:-16]] = followers[j]\r\n                # adjust plot settings\r\n                plt.subplots_adjust(bottom=0.2)\r\n                plt.xticks( rotation = 70 )\r\n                ax=plt.gca()\r\n                ax.xaxis_date()\r\n                date = list(d_likes.keys())\r\n                likes_vector = [d_likes[i] for i in date]\r\n                retweets_vector = [d_retweets[i] for i in date]\r\n                followers_vector = [d_followers[i] for i in date]\r\n                plt.plot(date,likes_vector, '-r', marker='o', label='likes')\r\n                plt.plot(date,retweets_vector, '-g', marker='o', label='retweets')\r\n                plt.plot(date,followers_vector, '-b', marker='o', label='followers')\r\n                # if the first value > the last value, the legend is shown on the upper right; otherwise it's shown on the upper left\r\n                if (d_likes[list(d_likes.keys())[0]] > d_likes[list(d_likes.keys())[len(d_likes)-1]]):\r\n                    plt.legend(loc='upper right')\r\n                else:\r\n                    plt.legend(loc='upper left')\r\n                # Print the results\r\n                print('Total likes: '+str(sum(likes)))\r\n                print('Total retweets: '+str(sum(retweets)))\r\n                # add the number label in all points\r\n                for var_date,var_likes in zip(date, likes_vector):\r\n                    plt.text(var_date, var_likes, str(var_likes))\r\n                for var_date,var_retweets in zip(date, retweets_vector):\r\n                    plt.text(var_date, var_retweets, str(var_retweets))\r\n                for var_date,var_followers in zip(date, followers_vector):\r\n                    plt.text(var_date, var_followers, str(var_followers))\r\n                plt.title('Statistics for '+username)\r\n                plt.subplots_adjust(left=None, bottom=0.13, right=0.98, top=0.94, wspace=None, hspace=None)\r\n                plt.show()\r\n        else:\r\n            print(\"There aren't data for this username.\")\r\n    conn.close()"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
zhenzonglei/dnnbrain
|
[
"1808f08d2497df59ea695a1a0cd16c0129fbebbf"
] |
[
"dnnbrain/dnn/base.py"
] |
[
"import os\nimport cv2\nimport torch\nimport numpy as np\n\nfrom PIL import Image\nfrom os.path import join as pjoin\nfrom copy import deepcopy\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LinearRegression, LogisticRegression, Lasso\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import pairwise_distances\nfrom scipy.signal import periodogram\nfrom torchvision import transforms\n\nDNNBRAIN_MODEL = pjoin(os.environ['DNNBRAIN_DATA'], 'models')\n\n\ndef array_statistic(arr, method, axis=None, keepdims=False):\n \"\"\"\n extract statistic of an array\n\n Parameters:\n ----------\n arr[array]: a numpy array\n method[str]: feature extraction method\n axis[int|tuple]: None or int or tuple of ints\n Axis or axes along which to operate.\n If it's None, operate on the whole array.\n keepdims[bool]: keep the axis which is reduced\n\n Return:\n ------\n arr[array]: extracted statistic\n \"\"\"\n if method == 'max':\n arr = np.max(arr, axis, keepdims=keepdims)\n elif method == 'mean':\n arr = np.mean(arr, axis, keepdims=keepdims)\n elif method == 'median':\n arr = np.median(arr, axis, keepdims=keepdims)\n elif method == 'L1':\n arr = np.linalg.norm(arr, 1, axis, keepdims=keepdims)\n elif method == 'L2':\n arr = np.linalg.norm(arr, 2, axis, keepdims=keepdims)\n else:\n raise ValueError('Not supported method:', method)\n\n return arr\n\n\nclass ImageSet:\n \"\"\"\n Build a dataset to load image\n \"\"\"\n def __init__(self, img_dir, img_ids, labels=None, transform=None):\n \"\"\"\n Initialize ImageSet\n\n Parameters:\n ----------\n img_dir[str]: images' parent directory\n img_ids[list]: Each img_id is a path which can find the image file relative to img_dir.\n labels[list]: Each image's label.\n transform[callable function]: optional transform to be applied on a stimulus.\n \"\"\"\n self.img_dir = img_dir\n self.img_ids = img_ids\n self.labels = np.ones(len(self.img_ids)) if labels is None else labels\n self.labels = self.labels.astype(np.int64)\n self.transform = transforms.Compose([transforms.ToTensor()]) if transform is None else transform\n\n def __len__(self):\n \"\"\"\n Return the number of images\n \"\"\"\n return len(self.img_ids)\n\n def __getitem__(self, indices):\n \"\"\"\n Get image data and corresponding labels\n\n Parameter:\n ---------\n indices[int|list|slice]: subscript indices\n\n Returns:\n -------\n data[tensor]: image data with shape as (n_stim, n_chn, height, weight)\n labels[list]: image labels\n \"\"\"\n # check availability and do preparation\n if isinstance(indices, int):\n tmp_ids = [self.img_ids[indices]]\n labels = [self.labels[indices]]\n elif isinstance(indices, list):\n tmp_ids = [self.img_ids[idx] for idx in indices]\n labels = [self.labels[idx] for idx in indices]\n elif isinstance(indices, slice):\n tmp_ids = self.img_ids[indices]\n labels = self.labels[indices]\n else:\n raise IndexError(\"only integer, slices (`:`) and list are valid indices\")\n\n # load data\n data = torch.zeros(0)\n for img_id in tmp_ids:\n image = Image.open(pjoin(self.img_dir, img_id)).convert('RGB') # load image\n image = self.transform(image) # transform image\n image = torch.unsqueeze(image, 0)\n data = torch.cat((data, image))\n\n if data.shape[0] == 1:\n data = data[0]\n labels = labels[0] # len(labels) == 1\n\n return data, labels\n\n\nclass VideoSet:\n \"\"\"\n Dataset for video data\n \"\"\"\n def __init__(self, vid_file, frame_nums, labels=None, transform=None):\n \"\"\"\n Parameters:\n ----------\n 
vid_file[str]: video data file\n frame_nums[list]: sequence numbers of the frames of interest\n labels[list]: each frame's label\n transform[pytorch transform]\n \"\"\"\n self.vid_cap = cv2.VideoCapture(vid_file)\n self.frame_nums = frame_nums\n self.labels = np.ones(len(self.frame_nums)) if labels is None else labels\n self.labels = self.labels.astype(np.int64)\n self.transform = transforms.Compose([transforms.ToTensor()]) if transform is None else transform\n\n def __getitem__(self, indices):\n \"\"\"\n Get frame data and corresponding labels\n\n Parameter:\n ---------\n indices[int|list|slice]: subscript indices\n\n Returns:\n -------\n data[tensor]: frame data with shape as (n_stim, n_chn, height, weight)\n labels[list]: frame labels\n \"\"\"\n # check availability and do preparation\n if isinstance(indices, int):\n tmp_nums = [self.frame_nums[indices]]\n labels = [self.labels[indices]]\n elif isinstance(indices, list):\n tmp_nums = [self.frame_nums[idx] for idx in indices]\n labels = [self.labels[idx] for idx in indices]\n elif isinstance(indices, slice):\n tmp_nums = self.frame_nums[indices]\n labels = self.labels[indices]\n else:\n raise IndexError(\"only integer, slices (`:`) and list are valid indices\")\n\n # load data\n data = torch.zeros(0)\n for frame_num in tmp_nums:\n # get frame\n self.vid_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num-1)\n _, frame = self.vid_cap.read()\n frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n\n frame = self.transform(frame) # transform frame\n frame = torch.unsqueeze(frame, 0)\n data = torch.cat((data, frame))\n\n if data.shape[0] == 1:\n data = data[0]\n labels = labels[0] # len(labels) == 1\n\n return data, labels\n\n def __len__(self):\n \"\"\"\n Return the number of frames\n \"\"\"\n return len(self.frame_nums)\n\n\nclass UnivariatePredictionModel:\n\n def __init__(self, model_name=None, cv=3):\n \"\"\"\n Parameters:\n ----------\n model_name[str]: name of a model used to do prediction\n If is 'corr', it just uses correlation rather than prediction.\n cv[int]: cross validation fold number\n \"\"\"\n self.set(model_name, cv)\n\n def set(self, model_name=None, cv=None):\n \"\"\"\n Set some attributes\n\n Parameters:\n ----------\n model_name[str]: name of a model used to do prediction\n If is 'corr', it just uses correlation rather than prediction.\n cv[int]: cross validation fold number\n \"\"\"\n if model_name is None:\n pass\n elif model_name == 'lrc':\n self.model = LogisticRegression()\n self.score_evl = 'accuracy'\n elif model_name == 'svc':\n self.model = SVC(kernel='linear', C=0.025)\n self.score_evl = 'accuracy'\n elif model_name == 'glm':\n self.model = LinearRegression()\n self.score_evl = 'explained_variance'\n elif model_name == 'lasso':\n self.model = Lasso()\n self.score_evl = 'explained_variance'\n elif model_name == 'corr':\n self.model = model_name\n self.score_evl = 'R square'\n else:\n raise ValueError('unsupported model:', model_name)\n\n if cv is not None:\n self.cv = cv\n\n def predict(self, X, Y):\n \"\"\"\n Use all columns of X (one-by-one) to predict each column of Y;\n For each column of Y:\n Find the location of the column of X which has the maximal prediction score;\n Record the location, and corresponding score and model.\n\n Parameters:\n ----------\n X[ndarray]: shape=(n_sample, n_feature)\n Y[ndarray]: shape=(n_sample, n_target)\n\n Return:\n ------\n pred_dict[dict]:\n score[ndarray]: shape=(n_target,)\n model[ndarray]: shape=(n_target,)\n location[ndarray]: shape=(n_target,)\n If model_name == 
'corr', the score is R square.\n And the model is None.\n \"\"\"\n assert X.ndim == 2, \"X's shape must be (n_sample, n_feature)!\"\n assert Y.ndim == 2, \"Y's shape must be (n_sample, n_target)!\"\n assert X.shape[0] == Y.shape[0], 'X and Y must have the ' \\\n 'same number of samples!'\n n_feat = X.shape[1]\n n_trg = Y.shape[1]\n scores = []\n models = []\n locations = []\n for trg_idx in range(n_trg):\n y = Y[:, trg_idx]\n if self.model == 'corr':\n scores_tmp = pairwise_distances(X.T, y.reshape(1, -1), 'correlation')\n scores_tmp = (1 - scores_tmp.ravel()) ** 2\n else:\n scores_tmp = []\n for feat_idx in range(n_feat):\n cv_scores = cross_val_score(self.model, X[:, feat_idx][:, None], y,\n scoring=self.score_evl, cv=self.cv)\n scores_tmp.append(np.mean(cv_scores))\n\n # find maximal score and its location\n max_feat_idx = np.argmax(scores_tmp)\n locations.append(max_feat_idx)\n max_score = scores_tmp[max_feat_idx]\n scores.append(max_score)\n\n # fit the model with maximal score\n if self.model == 'corr':\n models.append(None)\n else:\n max_model = self.model.fit(X[:, max_feat_idx][:, None], y)\n models.append(deepcopy(max_model))\n\n pred_dict = {\n 'score': np.array(scores),\n 'model': np.array(models),\n 'location': np.array(locations)\n }\n return pred_dict\n\n\nclass MultivariatePredictionModel:\n\n def __init__(self, model_name=None, cv=3):\n \"\"\"\n Parameters:\n ----------\n model_name[str]: name of a model used to do prediction\n cv[int]: cross validation fold number\n \"\"\"\n self.set(model_name, cv)\n\n def set(self, model_name=None, cv=None):\n \"\"\"\n Set some attributes\n\n Parameters:\n ----------\n model_name[str]: name of a model used to do prediction\n cv[int]: cross validation fold number\n \"\"\"\n if model_name is None:\n pass\n elif model_name == 'lrc':\n self.model = LogisticRegression()\n self.score_evl = 'accuracy'\n elif model_name == 'svc':\n self.model = SVC(kernel='linear', C=0.025)\n self.score_evl = 'accuracy'\n elif model_name == 'glm':\n self.model = LinearRegression()\n self.score_evl = 'explained_variance'\n elif model_name == 'lasso':\n self.model = Lasso()\n self.score_evl = 'explained_variance'\n else:\n raise ValueError('unsupported model:', model_name)\n\n if cv is not None:\n self.cv = cv\n\n def predict(self, X, Y):\n \"\"\"\n Use all columns of X to predict each column of Y.\n\n Parameters:\n ----------\n X[ndarray]: shape=(n_sample, n_feature)\n Y[ndarray]: shape=(n_sample, n_target)\n\n Return:\n ------\n pred_dict[dict]:\n score[ndarray]: shape=(n_target,)\n model[ndarray]: shape=(n_target,)\n \"\"\"\n assert X.ndim == 2, \"X's shape must be (n_sample, n_feature)!\"\n assert Y.ndim == 2, \"Y's shape must be (n_sample, n_target)!\"\n assert X.shape[0] == Y.shape[0], 'X and Y must have the ' \\\n 'same number of samples!'\n n_trg = Y.shape[1]\n scores = []\n models = []\n for trg_idx in range(n_trg):\n y = Y[:, trg_idx]\n cv_scores = cross_val_score(self.model, X, y,\n scoring=self.score_evl, cv=self.cv)\n # recording\n scores.append(np.mean(cv_scores))\n models.append(deepcopy(self.model.fit(X, y)))\n\n pred_dict = {\n 'score': np.array(scores),\n 'model': np.array(models)\n }\n return pred_dict\n\n\ndef dnn_mask(dnn_acts, channels=None, rows=None, columns=None):\n \"\"\"\n Extract DNN activation\n\n Parameters:\n ----------\n dnn_acts[array]: DNN activation\n A 4D array with its shape as (n_stim, n_chn, n_row, n_col)\n channels[list]: sequence numbers of channels of interest.\n rows[list]: sequence numbers of rows of interest.\n 
columns[list]: sequence numbers of columns of interest.\n\n Return:\n ------\n dnn_acts[array]: DNN activation after mask\n a 4D array with its shape as (n_stim, n_chn, n_row, n_col)\n \"\"\"\n if channels is not None:\n channels = [chn-1 for chn in channels]\n dnn_acts = dnn_acts[:, channels, :, :]\n if rows is not None:\n rows = [row-1 for row in rows]\n dnn_acts = dnn_acts[:, :, rows, :]\n if columns is not None:\n columns = [col-1 for col in columns]\n dnn_acts = dnn_acts[:, :, :, columns]\n\n return dnn_acts\n\n\ndef dnn_fe(dnn_acts, method, n_feat, axis=None):\n \"\"\"\n Extract features of DNN activation\n\n Parameters:\n ----------\n dnn_acts[array]: DNN activation\n a 4D array with its shape as (n_stim, n_chn, n_row, n_col)\n method[str]: feature extraction method, choices=(pca, hist, psd)\n pca: use n_feat principal components as features\n hist: use histogram of activation as features\n Note: n_feat equal-width bins in the given range will be used!\n psd: use power spectral density as features\n n_feat[int]: The number of features to extract\n axis{str}: axis for feature extraction, choices=(chn, row_col)\n If is chn, extract feature along channel axis.\n The result will be an array with shape\n as (n_stim, n_feat, n_row, n_col)\n If is row_col, extract feature alone row and column axis.\n The result will be an array with shape\n as (n_stim, n_chn, n_feat, 1)\n If is None, extract features from the whole layer.\n The result will be an array with shape\n as (n_stim, n_feat, 1, 1)\n We always regard the shape of the result as (n_stim, n_chn, n_row, n_col)\n\n Return:\n ------\n dnn_acts_new[array]: DNN activation\n a 4D array with its shape as (n_stim, n_chn, n_row, n_col)\n \"\"\"\n # adjust iterative axis\n n_stim, n_chn, n_row, n_col = dnn_acts.shape\n dnn_acts = dnn_acts.reshape((n_stim, n_chn, n_row*n_col))\n if axis is None:\n dnn_acts = dnn_acts.reshape((n_stim, 1, -1))\n elif axis == 'chn':\n dnn_acts = dnn_acts.transpose((0, 2, 1))\n elif axis == 'row_col':\n pass\n else:\n raise ValueError('not supported axis:', axis)\n _, n_iter, _ = dnn_acts.shape\n\n # extract features\n dnn_acts_new = np.zeros((n_stim, n_iter, n_feat))\n if method == 'pca':\n pca = PCA(n_components=n_feat)\n for i in range(n_iter):\n dnn_acts_new[:, i, :] = pca.fit_transform(dnn_acts[:, i, :])\n elif method == 'hist':\n for i in range(n_iter):\n for j in range(n_stim):\n dnn_acts_new[j, i, :] = np.histogram(dnn_acts[j, i, :], n_feat)[0]\n elif method == 'psd':\n for i in range(n_iter):\n for j in range(n_stim):\n f, p = periodogram(dnn_acts[j, i, :])\n dnn_acts_new[j, i, :] = p[:n_feat]\n else:\n raise ValueError('not supported method:', method)\n\n # adjust iterative axis\n if axis is None:\n dnn_acts_new = dnn_acts_new.transpose((0, 2, 1))\n dnn_acts_new = dnn_acts_new[:, :, :, None]\n elif axis == 'chn':\n dnn_acts_new = dnn_acts_new.transpose((0, 2, 1))\n dnn_acts_new = dnn_acts_new.reshape((n_stim, n_feat, n_row, n_col))\n else:\n dnn_acts_new = dnn_acts_new[:, :, :, None]\n\n return dnn_acts_new\n"
] |
[
[
"sklearn.model_selection.cross_val_score",
"sklearn.linear_model.LogisticRegression",
"numpy.histogram",
"torch.zeros",
"torch.cat",
"numpy.median",
"numpy.linalg.norm",
"torch.unsqueeze",
"sklearn.linear_model.Lasso",
"numpy.max",
"numpy.argmax",
"numpy.mean",
"sklearn.linear_model.LinearRegression",
"sklearn.svm.SVC",
"scipy.signal.periodogram",
"numpy.array",
"numpy.zeros",
"sklearn.decomposition.PCA"
]
] |
crioso/PICwriter
|
[
"24b4ca37361899cba9d23c057b14429055a3da0f"
] |
[
"picwriter/components/disk.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nimport gdspy\nimport uuid\nimport picwriter.toolkit as tk\n\nclass Disk(gdspy.Cell):\n \"\"\" Disk Resonator Cell class (subclass of gdspy.Cell).\n\n Args:\n * **wgt** (WaveguideTemplate): WaveguideTemplate object\n * **radius** (float): Radius of the resonator\n * **coupling_gap** (float): Distance between the bus waveguide and resonator\n\n Keyword Args:\n * **wrap_angle** (float): Angle in *radians* between 0 and pi (defaults to 0) that determines how much the bus waveguide wraps along the resonator. 0 corresponds to a straight bus waveguide, and pi corresponds to a bus waveguide wrapped around half of the resonator.\n * **parity** (1 or -1): If 1, resonator to left of bus waveguide, if -1 resonator to the right\n * **port** (tuple): Cartesian coordinate of the input port (x1, y1)\n * **direction** (string): Direction that the component will point *towards*, can be of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, OR an angle (float, in radians)\n\n Members:\n * **portlist** (dict): Dictionary with the relevant port information\n\n Portlist format:\n * portlist['input'] = {'port': (x1,y1), 'direction': 'dir1'}\n * portlist['output'] = {'port': (x2, y2), 'direction': 'dir2'}\n\n Where in the above (x1,y1) is the same as the 'port' input, (x2, y2) is the end of the component, and 'dir1', 'dir2' are of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, *or* an angle in *radians*.\n 'Direction' points *towards* the waveguide that will connect to it.\n\n \"\"\"\n def __init__(self, wgt, radius, coupling_gap, wrap_angle=0, parity=1, port=(0,0), direction='EAST'):\n gdspy.Cell.__init__(self, \"Disk--\"+str(uuid.uuid4()))\n\n self.portlist = {}\n\n self.port = port\n # self.trace=[port, tk.translate_point(port, 2*radius, direction)]\n self.direction = direction\n\n self.radius = radius\n self.coupling_gap = coupling_gap\n self.wrap_angle = wrap_angle\n if (wrap_angle > np.pi) or (wrap_angle < 0):\n raise ValueError(\"Warning! 
Wrap_angle is nor a valid angle between 0 and pi.\")\n self.parity = parity\n self.resist = wgt.resist\n self.wgt = wgt\n self.wg_spec = {'layer': wgt.wg_layer, 'datatype': wgt.wg_datatype}\n self.clad_spec = {'layer': wgt.clad_layer, 'datatype': wgt.clad_datatype}\n\n self.build_cell()\n self.build_ports()\n\n def build_cell(self):\n # Sequentially build all the geometric shapes using gdspy path functions\n # for waveguide, then add it to the Cell\n\n if self.wrap_angle==0:\n bus_length = 2*self.radius\n # Add bus waveguide with cladding\n path = gdspy.Path(self.wgt.wg_width, self.port)\n path.segment(2*self.radius, direction='+x', **self.wg_spec)\n clad = gdspy.Path(2*self.wgt.clad_width+self.wgt.wg_width, self.port)\n clad.segment(2*self.radius, direction='+x', **self.clad_spec)\n\n # Ring resonator\n if self.parity==1:\n ring = gdspy.Round((self.port[0]+self.radius, self.port[1]+self.radius+self.wgt.wg_width + self.coupling_gap),\n self.radius+self.wgt.wg_width/2.0, number_of_points=0.1, **self.wg_spec)\n clad_ring = gdspy.Round((self.port[0]+self.radius, self.port[1]+self.radius+self.wgt.wg_width + self.coupling_gap),\n self.radius+self.wgt.wg_width/2.0+self.wgt.clad_width, number_of_points=0.1, **self.clad_spec)\n elif self.parity==-1:\n ring = gdspy.Round((self.port[0]+self.radius, self.port[1]-self.radius-self.wgt.wg_width - self.coupling_gap),\n self.radius+self.wgt.wg_width/2.0, number_of_points=0.1, **self.wg_spec)\n clad_ring = gdspy.Round((self.port[0]+self.radius, self.port[1] - self.radius - self.wgt.wg_width - self.coupling_gap),\n self.radius+self.wgt.wg_width/2.0+self.wgt.clad_width, number_of_points=0.1, **self.clad_spec)\n else:\n raise ValueError(\"Warning! Parity value is not an acceptable value (must be +1 or -1).\")\n elif self.wrap_angle>0:\n theta = self.wrap_angle/2.0\n rp = self.radius + self.wgt.wg_width + self.coupling_gap\n dx, dy = rp*np.sin(theta), rp - rp*np.cos(theta)\n bus_length = 2*self.radius if (4*dx < 2*self.radius) else 4*dx\n\n # Add bus waveguide with cladding that wraps\n path = gdspy.Path(self.wgt.wg_width, self.port)\n clad = gdspy.Path(2*self.wgt.clad_width+self.wgt.wg_width, self.port)\n if 4*dx < bus_length:\n path.segment((bus_length-4*dx)/2.0, direction='+x', **self.wg_spec)\n clad.segment((bus_length-4*dx)/2.0, direction='+x', **self.clad_spec)\n xcenter = self.port[0] + self.radius\n else:\n xcenter = self.port[0] + 2*dx\n\n if self.parity==1:\n path.arc(rp, np.pi/2.0, np.pi/2.0 - theta, number_of_points=0.1, **self.wg_spec)\n path.arc(rp, -np.pi/2.0 - theta, -np.pi/2.0 + theta, number_of_points=0.1, **self.wg_spec)\n path.arc(rp, np.pi/2.0 + theta, np.pi/2.0, number_of_points=0.1, **self.wg_spec)\n clad.arc(rp, np.pi/2.0, np.pi/2.0 - theta, number_of_points=0.1, **self.clad_spec)\n clad.arc(rp, -np.pi/2.0 - theta, -np.pi/2.0 + theta, number_of_points=0.1, **self.clad_spec)\n clad.arc(rp, np.pi/2.0 + theta, np.pi/2.0, number_of_points=0.1, **self.clad_spec)\n\n # Make the disk resonator\n ring = gdspy.Round((xcenter, self.port[1]+self.radius+self.wgt.wg_width + self.coupling_gap - 2*dy),\n self.radius+self.wgt.wg_width/2.0, number_of_points=0.1, **self.wg_spec)\n clad_ring = gdspy.Round((xcenter, self.port[1]+self.radius+self.wgt.wg_width + self.coupling_gap - 2*dy),\n self.radius+self.wgt.wg_width/2.0+self.wgt.clad_width, number_of_points=0.1, **self.clad_spec)\n\n elif self.parity==-1:\n path.arc(rp, -np.pi/2.0, -np.pi/2.0 + theta, number_of_points=0.1, **self.wg_spec)\n path.arc(rp, np.pi/2.0 + theta, np.pi/2.0 - theta, 
number_of_points=0.1, **self.wg_spec)\n path.arc(rp, -np.pi/2.0 - theta, -np.pi/2.0, number_of_points=0.1, **self.wg_spec)\n clad.arc(rp, -np.pi/2.0, -np.pi/2.0 + theta, number_of_points=0.1, **self.clad_spec)\n clad.arc(rp, np.pi/2.0 + theta, np.pi/2.0 - theta, number_of_points=0.1, **self.clad_spec)\n clad.arc(rp, -np.pi/2.0 - theta, -np.pi/2.0, number_of_points=0.1, **self.clad_spec)\n\n # Make the disk resonator\n ring = gdspy.Round((xcenter, self.port[1]-self.radius-self.wgt.wg_width - self.coupling_gap + 2*dy),\n self.radius+self.wgt.wg_width/2.0, number_of_points=0.1, **self.wg_spec)\n clad_ring = gdspy.Round((xcenter, self.port[1]-self.radius-self.wgt.wg_width - self.coupling_gap + 2*dy),\n self.radius+self.wgt.wg_width/2.0+self.wgt.clad_width, number_of_points=0.1, **self.clad_spec)\n\n\n if 4*dx < bus_length:\n path.segment((bus_length-4*dx)/2.0, **self.wg_spec)\n clad.segment((bus_length-4*dx)/2.0, **self.clad_spec)\n\n angle=0\n if self.direction==\"EAST\":\n self.port_output = (self.port[0]+bus_length, self.port[1])\n angle=0\n elif self.direction==\"NORTH\":\n self.port_output = (self.port[0], self.port[1]+bus_length)\n angle=np.pi/2.0\n elif self.direction==\"WEST\":\n self.port_output = (self.port[0]-bus_length, self.port[1])\n angle=np.pi\n elif self.direction==\"SOUTH\":\n self.port_output = (self.port[0], self.port[1]-bus_length)\n angle=-np.pi/2.0\n elif isinstance(self.direction, float):\n angle = self.direction\n self.port_output = (self.port[0]+bus_length*np.cos(angle), self.port[1]+bus_length*np.sin(angle))\n\n ring.rotate(angle, self.port)\n clad_ring.rotate(angle, self.port)\n path.rotate(angle, self.port)\n clad.rotate(angle, self.port)\n\n self.add(ring)\n self.add(clad_ring)\n self.add(path)\n self.add(clad)\n\n def build_ports(self):\n # Portlist format:\n # example: example: {'port':(x_position, y_position), 'direction': 'NORTH'}\n self.portlist[\"input\"] = {'port':self.port,\n 'direction':tk.flip_direction(self.direction)}\n self.portlist[\"output\"] = {'port':self.port_output,\n 'direction':self.direction}\n\nif __name__ == \"__main__\":\n from . import *\n top = gdspy.Cell(\"top\")\n wgt = WaveguideTemplate(bend_radius=50, resist='+')\n\n wg1=Waveguide([(0,0), (100,0)], wgt)\n tk.add(top, wg1)\n\n r1 = Disk(wgt, 60.0, 1.0, wrap_angle=np.pi/2., parity=1, **wg1.portlist[\"output\"])\n\n wg2=Waveguide([r1.portlist[\"output\"][\"port\"], (r1.portlist[\"output\"][\"port\"][0]+100, r1.portlist[\"output\"][\"port\"][1])], wgt)\n tk.add(top, wg2)\n\n r2 = Disk(wgt, 50.0, 0.8, wrap_angle=np.pi, parity=-1, **wg2.portlist[\"output\"])\n\n wg3=Waveguide([r2.portlist[\"output\"][\"port\"], (r2.portlist[\"output\"][\"port\"][0]+100, r2.portlist[\"output\"][\"port\"][1])], wgt)\n tk.add(top, wg3)\n\n r3 = Disk(wgt, 40.0, 0.6, parity=1, **wg3.portlist[\"output\"])\n\n wg4=Waveguide([r3.portlist[\"output\"][\"port\"], (r3.portlist[\"output\"][\"port\"][0]+100, r3.portlist[\"output\"][\"port\"][1])], wgt)\n tk.add(top, wg4)\n\n tk.add(top, r1)\n tk.add(top, r2)\n tk.add(top, r3)\n\n gdspy.LayoutViewer()\n # gdspy.write_gds('disk.gds', unit=1.0e-6, precision=1.0e-9)\n"
] |
[
[
"numpy.cos",
"numpy.sin"
]
] |
ducky-hong/pytorch-dcase-task3
|
[
"9272ce7b5ac2f4838908704f38d0392d519262c2"
] |
[
"data_loader/data_loaders.py"
] |
[
"import os\nimport glob\nimport itertools\nimport numpy as np\nimport torch\nfrom base import BaseDataLoader\nfrom torch.utils.data import Dataset, DataLoader\n \nclass FeatureNpyDataset(Dataset):\n def __init__(self, root_dir, datasets, transform=None):\n self.data = list(itertools.chain(*[glob.glob(os.path.join(root_dir, dataset_id, '*/*.npy')) for dataset_id in datasets]))\n self.transform = transform\n \n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n filepath = self.data[index]\n has_bird = int(filepath.split('/')[-2])\n feature = np.load(filepath)\n\n if self.transform:\n feature = self.transform(feature)\n\n return feature, has_bird\n\nclass BADDataLoader(BaseDataLoader):\n def __init__(self, data_dir, sample_rate, fold, batch_size, shuffle, validation_split, num_workers):\n self.data_dir = os.path.join(data_dir, str(sample_rate))\n self.batch_size = batch_size\n\n train_sets = os.listdir(self.data_dir)\n validation_sets = [train_sets.pop(fold)]\n \n transform = lambda a: torch.from_numpy(np.expand_dims(a[:501,:], axis=0)).float()\n self.dataset = FeatureNpyDataset(self.data_dir, train_sets, transform=transform)\n self.validation_dataset = FeatureNpyDataset(self.data_dir, validation_sets, transform=transform)\n\n print('Fold', fold, train_sets, len(self.dataset), validation_sets, len(self.validation_dataset))\n super(BADDataLoader, self).__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)\n\n def split_validation(self):\n init_kwargs = self.init_kwargs.copy()\n init_kwargs['dataset'] = self.validation_dataset\n return DataLoader(**init_kwargs)\n\nclass RawDataLoader(BaseDataLoader):\n def __init__(self, data_dir, sample_rate, fold, batch_size, shuffle, validation_split, num_workers):\n self.data_dir = os.path.join(data_dir, str(sample_rate))\n self.batch_size = batch_size\n\n train_sets = os.listdir(self.data_dir)\n validation_sets = [train_sets.pop(fold)]\n \n transform = lambda a: torch.from_numpy(np.expand_dims(a[:sample_rate*10], axis=0)).float()\n self.dataset = FeatureNpyDataset(self.data_dir, train_sets, transform=transform)\n self.validation_dataset = FeatureNpyDataset(self.data_dir, validation_sets, transform=transform)\n\n print('Fold', fold, train_sets, len(self.dataset), validation_sets, len(self.validation_dataset))\n super(RawDataLoader, self).__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)\n\n def split_validation(self):\n init_kwargs = self.init_kwargs.copy()\n init_kwargs['dataset'] = self.validation_dataset\n return DataLoader(**init_kwargs)\n\nclass BulbulDataLoader(BaseDataLoader):\n def __init__(self, data_dir, sample_rate, fold, batch_size, shuffle, validation_split, num_workers):\n self.data_dir = os.path.join(data_dir, str(sample_rate))\n self.batch_size = batch_size\n\n train_sets = os.listdir(self.data_dir)\n validation_sets = [train_sets.pop(fold)]\n \n transform = lambda a: torch.from_numpy(np.expand_dims(a[:716,:].T, axis=0)).float()\n self.dataset = FeatureNpyDataset(self.data_dir, train_sets, transform=transform)\n self.validation_dataset = FeatureNpyDataset(self.data_dir, validation_sets, transform=transform)\n\n print('Fold', fold, train_sets, len(self.dataset), validation_sets, len(self.validation_dataset))\n super(BulbulDataLoader, self).__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)\n\n def split_validation(self):\n init_kwargs = self.init_kwargs.copy()\n init_kwargs['dataset'] = self.validation_dataset\n return DataLoader(**init_kwargs)\n"
] |
[
[
"numpy.load",
"numpy.expand_dims",
"torch.utils.data.DataLoader"
]
] |
dustindall/idi-model
|
[
"5d026f4756f03f9cb797de5a8f0c3c6d2b349ccb"
] |
[
"tests/models/policy_models/disabled_deterministic_res/test_disabled_deterministic_res.py"
] |
[
 "import os\n\nimport pandas as pd\nimport pytest\nfrom footings.audit import AuditConfig, AuditStepConfig\nfrom footings.testing import assert_footings_files_equal\n\nfrom footings_idi_model.models import DValResRPMD\n\nCASES = [\n    (\n        \"test_1\",\n        {\n            \"valuation_dt\": pd.Timestamp(\"2005-02-10\"),\n            \"assumption_set\": \"STAT\",\n            \"policy_id\": \"M1\",\n            \"claim_id\": \"M1C1\",\n            \"gender\": \"M\",\n            \"birth_dt\": pd.Timestamp(\"1970-02-10\"),\n            \"incurred_dt\": pd.Timestamp(\"2005-02-10\"),\n            \"termination_dt\": pd.Timestamp(\"2037-02-10\"),\n            \"elimination_period\": 90,\n            \"idi_contract\": \"AS\",\n            \"idi_benefit_period\": \"TO67\",\n            \"idi_diagnosis_grp\": \"AG\",\n            \"idi_occupation_class\": \"M\",\n            \"cola_percent\": 0.0,\n            \"residual_benefit_percent\": 0.5,\n            \"benefit_amount\": 100.0,\n        },\n    ),\n]\n\n\n@pytest.fixture(scope=\"session\")\ndef tempdir(tmpdir_factory):\n    dir_name = os.path.dirname(__file__).split(\"/\")[-1]\n    return tmpdir_factory.mktemp(dir_name)\n\n\n@pytest.mark.parametrize(\"case\", CASES, ids=[x[0] for x in CASES])\ndef test_disabled_deterministic_res(case, tempdir):\n    name, parameters = case\n    test_file = tempdir.join(f\"test-{name}.json\")\n    expected_file = os.path.join(\n        os.path.dirname(__file__), \"audit_files\", f\"expected-{name}.json\",\n    )\n    config = AuditConfig(\n        show_signature=False,\n        show_docstring=False,\n        show_steps=True,\n        step_config=AuditStepConfig(\n            show_method_name=False,\n            show_docstring=False,\n            show_uses=True,\n            show_impacts=True,\n            show_output=True,\n            show_metadata=False,\n        ),\n    )\n    DValResRPMD(**parameters).audit(test_file, config=config)\n    exclude_list = [\n        \"*RUN_DATE_TIME\",\n        \"*MODEL_VERSION\",\n        \"*LAST_COMMIT\",\n    ]\n    assert_footings_files_equal(test_file, expected_file, exclude_keys=exclude_list)\n"
] |
[
[
"pandas.Timestamp"
]
] |
albertovilla/transformers
|
[
"47a98fc4cb6a561576309a57b315b042977d194c"
] |
[
"src/transformers/models/deberta/modeling_deberta.py"
] |
[
"# coding=utf-8\n# Copyright 2020 Microsoft and the Hugging Face Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch DeBERTa model. \"\"\"\n\nimport math\nfrom collections.abc import Sequence\n\nimport torch\nfrom torch import _softmax_backward_data, nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward\nfrom ...modeling_outputs import (\n BaseModelOutput,\n MaskedLMOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom .configuration_deberta import DebertaConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"DebertaConfig\"\n_TOKENIZER_FOR_DOC = \"DebertaTokenizer\"\n_CHECKPOINT_FOR_DOC = \"microsoft/deberta-base\"\n\nDEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/deberta-base\",\n \"microsoft/deberta-large\",\n \"microsoft/deberta-xlarge\",\n \"microsoft/deberta-base-mnli\",\n \"microsoft/deberta-large-mnli\",\n \"microsoft/deberta-xlarge-mnli\",\n]\n\n\nclass ContextPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)\n self.dropout = StableDropout(config.pooler_dropout)\n self.config = config\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n\n context_token = hidden_states[:, 0]\n context_token = self.dropout(context_token)\n pooled_output = self.dense(context_token)\n pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)\n return pooled_output\n\n @property\n def output_dim(self):\n return self.config.hidden_size\n\n\nclass XSoftmax(torch.autograd.Function):\n \"\"\"\n Masked Softmax which is optimized for saving memory\n\n Args:\n input (:obj:`torch.tensor`): The input tensor that will apply softmax.\n mask (:obj:`torch.IntTensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.\n dim (int): The dimension that will apply softmax\n\n Example::\n\n >>> import torch\n >>> from transformers.models.deberta.modeling_deberta import XSoftmax\n\n >>> # Make a tensor\n >>> x = torch.randn([4,20,100])\n\n >>> # Create a mask\n >>> mask = (x>0).int()\n\n >>> y = XSoftmax.apply(x, mask, dim=-1)\n \"\"\"\n\n @staticmethod\n def forward(self, input, mask, dim):\n self.dim = dim\n rmask = ~(mask.bool())\n\n output = input.masked_fill(rmask, float(\"-inf\"))\n output = torch.softmax(output, self.dim)\n output.masked_fill_(rmask, 0)\n self.save_for_backward(output)\n return output\n\n @staticmethod\n def backward(self, grad_output):\n (output,) = self.saved_tensors\n inputGrad = _softmax_backward_data(grad_output, output, self.dim, output)\n return inputGrad, None, None\n\n\nclass DropoutContext(object):\n def __init__(self):\n self.dropout = 0\n 
self.mask = None\n self.scale = 1\n self.reuse_mask = True\n\n\ndef get_mask(input, local_context):\n if not isinstance(local_context, DropoutContext):\n dropout = local_context\n mask = None\n else:\n dropout = local_context.dropout\n dropout *= local_context.scale\n mask = local_context.mask if local_context.reuse_mask else None\n\n if dropout > 0 and mask is None:\n mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool()\n\n if isinstance(local_context, DropoutContext):\n if local_context.mask is None:\n local_context.mask = mask\n\n return mask, dropout\n\n\nclass XDropout(torch.autograd.Function):\n \"\"\"Optimized dropout function to save computation and memory by using mask operation instead of multiplication.\"\"\"\n\n @staticmethod\n def forward(ctx, input, local_ctx):\n mask, dropout = get_mask(input, local_ctx)\n ctx.scale = 1.0 / (1 - dropout)\n if dropout > 0:\n ctx.save_for_backward(mask)\n return input.masked_fill(mask, 0) * ctx.scale\n else:\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.scale > 1:\n (mask,) = ctx.saved_tensors\n return grad_output.masked_fill(mask, 0) * ctx.scale, None\n else:\n return grad_output, None\n\n\nclass StableDropout(torch.nn.Module):\n \"\"\"\n Optimized dropout module for stabilizing the training\n\n Args:\n drop_prob (float): the dropout probabilities\n \"\"\"\n\n def __init__(self, drop_prob):\n super().__init__()\n self.drop_prob = drop_prob\n self.count = 0\n self.context_stack = None\n\n def forward(self, x):\n \"\"\"\n Call the module\n\n Args:\n x (:obj:`torch.tensor`): The input tensor to apply dropout\n \"\"\"\n if self.training and self.drop_prob > 0:\n return XDropout.apply(x, self.get_context())\n return x\n\n def clear_context(self):\n self.count = 0\n self.context_stack = None\n\n def init_context(self, reuse_mask=True, scale=1):\n if self.context_stack is None:\n self.context_stack = []\n self.count = 0\n for c in self.context_stack:\n c.reuse_mask = reuse_mask\n c.scale = scale\n\n def get_context(self):\n if self.context_stack is not None:\n if self.count >= len(self.context_stack):\n self.context_stack.append(DropoutContext())\n ctx = self.context_stack[self.count]\n ctx.dropout = self.drop_prob\n self.count += 1\n return ctx\n else:\n return self.drop_prob\n\n\nclass DebertaLayerNorm(nn.Module):\n \"\"\"LayerNorm module in the TF style (epsilon inside the square root).\"\"\"\n\n def __init__(self, size, eps=1e-12):\n super().__init__()\n self.weight = nn.Parameter(torch.ones(size))\n self.bias = nn.Parameter(torch.zeros(size))\n self.variance_epsilon = eps\n\n def forward(self, hidden_states):\n input_type = hidden_states.dtype\n hidden_states = hidden_states.float()\n mean = hidden_states.mean(-1, keepdim=True)\n variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)\n hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon)\n hidden_states = hidden_states.to(input_type)\n y = self.weight * hidden_states + self.bias\n return y\n\n\nclass DebertaSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return 
hidden_states\n\n\nclass DebertaAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = DisentangledSelfAttention(config)\n self.output = DebertaSelfOutput(config)\n self.config = config\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n self_output = self.self(\n hidden_states,\n attention_mask,\n return_att,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if return_att:\n self_output, att_matrix = self_output\n if query_states is None:\n query_states = hidden_states\n attention_output = self.output(self_output, query_states)\n\n if return_att:\n return (attention_output, att_matrix)\n else:\n return attention_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta\nclass DebertaIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass DebertaOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass DebertaLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = DebertaAttention(config)\n self.intermediate = DebertaIntermediate(config)\n self.output = DebertaOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n attention_output = self.attention(\n hidden_states,\n attention_mask,\n return_att=return_att,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if return_att:\n attention_output, att_matrix = attention_output\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n if return_att:\n return (layer_output, att_matrix)\n else:\n return layer_output\n\n\nclass DebertaEncoder(nn.Module):\n \"\"\"Modified BertEncoder with relative position bias support\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])\n self.relative_attention = getattr(config, \"relative_attention\", False)\n if self.relative_attention:\n self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n if self.max_relative_positions < 1:\n self.max_relative_positions = config.max_position_embeddings\n self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)\n\n def get_rel_embedding(self):\n rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None\n return rel_embeddings\n\n def 
get_attention_mask(self, attention_mask):\n if attention_mask.dim() <= 2:\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)\n attention_mask = attention_mask.byte()\n elif attention_mask.dim() == 3:\n attention_mask = attention_mask.unsqueeze(1)\n\n return attention_mask\n\n def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):\n if self.relative_attention and relative_pos is None:\n q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)\n relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device)\n return relative_pos\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n output_hidden_states=True,\n output_attentions=False,\n query_states=None,\n relative_pos=None,\n return_dict=True,\n ):\n attention_mask = self.get_attention_mask(attention_mask)\n relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)\n\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n if isinstance(hidden_states, Sequence):\n next_kv = hidden_states[0]\n else:\n next_kv = hidden_states\n rel_embeddings = self.get_rel_embedding()\n for i, layer_module in enumerate(self.layer):\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n hidden_states = layer_module(\n next_kv,\n attention_mask,\n output_attentions,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if output_attentions:\n hidden_states, att_m = hidden_states\n\n if query_states is not None:\n query_states = hidden_states\n if isinstance(hidden_states, Sequence):\n next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None\n else:\n next_kv = hidden_states\n\n if output_attentions:\n all_attentions = all_attentions + (att_m,)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\ndef build_relative_position(query_size, key_size, device):\n \"\"\"\n Build relative position according to the query and key\n\n We assume the absolute position of query :math:`P_q` is range from (0, query_size) and the absolute position of key\n :math:`P_k` is range from (0, key_size), The relative positions from query to key is :math:`R_{q \\\\rightarrow k} =\n P_q - P_k`\n\n Args:\n query_size (int): the length of query\n key_size (int): the length of key\n\n Return:\n :obj:`torch.LongTensor`: A tensor with shape [1, query_size, key_size]\n\n \"\"\"\n\n q_ids = torch.arange(query_size, dtype=torch.long, device=device)\n k_ids = torch.arange(key_size, dtype=torch.long, device=device)\n rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)\n rel_pos_ids = rel_pos_ids[:query_size, :]\n rel_pos_ids = rel_pos_ids.unsqueeze(0)\n return rel_pos_ids\n\n\n@torch.jit.script\ndef c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):\n return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])\n\n\n@torch.jit.script\ndef p2c_dynamic_expand(c2p_pos, query_layer, key_layer):\n return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), 
key_layer.size(-2)])\n\n\n@torch.jit.script\ndef pos_dynamic_expand(pos_index, p2c_att, key_layer):\n return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))\n\n\nclass DisentangledSelfAttention(torch.nn.Module):\n \"\"\"\n Disentangled self-attention module\n\n Parameters:\n config (:obj:`str`):\n A model config class instance with the configuration to build a new model. The schema is similar to\n `BertConfig`, for more details, please refer :class:`~transformers.DebertaConfig`\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.in_proj = torch.nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)\n self.q_bias = torch.nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))\n self.v_bias = torch.nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))\n self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []\n\n self.relative_attention = getattr(config, \"relative_attention\", False)\n self.talking_head = getattr(config, \"talking_head\", False)\n\n if self.talking_head:\n self.head_logits_proj = torch.nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)\n self.head_weights_proj = torch.nn.Linear(\n config.num_attention_heads, config.num_attention_heads, bias=False\n )\n\n if self.relative_attention:\n self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n if self.max_relative_positions < 1:\n self.max_relative_positions = config.max_position_embeddings\n self.pos_dropout = StableDropout(config.hidden_dropout_prob)\n\n if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_proj = torch.nn.Linear(config.hidden_size, self.all_head_size, bias=False)\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_q_proj = torch.nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = StableDropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n \"\"\"\n Call the module\n\n Args:\n hidden_states (:obj:`torch.FloatTensor`):\n Input states to the module usually the output from previous layer, it will be the Q,K and V in\n `Attention(Q,K,V)`\n\n attention_mask (:obj:`torch.ByteTensor`):\n An attention mask matrix of shape [`B`, `N`, `N`] where `B` is the batch size, `N` is the maximum\n sequence length in which element [i,j] = `1` means the `i` th token in the input can attend to the `j`\n th token.\n\n return_att (:obj:`bool`, optional):\n Whether return the attention matrix.\n\n query_states (:obj:`torch.FloatTensor`, optional):\n The `Q` state in `Attention(Q,K,V)`.\n\n relative_pos (:obj:`torch.LongTensor`):\n The relative position encoding between the tokens in the sequence. 
It's of shape [`B`, `N`, `N`] with\n values ranging in [`-max_relative_positions`, `max_relative_positions`].\n\n rel_embeddings (:obj:`torch.FloatTensor`):\n The embedding of relative distances. It's a tensor of shape [:math:`2 \\\\times\n \\\\text{max_relative_positions}`, `hidden_size`].\n\n\n \"\"\"\n if query_states is None:\n qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1)\n query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1)\n else:\n\n def linear(w, b, x):\n if b is not None:\n return torch.matmul(x, w.t()) + b.t()\n else:\n return torch.matmul(x, w.t()) # + b.t()\n\n ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)\n qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]\n qkvb = [None] * 3\n\n q = linear(qkvw[0], qkvb[0], query_states)\n k, v = [linear(qkvw[i], qkvb[i], hidden_states) for i in range(1, 3)]\n query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]]\n\n query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])\n value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])\n\n rel_att = None\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n scale_factor = 1 + len(self.pos_att_type)\n scale = math.sqrt(query_layer.size(-1) * scale_factor)\n query_layer = query_layer / scale\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n if self.relative_attention:\n rel_embeddings = self.pos_dropout(rel_embeddings)\n rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)\n\n if rel_att is not None:\n attention_scores = attention_scores + rel_att\n\n # bxhxlxd\n if self.talking_head:\n attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\n\n attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)\n attention_probs = self.dropout(attention_probs)\n if self.talking_head:\n attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\n\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (-1,)\n context_layer = context_layer.view(*new_context_layer_shape)\n if return_att:\n return (context_layer, attention_probs)\n else:\n return context_layer\n\n def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):\n if relative_pos is None:\n q = query_layer.size(-2)\n relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device)\n if relative_pos.dim() == 2:\n relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)\n elif relative_pos.dim() == 3:\n relative_pos = relative_pos.unsqueeze(1)\n # bxhxqxk\n elif relative_pos.dim() != 4:\n raise ValueError(f\"Relative position ids must be of dim 2 or 3 or 4. 
{relative_pos.dim()}\")\n\n att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions)\n relative_pos = relative_pos.long().to(query_layer.device)\n rel_embeddings = rel_embeddings[\n self.max_relative_positions - att_span : self.max_relative_positions + att_span, :\n ].unsqueeze(0)\n if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_key_layer = self.pos_proj(rel_embeddings)\n pos_key_layer = self.transpose_for_scores(pos_key_layer)\n\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_query_layer = self.pos_q_proj(rel_embeddings)\n pos_query_layer = self.transpose_for_scores(pos_query_layer)\n\n score = 0\n # content->position\n if \"c2p\" in self.pos_att_type:\n c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2))\n c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)\n c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos))\n score += c2p_att\n\n # position->content\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_query_layer /= math.sqrt(pos_query_layer.size(-1) * scale_factor)\n if query_layer.size(-2) != key_layer.size(-2):\n r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device)\n else:\n r_pos = relative_pos\n p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)\n if query_layer.size(-2) != key_layer.size(-2):\n pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)\n\n if \"p2c\" in self.pos_att_type:\n p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2))\n p2c_att = torch.gather(\n p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer)\n ).transpose(-1, -2)\n if query_layer.size(-2) != key_layer.size(-2):\n p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer))\n score += p2c_att\n\n return score\n\n\nclass DebertaEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n pad_token_id = getattr(config, \"pad_token_id\", 0)\n self.embedding_size = getattr(config, \"embedding_size\", config.hidden_size)\n self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)\n\n self.position_biased_input = getattr(config, \"position_biased_input\", True)\n if not self.position_biased_input:\n self.position_embeddings = None\n else:\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)\n\n if config.type_vocab_size > 0:\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)\n\n if self.embedding_size != config.hidden_size:\n self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)\n self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, :seq_length]\n\n 
if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n if self.position_embeddings is not None:\n position_embeddings = self.position_embeddings(position_ids.long())\n else:\n position_embeddings = torch.zeros_like(inputs_embeds)\n\n embeddings = inputs_embeds\n if self.position_biased_input:\n embeddings += position_embeddings\n if self.config.type_vocab_size > 0:\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings += token_type_embeddings\n\n if self.embedding_size != self.config.hidden_size:\n embeddings = self.embed_proj(embeddings)\n\n embeddings = self.LayerNorm(embeddings)\n\n if mask is not None:\n if mask.dim() != embeddings.dim():\n if mask.dim() == 4:\n mask = mask.squeeze(1).squeeze(1)\n mask = mask.unsqueeze(2)\n mask = mask.to(embeddings.dtype)\n\n embeddings = embeddings * mask\n\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass DebertaPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = DebertaConfig\n base_model_prefix = \"deberta\"\n _keys_to_ignore_on_load_missing = [\"position_ids\"]\n _keys_to_ignore_on_load_unexpected = [\"position_embeddings\"]\n\n def __init__(self, config):\n super().__init__(config)\n self._register_load_state_dict_pre_hook(self._pre_load_hook)\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n def _pre_load_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n \"\"\"\n Removes the classifier if it doesn't have the correct number of labels.\n \"\"\"\n self_state = self.state_dict()\n if (\n (\"classifier.weight\" in self_state)\n and (\"classifier.weight\" in state_dict)\n and self_state[\"classifier.weight\"].size() != state_dict[\"classifier.weight\"].size()\n ):\n logger.warning(\n f\"The checkpoint classifier head has a shape {state_dict['classifier.weight'].size()} and this model \"\n f\"classifier head has a shape {self_state['classifier.weight'].size()}. Ignoring the checkpoint \"\n f\"weights. You should train your model on new data.\"\n )\n del state_dict[\"classifier.weight\"]\n if \"classifier.bias\" in state_dict:\n del state_dict[\"classifier.bias\"]\n\n\nDEBERTA_START_DOCSTRING = r\"\"\"\n The DeBERTa model was proposed in `DeBERTa: Decoding-enhanced BERT with Disentangled Attention\n <https://arxiv.org/abs/2006.03654>`_ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build on top of\n BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two\n improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.```\n\n\n Parameters:\n config (:class:`~transformers.DebertaConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nDEBERTA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.DebertaTokenizer`. See\n :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaModel(DebertaPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.embeddings = DebertaEmbeddings(config)\n self.encoder = DebertaEncoder(config)\n self.z_steps = 0\n self.config = config\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings.word_embeddings = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n raise NotImplementedError(\"The prune function is not implemented in DeBERTa model.\")\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n mask=attention_mask,\n inputs_embeds=inputs_embeds,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask,\n output_hidden_states=True,\n output_attentions=output_attentions,\n return_dict=return_dict,\n )\n encoded_layers = encoder_outputs[1]\n\n if self.z_steps > 1:\n hidden_states = encoded_layers[-2]\n layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]\n query_states = encoded_layers[-1]\n rel_embeddings = self.encoder.get_rel_embedding()\n attention_mask = self.encoder.get_attention_mask(attention_mask)\n rel_pos = self.encoder.get_rel_pos(embedding_output)\n for layer in layers[1:]:\n query_states = layer(\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=query_states,\n relative_pos=rel_pos,\n rel_embeddings=rel_embeddings,\n )\n encoded_layers.append(query_states)\n\n sequence_output = encoded_layers[-1]\n\n if not return_dict:\n return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]\n\n return BaseModelOutput(\n last_hidden_state=sequence_output,\n hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"DeBERTa Model with a `language modeling` head on top. 
\"\"\", DEBERTA_START_DOCSTRING)\nclass DebertaForMaskedLM(DebertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.deberta = DebertaModel(config)\n self.cls = DebertaOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n# copied from transformers.models.bert.BertPredictionHeadTransform with bert -> deberta\nclass DebertaPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\n# copied from transformers.models.bert.BertLMPredictionHead with bert -> deberta\nclass DebertaLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = DebertaPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, 
bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\n# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta\nclass DebertaOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = DebertaLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaForSequenceClassification(DebertaPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n num_labels = getattr(config, \"num_labels\", 2)\n self.num_labels = num_labels\n\n self.deberta = DebertaModel(config)\n self.pooler = ContextPooler(config)\n output_dim = self.pooler.output_dim\n\n self.classifier = torch.nn.Linear(output_dim, num_labels)\n drop_out = getattr(config, \"cls_dropout\", None)\n drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out\n self.dropout = StableDropout(drop_out)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.deberta.get_input_embeddings()\n\n def set_input_embeddings(self, new_embeddings):\n self.deberta.set_input_embeddings(new_embeddings)\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n encoder_layer = outputs[0]\n pooled_output = self.pooler(encoder_layer)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # regression task\n loss_fn = torch.nn.MSELoss()\n logits = logits.view(-1).to(labels.dtype)\n loss = loss_fn(logits, labels.view(-1))\n elif labels.dim() == 1 or labels.size(-1) == 1:\n label_index = (labels >= 0).nonzero()\n labels = labels.long()\n if label_index.size(0) > 0:\n labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1)))\n labels = torch.gather(labels, 0, label_index.view(-1))\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))\n else:\n loss = torch.tensor(0).to(logits)\n else:\n log_softmax = torch.nn.LogSoftmax(-1)\n loss = -((log_softmax(logits) * labels).sum(-1)).mean()\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n else:\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaForTokenClassification(DebertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.deberta = DebertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaForQuestionAnswering(DebertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.deberta = DebertaModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
] |
[
[
"torch.softmax",
"torch.nn.Dropout",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.nn.LogSoftmax",
"torch.zeros",
"torch.sqrt",
"torch.empty_like",
"torch.zeros_like",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch._softmax_backward_data",
"torch.nn.Linear",
"torch.matmul",
"torch.tensor",
"torch.arange",
"torch.clamp",
"torch.nn.MSELoss"
]
] |
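The `XSoftmax` autograd function in the DeBERTa file above saves memory by reusing the softmax output in the backward pass. For reference, a minimal functional equivalent of its forward pass can be written with plain `torch` ops; `masked_softmax` is a hypothetical helper name, not part of the file, and the import of `XSoftmax` itself is taken from the file's own docstring example:

```python
import torch
from transformers.models.deberta.modeling_deberta import XSoftmax

def masked_softmax(scores, mask, dim=-1):
    # same mask convention as XSoftmax above: 0 = ignore, nonzero = attend
    rmask = ~mask.bool()
    out = torch.softmax(scores.masked_fill(rmask, float("-inf")), dim)
    # zero the ignored positions (and any fully-masked row, which softmax
    # would otherwise leave as NaN), matching XSoftmax's forward pass
    return out.masked_fill(rmask, 0.0)

x = torch.randn(4, 20, 100)
mask = (x > 0).int()
assert torch.allclose(masked_softmax(x, mask), XSoftmax.apply(x, mask, -1))
```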
hanna-hansen/MuseGAN
|
[
"77937b2506003037ea3989a7e4cf9980817339ad"
] |
[
"model.py"
] |
[
"import torch\nfrom torch import nn\n\n######################################\n#### Helper functions ######\n######################################\ndef initialize_weights(layer, mean=0.0, std=0.02):\n if isinstance(layer, nn.Conv3d) or isinstance(layer, nn.ConvTranspose2d):\n torch.nn.init.normal_(layer.weight, mean, std)\n elif isinstance(layer, nn.Linear) or isinstance(layer, nn.BatchNorm2d):\n torch.nn.init.normal_(layer.weight, mean, std)\n torch.nn.init.constant_(layer.bias, 0)\nclass Reshape(nn.Module):\n def __init__(self, dims=[32, 1, 1]):\n super().__init__()\n self.dims=dims\n \n def forward(self, x):\n N = x.size(0)\n return x.view(N,*self.dims)\n \n######################################\n#### Temporal Network ######\n######################################\nclass TempNetwork(nn.Module):\n def __init__(self, z_dim=32, n_bars=2, hid_channels=1024):\n super().__init__()\n self.Input = Reshape(dims=[z_dim, 1, 1])\n \n self.Base = nn.Sequential()\n self.Base.add_module('upconv0', nn.ConvTranspose2d(z_dim, hid_channels,\n kernel_size=(2, 1), stride=(1, 1), padding=0))\n self.Base.add_module('bn0', nn.BatchNorm2d(hid_channels))\n self.Base.add_module('relu0', nn.ReLU())\n \n self.Base.add_module('upconv1', nn.ConvTranspose2d(hid_channels, z_dim,\n kernel_size=(n_bars-1, 1), stride=(1, 1), padding=0))\n self.Base.add_module('bn1', nn.BatchNorm2d(z_dim))\n self.Base.add_module('relu1', nn.ReLU())\n \n self.Output = Reshape(dims=[z_dim, n_bars])\n \n def forward(self, x):\n x = self.Input(x)\n x = self.Base(x)\n x = self.Output(x)\n return x\n\n######################################\n###### BarGenerator ########\n######################################\nclass BarGenerator(nn.Module):\n def __init__(self, z_dim=32, n_steps_per_bar=16, n_pitches=84,\n hid_features=1024, hid_channels=512, out_channels=1):\n super().__init__()\n self.Input = nn.Sequential()\n self.Input.add_module('dense0', nn.Linear(4* z_dim, hid_features))\n self.Input.add_module('bn0', nn.BatchNorm1d(hid_features))\n self.Input.add_module('relu0', nn.ReLU())\n self.Input.add_module('reshape0',\n Reshape(dims=[hid_channels, hid_features//hid_channels, 1]))\n \n self.Base = nn.Sequential()\n self.Base.add_module('upconv1', nn.ConvTranspose2d(hid_channels, hid_channels,\n kernel_size=(2, 1), stride=(2, 1), padding=0))\n self.Base.add_module('bn1', nn.BatchNorm2d(hid_channels))\n self.Base.add_module('relu1', nn.ReLU())\n \n self.Base.add_module('upconv2', nn.ConvTranspose2d(hid_channels, hid_channels//2,\n kernel_size=(2, 1), stride=(2, 1), padding=0))\n self.Base.add_module('bn2', nn.BatchNorm2d(hid_channels//2))\n self.Base.add_module('relu2', nn.ReLU())\n \n self.Base.add_module('upconv3', nn.ConvTranspose2d(hid_channels//2, hid_channels//2,\n kernel_size=(2, 1), stride=(2, 1), padding=0))\n self.Base.add_module('bn3', nn.BatchNorm2d(hid_channels//2))\n self.Base.add_module('relu3', nn.ReLU())\n \n self.Base.add_module('upconv4', nn.ConvTranspose2d(hid_channels//2, hid_channels//2,\n kernel_size=(1, 7), stride=(1, 7), padding=0))\n self.Base.add_module('bn4', nn.BatchNorm2d(hid_channels//2))\n self.Base.add_module('relu4', nn.ReLU())\n \n self.Output = nn.Sequential()\n self.Output.add_module('upconv5',nn.ConvTranspose2d(hid_channels//2, out_channels,\n kernel_size=(1, 12), stride=(1, 12), padding=0))\n self.Output.add_module('reshape5', Reshape(dims=[1, 1, n_steps_per_bar, n_pitches]))\n \n def forward(self, x):\n x = self.Input(x)\n x = self.Base(x)\n x = self.Output(x)\n return 
x\n\n######################################\n###### MuseCritic #######\n######################################\nclass MuseCritic(nn.Module):\n def __init__(self, input_shape=(4, 2, 16, 84),\n hid_channels=128,\n hid_features=1024,\n out_features=1):\n super().__init__()\n n_tracks, n_bars, n_steps_per_bar, n_pitches = input_shape\n self.Input=nn.Identity()\n \n self.Base = nn.Sequential()\n self.Base.add_module('conv0', nn.Conv3d(n_tracks, hid_channels,\n kernel_size=(2, 1, 1), stride=(1,1,1), padding=0))\n self.Base.add_module('lrelu0', nn.LeakyReLU(0.3)) \n \n self.Base.add_module('conv1', nn.Conv3d(hid_channels, hid_channels,\n kernel_size=(n_bars-1, 1, 1), stride=(1,1,1), padding=0))\n self.Base.add_module('lrelu1', nn.LeakyReLU(0.3))\n \n self.Base.add_module('conv2', nn.Conv3d(hid_channels, hid_channels,\n kernel_size=(1, 1, 12), stride=(1,1,12), padding=0))\n self.Base.add_module('lrelu2', nn.LeakyReLU(0.3)) \n \n self.Base.add_module('conv3', nn.Conv3d(hid_channels, hid_channels,\n kernel_size=(1, 1, 7), stride=(1,1,7), padding=0))\n self.Base.add_module('lrelu3', nn.LeakyReLU(0.3)) \n \n self.Base.add_module('conv4', nn.Conv3d(hid_channels, hid_channels,\n kernel_size=(1, 2, 1), stride=(1,2,1), padding=0))\n self.Base.add_module('lrelu4', nn.LeakyReLU(0.3)) \n \n self.Base.add_module('conv5', nn.Conv3d(hid_channels, hid_channels,\n kernel_size=(1, 2, 1), stride=(1,2,1), padding=0))\n self.Base.add_module('lrelu5', nn.LeakyReLU(0.3)) \n \n self.Base.add_module('conv6', nn.Conv3d(hid_channels, 2*hid_channels,\n kernel_size=(1, 4, 1), stride=(1,2,1), padding=(0, 1, 0)))\n self.Base.add_module('lrelu6', nn.LeakyReLU(0.3)) \n \n self.Base.add_module('conv7', nn.Conv3d(2*hid_channels, 4*hid_channels,\n kernel_size=(1, 3, 1), stride=(1,2,1), padding=(0, 1, 0)))\n self.Base.add_module('lrelu7', nn.LeakyReLU(0.3))\n \n self.Output=nn.Sequential()\n self.Output.add_module('flatten', nn.Flatten())\n self.Output.add_module('linear', nn.Linear(4*hid_channels, hid_features))\n self.Output.add_module('lrelu', nn.LeakyReLU(0.3))\n self.Output.add_module('fc', nn.Linear(hid_features, out_features))\n \n def forward(self, x):\n x = self.Input(x)\n x = self.Base(x)\n x = self.Output(x)\n return x\n\n######################################\n###### MuseGenerator #######\n######################################\nclass MuseGenerator(nn.Module):\n def __init__(self, n_tracks=4, n_bars=2, n_steps_per_bar=16, n_pitches=84,\n z_dim=32, hid_channels=1024, hid_features=1024, out_channels=1):\n super().__init__()\n self.n_bars=n_bars\n self.n_tracks=n_tracks\n \n self.ChordsNetwork=TempNetwork(z_dim=z_dim,\n n_bars=n_bars,\n hid_channels=hid_channels)\n \n self.MelodyNetworks=nn.ModuleDict({})\n for n in range(n_tracks):\n self.MelodyNetworks.add_module('melodygen'+str(n), TempNetwork(z_dim=z_dim,\n n_bars=n_bars,\n hid_channels=hid_channels))\n self.BarGenerators=nn.ModuleDict({}) \n for n in range(n_tracks):\n self.BarGenerators.add_module('bargen'+str(n), BarGenerator(z_dim=z_dim,\n n_steps_per_bar=n_steps_per_bar,\n n_pitches=n_pitches,\n hid_features=hid_features,\n hid_channels=hid_channels//2,\n out_channels=out_channels))\n \n def forward(self, chords, style, melody, groove):\n # Chords ==> (N * dimZ)\n # Style ==> (N * dimZ)\n # Melody ==> (N * nTracks * dimZ)\n # Groove ==> (N * nTracks * dimZ)\n chordOuts = self.ChordsNetwork(chords)\n barOuts=[]\n for bar in range(self.n_bars):\n trackOuts=[]\n chordOut = chordOuts[:, :, bar]\n styleOut = style\n for track in range(self.n_tracks):\n melodyInp = 
melody[:, track, :]\n melodyOut = self.MelodyNetworks['melodygen'+str(track)](melodyInp)[:, :, bar]\n grooveOut = groove[:, track, :]\n z = torch.cat([chordOut, styleOut, melodyOut, grooveOut], dim=1)\n trackOuts.append(self.BarGenerators['bargen'+str(track)](z))\n trackOut = torch.cat(trackOuts, dim=1)\n barOuts.append(trackOut)\n out = torch.cat(barOuts, dim=2)\n # Out ==> (N * nTracks * nBars * nStepsPerBar * nPitches)\n return out"
] |
[
[
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.ModuleDict",
"torch.nn.Flatten",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.nn.Conv3d",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
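A quick smoke test for the MuseGAN modules above, assuming the default constructor arguments in `model.py` (`n_tracks=4`, `n_bars=2`, `n_steps_per_bar=16`, `n_pitches=84`, `z_dim=32`); the batch size and the import path are placeholders:

```python
import torch
from model import MuseGenerator, MuseCritic  # model.py from the repo above

generator = MuseGenerator()
critic = MuseCritic()

N = 8                              # arbitrary batch size
chords = torch.randn(N, 32)        # (N, z_dim)
style  = torch.randn(N, 32)        # (N, z_dim)
melody = torch.randn(N, 4, 32)     # (N, n_tracks, z_dim)
groove = torch.randn(N, 4, 32)     # (N, n_tracks, z_dim)

fake = generator(chords, style, melody, groove)
print(fake.shape)                  # torch.Size([8, 4, 2, 16, 84])
print(critic(fake).shape)          # torch.Size([8, 1]), one score per sample
```

With these defaults the critic's 3-D convolutions reduce the (2, 16, 84) bar/step/pitch volume to (1, 1, 1) at `4*hid_channels` channels, which is exactly what its final `Linear(4*hid_channels, hid_features)` layer expects.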
pulsarise/SCAMP-I
|
[
"a814d9851075b4a89a09bd772e9dcf5dfb26788a"
] |
[
"parameter_extraction.py"
] |
[
"#! /usr/bin/env python3\n\nimport numpy as np\nimport emcee\nimport argparse\nimport pandas\n\nfrom SCAMP_I.dmcorrcalc import get_old_new_DM\n\ndef get_bestfit_params(flat_burned_samples, P0, nbins):\n # Get the best fit params out from the chains.\n pc = np.percentile(flat_burned_samples[:, 0], [16, 50, 84])\n q = np.diff(pc)\n bestsig = pc[1]\n bestsig_std_n = q[0]\n bestsig_std_p = q[1]\n pc = np.percentile(flat_burned_samples[:, 1], [16, 50, 84])\n q = np.diff(pc)\n bestmu = pc[1]\n bestmu_std_n = q[0]\n bestmu_std_p = q[1]\n pc = np.percentile(flat_burned_samples[:, 2], [16, 50, 84])\n q = np.diff(pc)\n bestA = pc[1]\n bestA_std_n = q[0]\n bestA_std_p = q[1]\n pc = np.percentile(flat_burned_samples[:, 3], [16, 50, 84])\n q = np.diff(pc)\n besttau = pc[1]\n besttau_std_n = q[0]\n besttau_std_p = q[1]\n pc = np.percentile(flat_burned_samples[:, 4], [16, 50, 84])\n q = np.diff(pc)\n bestdc = pc[1]\n bestdc_std_n = q[0]\n bestdc_std_p = q[1]\n # Make arrays.\n best_params = np.array([bestsig, bestmu, bestA, besttau, bestdc])\n positive_error = np.array([bestsig_std_p, bestmu_std_p, bestA_std_p, besttau_std_p, bestdc_std_p])\n negative_error = np.array([bestsig_std_n, bestmu_std_n, bestA_std_n, besttau_std_n, bestdc_std_n])\n # Convert from bins to seconds (only mu, sigma and tau ie the time domain variables).\n bin_to_sec = P0/nbins\n best_params_sec = np.array([bestsig*bin_to_sec, bestmu*bin_to_sec, bestA, besttau*bin_to_sec, bestdc])\n positive_error_sec = np.array([bestsig_std_p*bin_to_sec, bestmu_std_p*bin_to_sec, bestA_std_p, besttau_std_p*bin_to_sec, bestdc_std_p])\n negative_error_sec = np.array([bestsig_std_n*bin_to_sec, bestmu_std_n*bin_to_sec, bestA_std_n, besttau_std_n*bin_to_sec, bestdc_std_n])\n return best_params_sec, positive_error_sec, negative_error_sec, best_params, positive_error, negative_error\n\ndef get_samples(readdir, samplesfilename, f, burnin=10000):\n # Read samples out of the h5 file.\n reader = emcee.backends.HDFBackend('{}/{}'.format(readdir, samplesfilename), name='{}'.format(f))\n samples = reader.get_chain()\n flat_burned_samples = reader.get_chain(discard=burnin, flat=True)\n return samples, flat_burned_samples\n\ndef get_params_from_samples(samplesfilename, samplesreaddir, passed_list, burnlist, nbins, P0, freqMHz):\n # Set things up for getting the parameters.\n ndim = 5\n nchans = len(freqMHz)\n param_array = np.zeros((nchans, ndim))\n sigma = []\n mean = []\n amplitude = []\n dc = []\n sigma_error = []\n mean_error = []\n amplitude_error = []\n dc_error = []\n tau = []\n tau_error = []\n for f in range(nchans):\n if passed_list[f] == False:\n sigma.append(np.nan)\n mean.append(np.nan)\n amplitude.append(np.nan)\n tau.append(np.nan)\n dc.append(np.nan)\n sigma_error.append(np.nan)\n mean_error.append(np.nan)\n amplitude_error.append(np.nan)\n tau_error.append(np.nan)\n dc_error.append(np.nan)\n else:\n samples, fbs = get_samples(samplesreaddir, samplesfilename, f, burnin=burnlist[f])\n # Parameters.\n params_sec, paramsplus_sec, paramsminus_sec, params_bins, paramsplus_bins, paramsminus_bins = get_bestfit_params(fbs, P0, nbins)\n bestsig, bestmu, bestA, besttau, bestdc = params_bins\n # Get all the parameters.\n sigma.append(params_sec[0])\n mean.append(params_sec[1])\n amplitude.append(params_sec[2])\n tau.append(params_sec[3])\n dc.append(params_sec[4])\n sigma_error.append((paramsplus_sec[0]+paramsminus_sec[0])/2)\n mean_error.append((paramsplus_sec[1]+paramsminus_sec[1])/2)\n amplitude_error.append((paramsplus_sec[2]+paramsminus_sec[2])/2)\n 
tau_error.append((paramsplus_sec[3]+paramsminus_sec[3])/2)\n dc_error.append((paramsplus_sec[4]+paramsminus_sec[4])/2)\n\n return mean, mean_error, sigma, sigma_error, amplitude, amplitude_error, dc, dc_error, tau, tau_error\n\n\nif __name__ == '__main__':\n # Define options to the script.\n parser = argparse.ArgumentParser()\n parser.add_argument('-i','--run_log', type=str, help=\"All relevant information required (about data and the MCMC run) to get best fit parameters out.\")\n parser.add_argument('-o', '--outputfilename', type=str, help='Name of file to which parameters are written.')\n parser.add_argument('-w', '--writedir', default='.', type=str, help='Location where output should be written.')\n args = parser.parse_args()\n\n # Read in key info required to extract parameters.\n psrinfo = pandas.read_csv(args.run_log, index_col=0)\n psrname = str(psrinfo.PSRJ.values[0])\n samplesreaddir = str(psrinfo.SAMPLESREADDIR.values[0])\n samplesfilename = str(psrinfo.SAMPLESFILENAME.values[0])\n P0 = float(psrinfo.PERIOD.values[0])\n nbins = int(psrinfo.NBIN.values[0])\n freq = np.array(psrinfo.FREQ.values, dtype=float)\n passed_list = np.array(psrinfo.PASSED.values, dtype=bool)\n burnlist = np.array(psrinfo.BURNFRAC.values, dtype=int)*np.array(psrinfo.RUNTIME.values, dtype=int)\n\n # Obtain parameters.\n mean, mean_error, sigma, sigma_error, amplitude, amplitude_error, dc, dc_error, tau, tau_error = get_params_from_samples(samplesfilename, samplesreaddir, passed_list, burnlist, nbins, P0, freq)\n\n # Apply to file.\n psrinfo[\"MEAN\"] = mean\n psrinfo[\"SIGMA\"] = sigma\n psrinfo[\"AMPLITUDE\"] = amplitude\n psrinfo[\"DC\"] = dc\n psrinfo[\"TAU\"] = tau\n psrinfo[\"MEAN_ERROR\"] = mean_error\n psrinfo[\"SIGMA_ERROR\"] = sigma_error\n psrinfo[\"AMPLITUDE_ERROR\"] = amplitude_error\n psrinfo[\"DC_ERROR\"] = dc_error\n psrinfo[\"TAU_ERROR\"] = tau_error\n\n # Get new DM by fitting means across frequency.\n DMdelta, DMdeltastd, DM_CCval, DM_CCvalstd, DMnew = get_old_new_DM(psrinfo)\n psrinfo['DM_DELTA'] = DMdelta\n psrinfo['DM_DELTA_ERROR'] = DMdeltastd\n psrinfo['DISP_DELTA'] = DM_CCval\n psrinfo['DISP_DELTA_ERROR'] = DM_CCvalstd\n psrinfo['DM_NEW'] = DMnew\n \n psrinfo.to_csv(\"{}/{}\".format(args.writedir,args.outputfilename))\n"
] |
[
[
"pandas.read_csv",
"numpy.percentile",
"numpy.diff",
"numpy.array",
"numpy.zeros"
]
] |
lks1248/SPH-EXA
|
[
"0cac399d43118bda1ed2a5e42b593eb18ca55c30"
] |
[
"scripts/slice.py"
] |
[
"#!/usr/bin/env python3\n\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport sys\n\n\ndef printSteps(fname):\n \"\"\" Display contents of HDF5 file: step, iteration and time \"\"\"\n ifile = h5py.File(fname, \"r\")\n print(fname, \"contains the following steps:\")\n print(\"hdf5 step number\".rjust(15), \"sph iteration\".rjust(15), \"time\".rjust(15))\n for i in range(len(list(ifile[\"/\"]))):\n h5step = ifile[\"Step#%d\" % i]\n print(\"%5d\".rjust(14) % i, \"%5d\".rjust(14) % h5step.attrs[\"step\"][0],\n \"%5f\".rjust(14) % h5step.attrs[\"time\"][0])\n\n\ndef readStep(fname, step):\n ifile = h5py.File(fname, \"r\")\n try:\n h5step = ifile[\"Step#%s\" % step]\n return h5step\n except KeyError:\n print(fname, \"step %s not found\" % step)\n printSteps(fname)\n sys.exit(1)\n\n\ndef plotSlice(fname, step):\n \"\"\" Plot a 2D xy-cross section with particles e.g. abs(z) < 0.1, using density as color \"\"\"\n\n h5step = readStep(fname, step)\n\n x = np.array(h5step[\"x\"])\n y = np.array(h5step[\"y\"])\n z = np.array(h5step[\"z\"])\n\n rho = np.array(h5step[\"rho\"])\n\n fig, ax = plt.subplots(figsize=(10, 8))\n ax.set_aspect('equal', adjustable='box')\n\n mask = abs(z) < 0.1\n # mask = (0.27 < z) & (z < 0.35)\n\n cm = plt.cm.get_cmap('plasma')\n\n plabel = fname + \", time = %3f\" % h5step.attrs[\"time\"][0] + \" N = %d\" % len(x)\n\n im = ax.scatter(x[mask], y[mask], c=rho[mask], s=10.0, cmap=cm, vmin=0, vmax=8, label=plabel)\n fig.colorbar(im)\n\n plt.legend(loc=\"lower left\")\n # plt.savefig(\"slice_%s_%3f.png\" % (fname, h5step.attrs[\"time\"][0]), format=\"png\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n # first cmdline argument: hdf5 file name to plot\n fname = sys.argv[1]\n\n # second cmdline argument: hdf5 step number to plot or print (-p) and exit\n step = sys.argv[2]\n if step == \"-p\":\n printSteps(fname)\n sys.exit(1)\n\n plotSlice(fname, step)\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.subplots",
"numpy.array",
"matplotlib.pyplot.show"
]
] |
q1135718080/yolov4-tiny
|
[
"d5248327da3ff56563e42b3786ed6a40ab9310df"
] |
[
"nets/yolo_training.py"
] |
[
"import math\nfrom random import shuffle\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom matplotlib.colors import hsv_to_rgb, rgb_to_hsv\nfrom PIL import Image\nfrom utils.utils import bbox_iou, merge_bboxes\n\n\ndef jaccard(_box_a, _box_b):\n b1_x1, b1_x2 = _box_a[:, 0] - _box_a[:, 2] / 2, _box_a[:, 0] + _box_a[:, 2] / 2\n b1_y1, b1_y2 = _box_a[:, 1] - _box_a[:, 3] / 2, _box_a[:, 1] + _box_a[:, 3] / 2\n b2_x1, b2_x2 = _box_b[:, 0] - _box_b[:, 2] / 2, _box_b[:, 0] + _box_b[:, 2] / 2\n b2_y1, b2_y2 = _box_b[:, 1] - _box_b[:, 3] / 2, _box_b[:, 1] + _box_b[:, 3] / 2\n box_a = torch.zeros_like(_box_a)\n box_b = torch.zeros_like(_box_b)\n box_a[:, 0], box_a[:, 1], box_a[:, 2], box_a[:, 3] = b1_x1, b1_y1, b1_x2, b1_y2\n box_b[:, 0], box_b[:, 1], box_b[:, 2], box_b[:, 3] = b2_x1, b2_y1, b2_x2, b2_y2\n A = box_a.size(0)\n B = box_b.size(0)\n max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),\n box_b[:, 2:].unsqueeze(0).expand(A, B, 2))\n min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),\n box_b[:, :2].unsqueeze(0).expand(A, B, 2))\n inter = torch.clamp((max_xy - min_xy), min=0)\n\n inter = inter[:, :, 0] * inter[:, :, 1]\n # 计算先验框和真实框各自的面积\n area_a = ((box_a[:, 2]-box_a[:, 0]) *\n (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]\n area_b = ((box_b[:, 2]-box_b[:, 0]) *\n (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]\n # 求IOU\n union = area_a + area_b - inter\n return inter / union # [A,B]\n \n#---------------------------------------------------#\n# 平滑标签\n#---------------------------------------------------#\ndef smooth_labels(y_true, label_smoothing,num_classes):\n return y_true * (1.0 - label_smoothing) + label_smoothing / num_classes\n\ndef box_ciou(b1, b2):\n \"\"\"\n 输入为:\n ----------\n b1: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n b2: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n\n 返回为:\n -------\n ciou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)\n \"\"\"\n # 求出预测框左上角右下角\n b1_xy = b1[..., :2]\n b1_wh = b1[..., 2:4]\n b1_wh_half = b1_wh/2.\n b1_mins = b1_xy - b1_wh_half\n b1_maxes = b1_xy + b1_wh_half\n # 求出真实框左上角右下角\n b2_xy = b2[..., :2]\n b2_wh = b2[..., 2:4]\n b2_wh_half = b2_wh/2.\n b2_mins = b2_xy - b2_wh_half\n b2_maxes = b2_xy + b2_wh_half\n\n # 求真实框和预测框所有的iou\n intersect_mins = torch.max(b1_mins, b2_mins)\n intersect_maxes = torch.min(b1_maxes, b2_maxes)\n intersect_wh = torch.max(intersect_maxes - intersect_mins, torch.zeros_like(intersect_maxes))\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n b1_area = b1_wh[..., 0] * b1_wh[..., 1]\n b2_area = b2_wh[..., 0] * b2_wh[..., 1]\n union_area = b1_area + b2_area - intersect_area\n iou = intersect_area / torch.clamp(union_area,min = 1e-6)\n\n # 计算中心的差距\n center_distance = torch.sum(torch.pow((b1_xy - b2_xy), 2), axis=-1)\n \n # 找到包裹两个框的最小框的左上角和右下角\n enclose_mins = torch.min(b1_mins, b2_mins)\n enclose_maxes = torch.max(b1_maxes, b2_maxes)\n enclose_wh = torch.max(enclose_maxes - enclose_mins, torch.zeros_like(intersect_maxes))\n # 计算对角线距离\n enclose_diagonal = torch.sum(torch.pow(enclose_wh,2), axis=-1)\n ciou = iou - 1.0 * (center_distance) / torch.clamp(enclose_diagonal,min = 1e-6)\n \n v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(b1_wh[..., 0]/torch.clamp(b1_wh[..., 1],min = 1e-6)) - torch.atan(b2_wh[..., 0]/torch.clamp(b2_wh[..., 1],min = 1e-6))), 2)\n alpha = v / torch.clamp((1.0 - iou + v),min=1e-6)\n ciou = ciou - alpha * v\n return ciou\n \ndef 
clip_by_tensor(t,t_min,t_max):\n t=t.float()\n result = (t >= t_min).float() * t + (t < t_min).float() * t_min\n result = (result <= t_max).float() * result + (result > t_max).float() * t_max\n return result\n\ndef MSELoss(pred,target):\n return (pred-target)**2\n\ndef BCELoss(pred,target):\n epsilon = 1e-7\n pred = clip_by_tensor(pred, epsilon, 1.0 - epsilon)\n output = -target * torch.log(pred) - (1.0 - target) * torch.log(1.0 - pred)\n return output\n\nclass YOLOLoss(nn.Module):\n def __init__(self, anchors, num_classes, img_size, label_smooth=0, cuda=True, normalize=True):\n super(YOLOLoss, self).__init__()\n self.anchors = anchors\n self.num_anchors = len(anchors)\n self.num_classes = num_classes\n self.bbox_attrs = 5 + num_classes\n self.img_size = img_size\n self.feature_length = [img_size[0]//32,img_size[0]//16]\n self.label_smooth = label_smooth\n\n self.ignore_threshold = 0.5\n self.lambda_conf = 1.0\n self.lambda_cls = 1.0\n self.lambda_loc = 1.0\n self.cuda = cuda\n self.normalize = normalize\n\n def forward(self, input, targets=None):\n #----------------------------------------------------#\n # input shape: bs, 3*(5+num_classes), 13, 13\n # bs, 3*(5+num_classes), 26, 26\n #----------------------------------------------------#\n \n #-----------------------#\n # Number of images in the batch\n #-----------------------#\n bs = input.size(0)\n #-----------------------#\n # Feature map height\n #-----------------------#\n in_h = input.size(2)\n #-----------------------#\n # Feature map width\n #-----------------------#\n in_w = input.size(3)\n\n #-----------------------------------------------------------------------#\n # Compute the stride,\n # i.e. how many pixels of the original image one feature point covers\n # for a 13x13 feature map, one feature point covers 32 pixels of the original image\n # for a 26x26 feature map, one feature point covers 16 pixels of the original image\n # stride_h = stride_w = 32, 16, 8\n #-----------------------------------------------------------------------#\n stride_h = self.img_size[1] / in_h\n stride_w = self.img_size[0] / in_w\n\n\n #-------------------------------------------------#\n # scaled_anchors are now sized relative to the feature map\n #-------------------------------------------------#\n scaled_anchors = [(a_w / stride_w, a_h / stride_h) for a_w, a_h in self.anchors]\n \n #-----------------------------------------------#\n # There are two inputs, with shapes\n # batch_size, 3, 13, 13, 5 + num_classes\n # batch_size, 3, 26, 26, 5 + num_classes\n #-----------------------------------------------#\n prediction = input.view(bs, int(self.num_anchors/2),\n self.bbox_attrs, in_h, in_w).permute(0, 1, 3, 4, 2).contiguous()\n \n # Objectness confidence (is there an object)\n conf = torch.sigmoid(prediction[..., 4])\n # Class confidence\n pred_cls = torch.sigmoid(prediction[..., 5:])\n\n #---------------------------------------------------------------#\n # Find which anchors contain objects,\n # using the IOU between ground-truth boxes and anchors\n # mask batch_size, 3, in_h, in_w feature points with objects\n # noobj_mask batch_size, 3, in_h, in_w feature points without objects\n # t_box batch_size, 3, in_h, in_w, 4 ground-truth centers and sizes\n # tconf batch_size, 3, in_h, in_w ground-truth confidence\n # tcls batch_size, 3, in_h, in_w, num_classes ground-truth classes\n #----------------------------------------------------------------#\n mask, noobj_mask, t_box, tconf, tcls, box_loss_scale_x, box_loss_scale_y = self.get_target(targets, scaled_anchors,in_w, in_h,self.ignore_threshold)\n\n #---------------------------------------------------------------#\n # Decode the predictions and measure their overlap with ground truth;\n # ignore points whose overlap is too large, since they are already\n # predicted accurately and are unsuitable as negative samples\n #----------------------------------------------------------------#\n noobj_mask, pred_boxes_for_ciou = self.get_ignore(prediction, targets, scaled_anchors, in_w, in_h, noobj_mask)\n\n if self.cuda:\n mask, noobj_mask = mask.cuda(), 
noobj_mask.cuda()\n box_loss_scale_x, box_loss_scale_y= box_loss_scale_x.cuda(), box_loss_scale_y.cuda()\n tconf, tcls = tconf.cuda(), tcls.cuda()\n pred_boxes_for_ciou = pred_boxes_for_ciou.cuda()\n t_box = t_box.cuda()\n\n box_loss_scale = 2 - box_loss_scale_x * box_loss_scale_y\n #---------------------------------------------------------------#\n # Compute the CIOU between predictions and ground truth\n #----------------------------------------------------------------#\n ciou = (1 - box_ciou( pred_boxes_for_ciou[mask.bool()], t_box[mask.bool()]))* box_loss_scale[mask.bool()]\n loss_loc = torch.sum(ciou)\n\n # Confidence loss\n loss_conf = torch.sum(BCELoss(conf, mask) * mask) + \\\n torch.sum(BCELoss(conf, mask) * noobj_mask)\n \n loss_cls = torch.sum(BCELoss(pred_cls[mask == 1], smooth_labels(tcls[mask == 1],self.label_smooth,self.num_classes)))\n \n loss = loss_conf * self.lambda_conf + loss_cls * self.lambda_cls + loss_loc * self.lambda_loc\n\n if self.normalize:\n num_pos = torch.sum(mask)\n num_pos = torch.max(num_pos, torch.ones_like(num_pos))\n else:\n num_pos = bs\n\n return loss, num_pos\n\n def get_target(self, target, anchors, in_w, in_h, ignore_threshold):\n #-----------------------------------------------------#\n # Number of images in the batch\n #-----------------------------------------------------#\n bs = len(target)\n #-------------------------------------------------------#\n # Indices of the anchors that belong to this feature map, used to filter anchors later\n #-------------------------------------------------------#\n anchor_index = [[3,4,5],[1,2,3]][self.feature_length.index(in_w)]\n \n #-------------------------------------------------------#\n # Create arrays of all zeros or all ones\n #-------------------------------------------------------#\n mask = torch.zeros(bs, int(self.num_anchors/2), in_h, in_w, requires_grad=False)\n noobj_mask = torch.ones(bs, int(self.num_anchors/2), in_h, in_w, requires_grad=False)\n\n tx = torch.zeros(bs, int(self.num_anchors/2), in_h, in_w, requires_grad=False)\n ty = torch.zeros(bs, int(self.num_anchors/2), in_h, in_w, requires_grad=False)\n tw = torch.zeros(bs, int(self.num_anchors/2), in_h, in_w, requires_grad=False)\n th = torch.zeros(bs, int(self.num_anchors/2), in_h, in_w, requires_grad=False)\n t_box = torch.zeros(bs, int(self.num_anchors/2), in_h, in_w, 4, requires_grad=False)\n tconf = torch.zeros(bs, int(self.num_anchors/2), in_h, in_w, requires_grad=False)\n tcls = torch.zeros(bs, int(self.num_anchors/2), in_h, in_w, self.num_classes, requires_grad=False)\n\n box_loss_scale_x = torch.zeros(bs, int(self.num_anchors/2), in_h, in_w, requires_grad=False)\n box_loss_scale_y = torch.zeros(bs, int(self.num_anchors/2), in_h, in_w, requires_grad=False)\n for b in range(bs):\n if len(target[b])==0:\n continue\n #-------------------------------------------------------#\n # Centers of the positive samples on the feature map\n #-------------------------------------------------------#\n gxs = target[b][:, 0:1] * in_w\n gys = target[b][:, 1:2] * in_h\n \n #-------------------------------------------------------#\n # Widths/heights of the positive samples relative to the feature map\n #-------------------------------------------------------#\n gws = target[b][:, 2:3] * in_w\n ghs = target[b][:, 3:4] * in_h\n\n #-------------------------------------------------------#\n # Feature point each positive sample falls on\n #-------------------------------------------------------#\n gis = torch.floor(gxs)\n gjs = torch.floor(gys)\n \n #-------------------------------------------------------#\n # Convert the ground-truth boxes to another form\n # num_true_box, 4\n #-------------------------------------------------------#\n gt_box = torch.FloatTensor(torch.cat([torch.zeros_like(gws), torch.zeros_like(ghs), gws, ghs], 1))\n \n 
#-------------------------------------------------------#\n # Convert the anchors to another form\n # 6, 4\n #-------------------------------------------------------#\n anchor_shapes = torch.FloatTensor(torch.cat((torch.zeros((self.num_anchors, 2)), torch.FloatTensor(anchors)), 1))\n #-------------------------------------------------------#\n # Compute the IOU\n # num_true_box, 6\n #-------------------------------------------------------#\n anch_ious = jaccard(gt_box, anchor_shapes)\n\n #-------------------------------------------------------#\n # Find the best-matching anchor\n # num_true_box, \n #-------------------------------------------------------#\n best_ns = torch.argmax(anch_ious,dim=-1)\n for i, best_n in enumerate(best_ns):\n if best_n not in anchor_index:\n continue\n #-------------------------------------------------------------#\n # Extract the coordinates:\n # gi, gj: x/y coordinates of the feature point matching the ground-truth box\n # gx, gy: x/y coordinates of the ground-truth box\n # gw, gh: width and height of the ground-truth box\n #-------------------------------------------------------------#\n gi = gis[i].long()\n gj = gjs[i].long()\n gx = gxs[i]\n gy = gys[i]\n gw = gws[i]\n gh = ghs[i]\n if (gj < in_h) and (gi < in_w):\n best_n = anchor_index.index(best_n)\n #----------------------------------------#\n # noobj_mask marks feature points without objects\n #----------------------------------------#\n noobj_mask[b, best_n, gj, gi] = 0\n #----------------------------------------#\n # mask marks feature points with objects\n #----------------------------------------#\n mask[b, best_n, gj, gi] = 1\n #----------------------------------------#\n # tx, ty: ground-truth center\n #----------------------------------------#\n tx[b, best_n, gj, gi] = gx\n ty[b, best_n, gj, gi] = gy\n #----------------------------------------#\n # tw, th: ground-truth width/height\n #----------------------------------------#\n tw[b, best_n, gj, gi] = gw\n th[b, best_n, gj, gi] = gh\n #----------------------------------------#\n # Scale factor for the xywh loss:\n # large objects get small loss weights, small objects get large ones\n #----------------------------------------#\n box_loss_scale_x[b, best_n, gj, gi] = target[b][i, 2]\n box_loss_scale_y[b, best_n, gj, gi] = target[b][i, 3]\n #----------------------------------------#\n # tconf: objectness\n #----------------------------------------#\n tconf[b, best_n, gj, gi] = 1\n #----------------------------------------#\n # tcls: class label\n #----------------------------------------#\n tcls[b, best_n, gj, gi, target[b][i, 4].long()] = 1\n else:\n print('Step {0} out of bound'.format(b))\n print('gj: {0}, height: {1} | gi: {2}, width: {3}'.format(gj, in_h, gi, in_w))\n continue\n t_box[...,0] = tx\n t_box[...,1] = ty\n t_box[...,2] = tw\n t_box[...,3] = th\n return mask, noobj_mask, t_box, tconf, tcls, box_loss_scale_x, box_loss_scale_y\n\n def get_ignore(self,prediction,target,scaled_anchors,in_w, in_h,noobj_mask):\n #-----------------------------------------------------#\n # Number of images in the batch\n #-----------------------------------------------------#\n bs = len(target)\n #-------------------------------------------------------#\n # Indices of the anchors that belong to this feature map, used to filter anchors later\n #-------------------------------------------------------#\n anchor_index = [[3,4,5],[1,2,3]][self.feature_length.index(in_w)]\n scaled_anchors = np.array(scaled_anchors)[anchor_index]\n\n # Offsets for the anchor centers\n x = torch.sigmoid(prediction[..., 0]) \n y = torch.sigmoid(prediction[..., 1])\n # Adjustments for the anchor width/height\n w = prediction[..., 2] # Width\n h = prediction[..., 3] # Height\n\n FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor\n LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor\n\n # Build the grid; anchor centers sit at the top-left of each cell\n grid_x = torch.linspace(0, in_w - 1, in_w).repeat(in_h, 1).repeat(\n int(bs*self.num_anchors/2), 
1, 1).view(x.shape).type(FloatTensor)\n grid_y = torch.linspace(0, in_h - 1, in_h).repeat(in_w, 1).t().repeat(\n int(bs*self.num_anchors/2), 1, 1).view(y.shape).type(FloatTensor)\n\n # Generate the anchor widths and heights\n anchor_w = FloatTensor(scaled_anchors).index_select(1, LongTensor([0]))\n anchor_h = FloatTensor(scaled_anchors).index_select(1, LongTensor([1]))\n \n anchor_w = anchor_w.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(w.shape)\n anchor_h = anchor_h.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(h.shape)\n \n #-------------------------------------------------------#\n # Compute the adjusted anchor centers and sizes\n #-------------------------------------------------------#\n pred_boxes = FloatTensor(prediction[..., :4].shape)\n pred_boxes[..., 0] = x + grid_x\n pred_boxes[..., 1] = y + grid_y\n pred_boxes[..., 2] = torch.exp(w) * anchor_w\n pred_boxes[..., 3] = torch.exp(h) * anchor_h\n for i in range(bs):\n pred_boxes_for_ignore = pred_boxes[i]\n #-------------------------------------------------------#\n # Reshape the predictions\n # pred_boxes_for_ignore num_anchors, 4\n #-------------------------------------------------------#\n pred_boxes_for_ignore = pred_boxes_for_ignore.view(-1, 4)\n #-------------------------------------------------------#\n # Compute the ground-truth boxes, scaled to the feature map\n # gt_box num_true_box, 4\n #-------------------------------------------------------#\n if len(target[i]) > 0:\n gx = target[i][:, 0:1] * in_w\n gy = target[i][:, 1:2] * in_h\n gw = target[i][:, 2:3] * in_w\n gh = target[i][:, 3:4] * in_h\n gt_box = torch.FloatTensor(torch.cat([gx, gy, gw, gh],-1)).type(FloatTensor)\n\n #-------------------------------------------------------#\n # Compute the IOU\n # anch_ious num_true_box, num_anchors\n #-------------------------------------------------------#\n anch_ious = jaccard(gt_box, pred_boxes_for_ignore)\n #-------------------------------------------------------#\n # Max overlap of each anchor with any ground-truth box\n # anch_ious_max num_anchors\n #-------------------------------------------------------#\n anch_ious_max, _ = torch.max(anch_ious,dim=0)\n anch_ious_max = anch_ious_max.view(pred_boxes[i].size()[:3])\n noobj_mask[i][anch_ious_max>self.ignore_threshold] = 0\n return noobj_mask, pred_boxes\n\ndef rand(a=0, b=1):\n return np.random.rand()*(b-a) + a\n\n\nclass Generator(object):\n def __init__(self,batch_size,\n train_lines, image_size,\n ):\n \n self.batch_size = batch_size\n self.train_lines = train_lines\n self.train_batches = len(train_lines)\n self.image_size = image_size\n \n def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5, random=True):\n '''Random preprocessing for real-time data augmentation'''\n line = annotation_line.split()\n image = Image.open(line[0])\n iw, ih = image.size\n h, w = input_shape\n box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])\n\n if not random:\n scale = min(w/iw, h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n dx = (w-nw)//2\n dy = (h-nh)//2\n\n image = image.resize((nw,nh), Image.BICUBIC)\n new_image = Image.new('RGB', (w,h), (128,128,128))\n new_image.paste(image, (dx, dy))\n image_data = np.array(new_image, np.float32)\n\n # Adjust the box coordinates\n box_data = np.zeros((len(box), 5))\n if len(box) > 0:\n np.random.shuffle(box)\n box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx\n box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy\n box[:, 0:2][box[:, 0:2] < 0] = 0\n box[:, 2][box[:, 2] > w] = w\n box[:, 3][box[:, 3] > h] = h\n box_w = box[:, 2] - box[:, 0]\n box_h = box[:, 3] - box[:, 1]\n box = box[np.logical_and(box_w > 1, box_h > 1)] # keep valid boxes\n box_data = np.zeros((len(box), 5))\n box_data[:len(box)] = box\n\n 
return image_data, box_data\n\n # resize image\n new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)\n scale = rand(.25, 2)\n if new_ar < 1:\n nh = int(scale*h)\n nw = int(nh*new_ar)\n else:\n nw = int(scale*w)\n nh = int(nw/new_ar)\n image = image.resize((nw,nh), Image.BICUBIC)\n\n # place image\n dx = int(rand(0, w-nw))\n dy = int(rand(0, h-nh))\n new_image = Image.new('RGB', (w,h), (128,128,128))\n new_image.paste(image, (dx, dy))\n image = new_image\n\n # flip image or not\n flip = rand()<.5\n if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)\n\n # distort image\n hue = rand(-hue, hue)\n sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)\n val = rand(1, val) if rand()<.5 else 1/rand(1, val)\n x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)\n x[..., 0] += hue*360\n x[..., 0][x[..., 0]>1] -= 1\n x[..., 0][x[..., 0]<0] += 1\n x[..., 1] *= sat\n x[..., 2] *= val\n x[x[:,:, 0]>360, 0] = 360\n x[:, :, 1:][x[:, :, 1:]>1] = 1\n x[x<0] = 0\n image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255\n\n # correct boxes\n box_data = np.zeros((len(box),5))\n if len(box)>0:\n np.random.shuffle(box)\n box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx\n box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy\n if flip: box[:, [0,2]] = w - box[:, [2,0]]\n box[:, 0:2][box[:, 0:2]<0] = 0\n box[:, 2][box[:, 2]>w] = w\n box[:, 3][box[:, 3]>h] = h\n box_w = box[:, 2] - box[:, 0]\n box_h = box[:, 3] - box[:, 1]\n box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box\n box_data = np.zeros((len(box),5))\n box_data[:len(box)] = box\n\n return image_data, box_data\n\n def get_random_data_with_Mosaic(self, annotation_line, input_shape, hue=.1, sat=1.5, val=1.5):\n '''random preprocessing for real-time data augmentation'''\n h, w = input_shape\n min_offset_x = 0.3\n min_offset_y = 0.3\n scale_low = 1-min(min_offset_x,min_offset_y)\n scale_high = scale_low+0.2\n\n image_datas = [] \n box_datas = []\n index = 0\n\n place_x = [0,0,int(w*min_offset_x),int(w*min_offset_x)]\n place_y = [0,int(h*min_offset_y),int(h*min_offset_y),0]\n for line in annotation_line:\n # Split each annotation line\n line_content = line.split()\n # Open the image\n image = Image.open(line_content[0])\n image = image.convert(\"RGB\") \n # Image size\n iw, ih = image.size\n # Box positions\n box = np.array([np.array(list(map(int,box.split(',')))) for box in line_content[1:]])\n \n # Randomly flip the image\n flip = rand()<.5\n if flip and len(box)>0:\n image = image.transpose(Image.FLIP_LEFT_RIGHT)\n box[:, [0,2]] = iw - box[:, [2,0]]\n\n # Rescale the input image\n new_ar = w/h\n scale = rand(scale_low, scale_high)\n if new_ar < 1:\n nh = int(scale*h)\n nw = int(nh*new_ar)\n else:\n nw = int(scale*w)\n nh = int(nw/new_ar)\n image = image.resize((nw,nh), Image.BICUBIC)\n\n # Color jitter (HSV)\n hue = rand(-hue, hue)\n sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)\n val = rand(1, val) if rand()<.5 else 1/rand(1, val)\n x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)\n x[..., 0] += hue*360\n x[..., 0][x[..., 0]>1] -= 1\n x[..., 0][x[..., 0]<0] += 1\n x[..., 1] *= sat\n x[..., 2] *= val\n x[x[:,:, 0]>360, 0] = 360\n x[:, :, 1:][x[:, :, 1:]>1] = 1\n x[x<0] = 0\n image = cv2.cvtColor(x, cv2.COLOR_HSV2RGB) # numpy array, 0 to 1\n \n image = Image.fromarray((image*255).astype(np.uint8))\n # Place the image at one of the four mosaic positions\n dx = place_x[index]\n dy = place_y[index]\n new_image = Image.new('RGB', (w,h), (128,128,128))\n new_image.paste(image, (dx, dy))\n image_data = np.array(new_image)\n\n \n index = index + 1\n box_data = []\n # Reprocess the boxes\n if len(box)>0:\n np.random.shuffle(box)\n box[:, 
[0,2]] = box[:, [0,2]]*nw/iw + dx\n box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy\n box[:, 0:2][box[:, 0:2]<0] = 0\n box[:, 2][box[:, 2]>w] = w\n box[:, 3][box[:, 3]>h] = h\n box_w = box[:, 2] - box[:, 0]\n box_h = box[:, 3] - box[:, 1]\n box = box[np.logical_and(box_w>1, box_h>1)]\n box_data = np.zeros((len(box),5))\n box_data[:len(box)] = box\n \n image_datas.append(image_data)\n box_datas.append(box_data)\n\n # Stitch the four crops together\n cutx = np.random.randint(int(w*min_offset_x), int(w*(1 - min_offset_x)))\n cuty = np.random.randint(int(h*min_offset_y), int(h*(1 - min_offset_y)))\n\n new_image = np.zeros([h,w,3])\n new_image[:cuty, :cutx, :] = image_datas[0][:cuty, :cutx, :]\n new_image[cuty:, :cutx, :] = image_datas[1][cuty:, :cutx, :]\n new_image[cuty:, cutx:, :] = image_datas[2][cuty:, cutx:, :]\n new_image[:cuty, cutx:, :] = image_datas[3][:cuty, cutx:, :]\n\n # Post-process the boxes\n new_boxes = np.array(merge_bboxes(box_datas, cutx, cuty))\n\n if len(new_boxes) == 0:\n return new_image, []\n if (new_boxes[:,:4]>0).any():\n return new_image, new_boxes\n else:\n return new_image, []\n\n def generate(self, train = True, mosaic = True):\n while True:\n shuffle(self.train_lines)\n lines = self.train_lines\n inputs = []\n targets = []\n flag = True\n n = len(lines)\n for i in range(len(lines)):\n if mosaic == True:\n if flag and (i+4) < n:\n img,y = self.get_random_data_with_Mosaic(lines[i:i+4], self.image_size[0:2])\n i = (i+4) % n\n else:\n img,y = self.get_random_data(lines[i], self.image_size[0:2], train)\n i = (i+1) % n\n flag = bool(1-flag)\n else:\n img,y = self.get_random_data(lines[i], self.image_size[0:2], train)\n i = (i+1) % n\n \n if len(y)!=0:\n boxes = np.array(y[:,:4],dtype=np.float32)\n boxes[:,0] = boxes[:,0]/self.image_size[1]\n boxes[:,1] = boxes[:,1]/self.image_size[0]\n boxes[:,2] = boxes[:,2]/self.image_size[1]\n boxes[:,3] = boxes[:,3]/self.image_size[0]\n\n boxes = np.maximum(np.minimum(boxes,1),0)\n boxes[:,2] = boxes[:,2] - boxes[:,0]\n boxes[:,3] = boxes[:,3] - boxes[:,1]\n \n boxes[:,0] = boxes[:,0] + boxes[:,2]/2\n boxes[:,1] = boxes[:,1] + boxes[:,3]/2\n y = np.concatenate([boxes,y[:,-1:]],axis=-1)\n \n img = np.array(img,dtype = np.float32)\n\n inputs.append(np.transpose(img/255.0,(2,0,1))) \n targets.append(np.array(y,dtype = np.float32))\n if len(targets) == self.batch_size:\n tmp_inp = np.array(inputs)\n tmp_targets = targets\n inputs = []\n targets = []\n yield tmp_inp, tmp_targets\n"
] |
[
[
"numpy.minimum",
"torch.max",
"torch.zeros",
"torch.cat",
"torch.sum",
"numpy.concatenate",
"torch.FloatTensor",
"torch.pow",
"numpy.zeros",
"torch.ones_like",
"torch.sigmoid",
"torch.linspace",
"torch.floor",
"torch.min",
"torch.zeros_like",
"torch.exp",
"torch.log",
"numpy.random.rand",
"numpy.transpose",
"numpy.logical_and",
"numpy.array",
"numpy.random.shuffle",
"torch.clamp",
"torch.argmax"
]
] |
koko1996/EECS-4415-NYC-Taxi-Uber
|
[
"0e45a57f58a3fa2bd2513ce6994c552017b7a708"
] |
[
"src/analysis_html/sparkplot-residential-analysis.py"
] |
[
"from pyspark import SparkConf,SparkContext\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.types import *\nfrom operator import add\nimport pyspark\nimport sys\nimport requests\nfrom pprint import pprint\nimport pandas as pd\nimport numpy as np\nfrom timescaleplot import graph\nfrom dateutil.parser import parse\nfrom datetime import datetime\n\n\n\"\"\"\nRIDE PER MONTH\nspark-submit sparkplot-business-improvement\n\"\"\"\n\n# takes a line in this format [pickup_date,pickup_time,borough] and returns a tuple in the following format\n#(pickupYear-pickupMonth,borough) where borough is the integer representation of the borough \ndef date_boro_mapper(x):\n \"\"\"\n simply map each borough to integer representation\n borough title :\n 0 is Staten Island\n 1 is Queens\n 2 is Brooklyn\n 3 is manhatan\n 4 is Bronx\n \"\"\"\n if x[2] == 'Staten Island':\n value = 0\n elif x[2] == 'Queens':\n value = 1\n elif x[2] == 'Brooklyn':\n value = 2\n elif x[2] == 'Manhattan':\n value = 3\n else:\n value = 4\n date = parse(x[0])\n return ((str(date.year) + \"-\" + str(date.month),value),1)\n\ndef date_boro_aggr(x):\n \"\"\"\n convert the [ [(year-month,borough),count],[(year-month,borough),count],[(year-month,borough),count],[(year-month,borough),count],[(year-month,borough),count]] where year-month is the same for all the entries of this array since we do group be before this mapping and boroughs are distinct\n to [list of aggregated borough counts] (we will have six versions of this each representing apr may jun of 2014 and 2015)\n \"\"\"\n temp = [0,0,0,0,0]\n for ele in x[1]:\n temp[int(ele[0][1])] += int(ele[1])\n return temp\n\nn_of_periods = 3 #3 month for now\n\n\nconf = SparkConf()\nconf.setAppName(\"Residential_Analysis\")\n\n# create spark context with the above configuration\nsc = SparkContext(conf=conf)\nsc.setLogLevel(\"ERROR\")\n\n#------Taxi Analysis\n\n# read the csv file which has no header\ntaxiFileWithNoHeader = sc.textFile(\"/taxi_combined.csv\")\n\n# map each entry to ((year-month, borough integer representaion),1) \ntaxi_date_boro = taxiFileWithNoHeader.map(lambda line: line.split(\",\")).map(date_boro_mapper)\n\n# reduce each mapped tuple by borough and year-month, then group by year-month so that we will have combined\n# data for only the months\ntaxi_d_b_group = taxi_date_boro.reduceByKey(lambda x,y: x + y).groupBy(lambda x: x[0][0])\n\n# sort the tuples by time frame (there should be 6 elements here hence sorting is not an issue)\ntaxi_d_b_sorted_group = taxi_d_b_group.sortBy(lambda x: x[0][0])\n\n# map the tuples to an array format to make the conversion to dataframe easy\ntaxi_d_b_sorted_group_d_f_format = taxi_d_b_sorted_group.map(date_boro_aggr)\n\n# convert out RDD to dataframe for the plotting\ntaxi_visu_data_frame = pd.DataFrame(taxi_d_b_sorted_group_d_f_format.collect(),index = [datetime.strptime(item[0], \"%Y-%m\") for item in taxi_d_b_sorted_group.collect()])\n\n# create the data frame for the given ranges with given frequencies\ntaxi_datetime_index = pd.date_range('2014-04-01', periods= n_of_periods, freq='MS')\ntaxi_visu = pd.DataFrame(index=taxi_datetime_index)\n\n# add the values of the RDD to the dataframe and fill zero in empty space \ntaxi_visu = taxi_visu.add(taxi_visu_data_frame, fill_value = 0).fillna(0)\n# print the output to stdout to make sure everything is as expected\npprint(taxi_visu)\n# create the html file for visualization \ngraph(taxi_visu,len(taxi_visu),'M','Monthly count per borough for taxi','2014-04')\n\n\n#------Uber Analysis\n\n# # read the 
csv file which has no header\n# uberFileWithNoHeader = sc.textFile(\"/uber_combined.csv\")\n\n# # map each entry to ((year-month, borough integer representaion),1) \n# uber_date_boro = uberFileWithNoHeader.map(lambda line: line.split(\",\")).map(date_boro_mapper)\n\n# # reduce each mapped tuple by borough and year-month, then group by year-month so that we will have combined\n# # data for only the months\n# uber_d_b_group = uber_date_boro.reduceByKey(lambda x,y: x + y).groupBy(lambda x: x[0][0])\n\n# # sort the tuples by time frame (there should be 6 elements here hence sorting is not an issue)\n# uber_d_b_sorted_group = uber_d_b_group.sortBy(lambda x: x[0][0])\n\n# # map the tuples to an array format to make the conversion to dataframe easy\n# uber_d_b_sorted_group_d_f_format = uber_d_b_sorted_group.map(date_boro_aggr)\n\n# # convert out RDD to dataframe for the plotting\n# uber_visu_data_frame = pd.DataFrame(uber_d_b_sorted_group_d_f_format.collect(),index = [datetime.strptime(item[0], \"%Y-%m\") for item in uber_d_b_sorted_group.collect()])\n\n# # create the data frame for the given ranges with given frequencies\n# uber_datetime_index = pd.date_range('2014-04-01', periods= n_of_periods, freq='MS')\n# uber_visu = pd.DataFrame(index=uber_datetime_index)\n# # add the values of the RDD to the dataframe and fill zero in empty space \n# uber_visu = uber_visu.add(uber_visu_data_frame, fill_value = 0).fillna(0)\n# # print the output to stdout to make sure everything is as expected\n# pprint(uber_visu)\n\n# # create the html file for visualization \n# graph(uber_visu,len(uber_visu),'M','Monthly count per borough for uber','2014-04')\n"
] |
[
[
"pandas.DataFrame",
"pandas.date_range"
]
] |
tejalkotkar/web-scraping-challenge
|
[
"583f03218498e4d0546ff4e7a92d4a26b85dcdf1"
] |
[
"Missions_to_Mars/scrape_mars.py"
] |
[
"from splinter import Browser\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom webdriver_manager.chrome import ChromeDriverManager\n\ndef init_browser():\n # @NOTE: Replace the path with your actual path to the chromedriver\n # executable_path = {\"executable_path\": \"chromedriver.exe\"}\n executable_path = {'executable_path': ChromeDriverManager().install()} \n return Browser(\"chrome\", **executable_path, headless=False)\n\ndef scrape_news_title(browser):\n\n # Scrape data for NASA Mars News\n url = 'https://mars.nasa.gov/news'\n browser.visit(url)\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n \n # Getting the news title\n results = soup.find_all('div', class_='content_title')\n\n title = []\n for tag in results:\n # If result element has an anchor...\n if (tag.a):\n # And the anchor has non-blank text...\n if (tag.a.text):\n # Append the text to the list\n title.append(tag.a.text)\n\n # Return Data\n return(title[0])\n\n\ndef scrape_news_paragraph(browser):\n\n # Scrape data for NASA Mars News\n url = 'https://mars.nasa.gov/news'\n browser.visit(url)\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n # Getting the news paragraph\n news_p = soup.find_all('div', class_='article_teaser_body')[0].text\n\n # Return Data\n return(news_p)\n\ndef scrape_featured_image_url(browser):\n \n ## JPL Mars Space Images - Featured Image\n featured_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n jpl_image_url = 'https://www.jpl.nasa.gov'\n browser.visit(featured_url)\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n # Getting the featured image\n result = soup.find('article')['style']\n relative_image_path = result.replace('background-image: url(','').replace(');', '').strip()[1:-1]\n featured_image_url = jpl_image_url + relative_image_path\n \n # Return Data\n return(featured_image_url)\n\n\ndef scrape_mars_facts(browser):\n \n fact_url = 'https://space-facts.com/mars/'\n tables = pd.read_html(fact_url)\n\n fact_df = tables[0]\n fact_df.columns = ['Description','Mars']\n fact_df.set_index('Description', inplace=True)\n\n html_table = fact_df.to_html(classes = 'table table-striped')\n\n # Return Data\n return(html_table)\n\n\ndef scrape_mars_hemispheres(browser):\n\n hemispheres_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n hemisphere_main_url = 'https://astrogeology.usgs.gov'\n\n browser.visit(hemispheres_url)\n\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n result_items = soup.find_all('div', class_='item')\n\n hemisphere_image_urls = []\n\n for item in result_items:\n # Get the title\n title = item.find('h3').text\n partial_img_url = item.find('a', class_='itemLink product-item')['href']\n complete_url = hemisphere_main_url + partial_img_url\n \n # Visit Url & Create soup object\n browser.visit(complete_url)\n\n image_html = browser.html\n soup = BeautifulSoup(image_html, 'html.parser')\n \n # Get the title\n image_path = soup.find('img', class_='wide-image')['src']\n img_url = hemisphere_main_url+image_path\n \n hemisphere_image_urls.append({'title':title, 'img_url':img_url})\n\n # Return Data\n return(hemisphere_image_urls)\n\ndef scrape():\n mars_info = {}\n\n # Initialise browser\n browser = init_browser()\n\n # Call functions of scraping data\n mars_info['news_title'] = scrape_news_title(browser)\n mars_info['news_p'] = scrape_news_paragraph(browser)\n mars_info['featured_image_url'] = scrape_featured_image_url(browser)\n 
mars_info['mars_facts'] = scrape_mars_facts(browser)\n mars_info['hemisphere_image_urls'] = scrape_mars_hemispheres(browser) \n\n # Close browser\n browser.quit()\n\n return (mars_info)"
] |
[
[
"pandas.read_html"
]
] |
Giyn/DoubanMovieRecommendationSystem
|
[
"f5a4b017a97f72ddbc8657e7d25a7093ac9e6fa2"
] |
[
"GUI/movie_detailed.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 14 09:26:03 2020\n\n@author: 许继元\n\"\"\"\n\nimport sys\n\nimport pandas as pd\nfrom PyQt5.QtGui import QIcon, QPixmap\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWidgets import QHBoxLayout\nfrom PyQt5.QtWidgets import QLabel\nfrom PyQt5.QtWidgets import QTextBrowser\nfrom PyQt5.QtWidgets import QVBoxLayout\nfrom PyQt5.QtWidgets import QWidget\n\n\nclass MovieDetailed(QWidget):\n def __init__(self, name):\n super(MovieDetailed, self).__init__() # 使用super函数可以实现子类使用父类的方法\n self.setWindowTitle(\"电影详细信息\")\n self.setWindowIcon(QIcon('../Data/douban.jpg')) # 设置窗口图标\n self.resize(1400, 800)\n self.name = name\n # 电影信息\n self.movies_detailed = pd.read_csv('../Data/douban_movies.csv', encoding='utf-8')\n self.movies_detailed = self.movies_detailed.iloc[:, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]\n\n self.pix = QPixmap(r'../MoviePosters/' + \"{}\".format(self.name) + '.jpg')\n self.pic = QLabel(self)\n self.pic.setPixmap(self.pix)\n\n try:\n if pd.isna(list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].english_name)[0]):\n self.english_name = ''\n else:\n self.english_name = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].english_name)[0]\n self.directors = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].directors)[0]\n self.writer = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].writer)[0]\n self.actors = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].actors)[0]\n self.rate = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].rate)[0]\n if pd.isna(list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].style2)[0]) and pd.isna(list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].style3)[0]):\n self.style = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].style1)[0]\n elif pd.isna(list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].style3)[0]):\n self.style = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].style1)[0] + ' ' + \\\n list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].style2)[0]\n else:\n self.style = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].style1)[0] + ' ' + \\\n list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].style2)[0] + ' ' + \\\n list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].style3)[0]\n self.country = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].country)[0]\n self.language = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].language)[0]\n self.date = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].date)[0]\n self.duration = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].duration)[0]\n self.introduction = list(self.movies_detailed[self.movies_detailed.name == \"{}\".format(self.name)].introduction)[0]\n\n self.name_label = QLabel(\"<font size=10><b>\" + self.name + \" \" + self.english_name + \"</b></font>\")\n self.directors_label = QLabel(\"<h2>\" + \"导演: \" + self.directors + \"</h2>\")\n self.writer_label = QLabel(\"<h2>\" + \"编剧: \" + self.writer + \"</h2>\")\n self.actors_label = QLabel(\"<h2>\" 
+ \"主演: \" + self.actors.split(' ')[0] + \"</h2>\")\n self.style_label = QLabel(\"<h2>\" + \"类型: \" + self.style + \"</h2>\")\n\n self.country_label = QLabel(\"<h2>\" + \"国家: \" + self.country + \"</h2>\")\n self.language_label = QLabel(\"<h2>\" + \"语言: \" + self.language + \"</h2>\")\n self.date_label = QLabel(\"<h2>\" + \"上映时间: \" + str(int(self.date)) + \"</h2>\")\n self.duration_label = QLabel(\"<h2>\" + \"片长: \" + str(int(self.duration)) + \"</h2>\")\n\n self.rate_label = QLabel(\"<h1>\" + \"评分: \" + str(self.rate) + \"</h1>\")\n self.introduction_label = QLabel(\"<h1><b>电影简介:</b></h1>\", self)\n self.introduction_browser = QTextBrowser(self)\n self.introduction_browser.setText(\"<h2>\" + self.introduction + \"</h2>\")\n\n self.v1_layout = QVBoxLayout()\n self.v2_layout = QVBoxLayout()\n self.h_layout = QHBoxLayout()\n self.v_layout = QVBoxLayout()\n\n self.v1_layout.addWidget(self.directors_label)\n self.v1_layout.addWidget(self.writer_label)\n self.v1_layout.addWidget(self.actors_label)\n self.v1_layout.addWidget(self.style_label)\n\n self.v2_layout.addWidget(self.country_label)\n self.v2_layout.addWidget(self.language_label)\n self.v2_layout.addWidget(self.date_label)\n self.v2_layout.addWidget(self.duration_label)\n\n self.h_layout.addWidget(self.pic)\n self.h_layout.addLayout(self.v1_layout)\n self.h_layout.addLayout(self.v2_layout)\n self.h_layout.addWidget(self.rate_label)\n\n self.v_layout.addWidget(self.name_label)\n self.v_layout.addLayout(self.h_layout)\n self.v_layout.addWidget(self.introduction_label)\n self.v_layout.addWidget(self.introduction_browser)\n\n self.setLayout(self.v_layout)\n except:\n self.resize(200, 200)\n self.no_find = QLabel(\"<h1>对不起, 没有找到相关电影!</h1>\", self)\n self.h_layout = QHBoxLayout()\n self.h_layout.addWidget(self.no_find)\n self.setLayout(self.h_layout)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n detailed = MovieDetailed(\"复仇者联盟\")\n detailed.show()\n sys.exit(app.exec_())\n"
] |
[
[
"pandas.read_csv"
]
] |
arvinsahni/ml4
|
[
"5f9f4c5ed9f60abc842e0696ddbf41305919df5f"
] |
[
"flask/app/viz_1.py"
] |
[
"from __future__ import division\n\nfrom flask import render_template, request, Response, jsonify, send_from_directory\nfrom app import app\n\nimport json\nimport psycopg2\nimport os\nimport sys\nimport psycopg2.extras\nimport pandas as pd\n\nmodule_path = os.path.abspath(os.path.join('../'))\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nfrom learn import forall as fa\nfrom learn import utils\n\n@app.route('/index')\ndef index():\n return render_template('home.html')\n\n\n@app.route('/viz')\ndef viz():\n return render_template('viz.html')\n\n\ndef to_csv(d, fields):\n d.insert(0, fields)\n return Response('\\n'.join([\",\".join(map(str, e)) for e in d]), mimetype='text/csv')\n\n\n@app.route('/hist_data', methods=['GET', 'POST'])\ndef hist_data():\n website = request.args.get('website')\n person = request.args.get('person')\n db = psycopg2.connect(host='ec2-54-208-219-223.compute-1.amazonaws.com',\n database='election',\n user='elections',\n password='election2016')\n curs = db.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n DEC2FLOAT = psycopg2.extensions.new_type(\n psycopg2.extensions.DECIMAL.values,\n 'DEC2FLOAT',\n lambda value, curs: float(value) if value is not None else None)\n psycopg2.extensions.register_type(DEC2FLOAT)\n\n if website:\n sql = \"\"\"select a.bin, sum(coalesce(count,0)) from histogram_bins a\n left join (select * from data_binned where website = '%s' and person = '%s') b on a.bin = b.bin\n group by 1 order by 1\"\"\" % (website, person)\n else:\n sql = \"\"\"select a.bin, sum(coalesce(count,0)) from histogram_bins a\n left join (select * from data_binned where person = '%s') b on a.bin = b.bin\n group by 1 order by 1\"\"\" % person\n print(sql)\n curs.execute(sql)\n d = curs.fetchall()\n print(d)\n fields = ('bin', 'sum')\n return jsonify(data=d)\n\n\n@app.route('/dataset', methods=['POST'])\ndef dataset():\n # print(request.get_data())\n print(request.files)\n\n dtrain = request.files['train']\n dtest = request.files['test']\n\n #Save input data files in input folder\n dtrain.save(\"input/\" + dtrain.filename)\n dtest.save(\"input/\" + dtest.filename)\n\n df_train = pd.read_csv(\"input/\" + dtrain.filename)\n # print(df_train.head())\n\n df_test = pd.read_csv(\"input/\" + dtest.filename)\n # print(df_test.head())\n\n #From Jason's ML module\n X, y = utils.X_y_split(X_train=df_train, X_test=df_test)\n model = fa.All()\n model.fit(X, y)\n\n #Append prediction column to test set\n predictions = model.predict(df_test)\n df_test['prediction'] = predictions\n\n #Save prediction in output folder\n print(df_test.head())\n df_test.to_csv(\"output/\" + \"prediction.csv\", index=False)\n\n print(\"%s: %.3f (%s)\" % (\"Jacky's data:\", model.score, model.score_type))\n return '{ \"fake_json\":100}', 200\n\n\n@app.route('/download')\ndef download(filename=None):\n # uploads = os.path.join(current_app.root_path, app.config['UPLOAD_FOLDER'])\n return send_from_directory(directory=os.path.abspath(os.path.join('../flask/output')), filename=\"prediction.csv\")\n # return '{ \"fake_json\":100}'\n"
] |
[
[
"pandas.read_csv"
]
] |
DariusTorabian/lyrics-classifier
|
[
"f15e6d48c80ed7101080565d8061cda1766b57a3"
] |
[
"src/lyrics_scraper.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n'''\nThis module scrapes lyrics and songtitles of a given artist and saves them\nin a JSON file.\n'''\n\nimport random\nfrom time import sleep\nimport re\nimport argparse\nimport warnings\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests.adapters import HTTPAdapter\nimport pandas as pd\n\nwarnings.filterwarnings(\"ignore\")\n\ns = requests.Session()\nadapter = HTTPAdapter(max_retries=10)\ns.mount('http://', adapter)\ns.mount('https://', adapter)\ns.get('https://www.lyrics.com')\n\n\ndef get_soup_songpage(link):\n '''\n Gets soup of songpage from provided sublink\n '''\n response_songpage = s.get(\"https://www.lyrics.com/\" + link)\n soup_songpage = BeautifulSoup(response_songpage.text)\n sleep(random.uniform(0.3, 0.5))\n return soup_songpage\n\ndef get_lyrics(soup_songpage):\n '''\n Gets lyrics from soup of one songpage.\n '''\n lyric_text = soup_songpage.find(id=\"lyric-body-text\")\n return lyric_text.text\n\ndef get_title(soup_songpage):\n '''\n Gets titles from soup of one songpage.\n '''\n lyric_title = soup_songpage.find(class_=\"lyric-title\")\n return lyric_title.text\n\ndef get_songlinks(artisturl):\n '''\n Get songlinks from artistpage. Input is artisturl of lyrics.com,\n output is list of sublinks to songpages.\n '''\n response_artistpage = s.get(artisturl)\n soup_artistpage = BeautifulSoup(response_artistpage.text)\n sleep(random.uniform(0.1, 0.3))\n temp = []\n pattern = re.compile(\"/lyric/.+?(?=\\\")\")\n for el in soup_artistpage.find_all(\"a\"):\n temp.append(el)\n links = re.findall(pattern, str(temp))\n return links\n\ndef get_titlesandlyrics(artisturl, fileoutput):\n '''\n Get titles and lyrics from an artist.\n Input URL of lyrics.com artistpage and a filename for json.\n Output is a DataFrame and a json file.\n '''\n songlinks = get_songlinks(artisturl)\n dictionary_result = {\"title\":[], \"lyrics\":[]}\n x = 0\n skipped_songs = 0\n for link in songlinks[:-1]:\n try:\n soup_songpage = get_soup_songpage(link)\n song_title = get_title(soup_songpage)\n song_lyrics = get_lyrics(soup_songpage)\n if song_title not in dictionary_result[\"title\"]:\n dictionary_result[\"title\"].append(song_title)\n dictionary_result[\"lyrics\"].append(song_lyrics)\n x += 1\n print(f\"Saved {x} of {len(songlinks)-skipped_songs} songs.\")\n else:\n print(f\"Song named {song_title} already crawled, skipped.\")\n skipped_songs += 1\n except Exception:\n print(f\"Something went wrong with number {x}.\")\n print(f\"Saved {x} songs and skipped {skipped_songs} duplicates.\")\n df = pd.DataFrame(data=dictionary_result)\n df.to_json(\"data/\"+fileoutput+'.json')\n print(f\"Crawled songs have been saved in './data/{fileoutput}.json'.\")\n return df\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Scrapes lyrics from lyrics.com. Please provide the following arguments:')\n parser.add_argument('artisturl', help='lyrics.com artist page url, e.g. \"https://www.lyrics.com/artist/Eminem/347307\"')\n parser.add_argument('fileoutput', help='filename for .json output, e.g. \"Eminem\"')\n args = parser.parse_args()\n\n from pyfiglet import Figlet\n f = Figlet(font=\"graffiti\")\n\n print(f.renderText('Lyrics Scraper 9000'))\n print(f\"Lyrics scraper initialized with artisturl {args.artisturl}.\")\n print(f\"Lyrics will be saved in ./data/{args.fileoutput}.json.\")\n get_titlesandlyrics(args.artisturl, args.fileoutput)\n"
] |
[
[
"pandas.DataFrame"
]
] |
QUAPNH/NucDetSeg
|
[
"5b0400ca5dc98d09beca36d46cc55bfabb9ce4e0"
] |
[
"seg_utils/seg_transforms.py"
] |
[
"import numpy as np\nfrom numpy import random\nimport cv2\nimport torch\n\n\nclass Compose(object):\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img, bboxes=None, labels=None, masks=None):\n for t in self.transforms:\n img, bboxes, labels, masks = t(img, bboxes, labels, masks)\n return img, bboxes, labels, masks\n\n\nclass ConvertImgFloat(object):\n def __call__(self, img, bboxes=None, labels=None, masks=None):\n return img.astype(np.float32), bboxes, labels, masks\n\nclass RandomContrast(object):\n def __init__(self, lower=0.5, upper=1.5):\n self.lower = lower\n self.upper = upper\n assert self.upper >= self.lower, \"contrast upper must be >= lower.\"\n assert self.lower >= 0, \"contrast lower must be non-negative.\"\n\n def __call__(self, img, bboxes=None, labels=None, masks=None):\n if random.randint(2):\n alpha = random.uniform(self.lower, self.upper)\n img *= alpha\n return img, bboxes, labels, masks\n\n\nclass RandomBrightness(object):\n def __init__(self, delta=32):\n assert delta >= 0.0\n assert delta <= 255.0\n self.delta = delta\n\n def __call__(self, img, bboxes=None, labels=None, masks=None):\n if random.randint(2):\n delta = random.uniform(-self.delta, self.delta)\n img += delta\n return img, bboxes, labels, masks\n\nclass SwapChannels(object):\n def __init__(self, swaps):\n self.swaps = swaps\n def __call__(self, img):\n img = img[:, :, self.swaps]\n return img\n\n\nclass RandomLightingNoise(object):\n def __init__(self):\n self.perms = ((0, 1, 2), (0, 2, 1),\n (1, 0, 2), (1, 2, 0),\n (2, 0, 1), (2, 1, 0))\n def __call__(self, img, bboxes=None, labels=None, masks=None):\n if random.randint(2):\n swap = self.perms[random.randint(len(self.perms))]\n shuffle = SwapChannels(swap)\n img = shuffle(img)\n return img, bboxes, labels, masks\n\n\nclass PhotometricDistort(object):\n def __init__(self):\n self.pd = RandomContrast()\n self.rb = RandomBrightness()\n self.rln = RandomLightingNoise()\n\n def __call__(self, img, bboxes=None, labels=None, masks=None):\n img, bboxes, labels, masks = self.rb(img, bboxes, labels, masks)\n if random.randint(2):\n distort = self.pd\n else:\n distort = self.pd\n img, bboxes, labels, masks = distort(img, bboxes, labels, masks)\n img, bboxes, labels, masks = self.rln(img, bboxes, labels, masks)\n return img, bboxes, labels, masks\n\n\nclass Expand(object):\n def __init__(self, max_scale = 2, mean = (0.485, 0.456, 0.406)):\n self.mean = mean\n self.max_scale = max_scale\n\n def __call__(self, img, bboxes=None, labels=None, masks=None):\n if random.randint(2):\n return img, bboxes, labels, masks\n h,w,c = img.shape\n ratio = random.uniform(1,self.max_scale)\n y1 = random.uniform(0, h*ratio-h)\n x1 = random.uniform(0, w*ratio-w)\n expand_img = np.zeros(shape=(int(h*ratio), int(w*ratio),c),dtype=img.dtype)\n expand_img[:,:,:] = self.mean\n expand_img[int(y1):int(y1+h), int(x1):int(x1+w)] = img\n img = expand_img\n\n num_obj,h,w = masks.shape\n expand_mask = np.zeros(shape=(num_obj,int(h*ratio), int(w*ratio)),dtype=masks.dtype)\n expand_mask[:,:,:] = 0.\n expand_mask[:, int(y1):int(y1+h), int(x1):int(x1+w)] = masks\n masks = expand_mask\n\n bboxes[:,0::2] += float(int(y1))\n bboxes[:,1::2] += float(int(x1))\n\n return img, bboxes, labels, masks\n\ndef intersect(boxes_a, box_b):\n max_yx = np.minimum(boxes_a[:,2:], box_b[2:])\n min_yx = np.maximum(boxes_a[:,:2], box_b[:2])\n inter = np.clip((max_yx-min_yx), a_min=0., a_max=np.inf)\n return inter[:,0]*inter[:,1]\n\ndef jaccard_numpy(boxes_a, box_b):\n # boxes_a: 
float\n # box_b: int\n inter = intersect(boxes_a, box_b)\n area_a = ((boxes_a[:,2]-boxes_a[:,0])*(boxes_a[:,3]-boxes_a[:,1]))\n area_b = ((box_b[2]-box_b[0])*(box_b[3]-box_b[1]))\n union = area_a+area_b-inter\n return inter/union #float\n\n\nclass RandomSampleCrop(object):\n def __init__(self, ratio=(0.5, 1.5), min_win = 0.9):\n self.sample_options = (\n # using entire original input image\n None,\n # sample a patch s.t. MIN jaccard w/ obj in .1,.3,.4,.7,.9\n # (0.1, None),\n # (0.3, None),\n (0.7, None),\n (0.9, None),\n # randomly sample a patch\n (None, None),\n )\n self.ratio = ratio\n self.min_win = min_win\n\n def __call__(self, img, bboxes=None, labels=None, masks=None):\n height, width ,_ = img.shape\n while True:\n mode = random.choice(self.sample_options)\n if mode is None:\n return img, bboxes, labels, masks\n min_iou, max_iou = mode\n\n if min_iou is None:\n min_iou = float('-inf')\n if max_iou is None:\n max_iou = float('inf')\n\n for _ in range(50):\n current_img = img\n current_mask = masks\n w = random.uniform(self.min_win*width, width)\n h = random.uniform(self.min_win*height, height)\n if h/w<self.ratio[0] or h/w>self.ratio[1]:\n continue\n y1 = random.uniform(height-h)\n x1 = random.uniform(width-w)\n rect = np.array([int(y1), int(x1), int(y1+h), int(x1+w)])\n overlap = jaccard_numpy(bboxes, rect)\n if overlap.min()<min_iou and max_iou<overlap.max():\n continue\n current_img = current_img[rect[0]:rect[2], rect[1]:rect[3], :]\n current_mask = current_mask[:, rect[0]:rect[2], rect[1]:rect[3]]\n centers = (bboxes[:,:2]+bboxes[:,2:])/2.0\n mask1 = (rect[0]<centers[:,0])*(rect[1]<centers[:,1])\n mask2 = (rect[2]>centers[:,0])*(rect[3]>centers[:,1])\n mask = mask1*mask2\n if not mask.any():\n continue\n current_boxes = bboxes[mask,:].copy()\n current_labels = labels[mask]\n current_mask = current_mask[mask,:,:]\n current_boxes[:,:2] = np.maximum(current_boxes[:,:2], rect[:2])\n current_boxes[:,:2]-=rect[:2]\n current_boxes[:,2:] = np.minimum(current_boxes[:,2:], rect[2:])\n current_boxes[:,2:]-=rect[:2]\n return current_img, current_boxes, current_labels, current_mask\n\nclass RandomMirror_w(object):\n def __call__(self, img, bboxes, classes, masks):\n _,w,_ = img.shape\n if random.randint(2):\n img = img[:,::-1,:]\n masks = masks[:,:,::-1]\n bboxes[:,1::2] = w-bboxes[:,3::-2]\n return img, bboxes, classes, masks\n\nclass RandomMirror_h(object):\n def __call__(self, img, bboxes, classes, masks):\n h,_,_ = img.shape\n if random.randint(2):\n img = img[::-1,:,:]\n masks = masks[:,::-1,:]\n bboxes[:,0::2] = h-bboxes[:,2::-2]\n return img, bboxes, classes, masks\n\n\nclass Resize(object):\n def __init__(self, height, width):\n self.height = height\n self.width = width\n\n def __call__(self, img, bboxes, classes, masks):\n h,w,c = img.shape\n bboxes = bboxes.astype(np.float32)\n bboxes[:, 0] /= h\n bboxes[:, 1] /= w\n bboxes[:, 2] /= h\n bboxes[:, 3] /= w\n img = cv2.resize(img, dsize=(self.width, self.height))\n bboxes[:, 0] *= self.height\n bboxes[:, 1] *= self.width\n bboxes[:, 2] *= self.height\n bboxes[:, 3] *= self.width\n num_obj, h, w = masks.shape\n output_masks = np.zeros((num_obj,self.height, self.width),dtype=masks.dtype)\n for i in range(num_obj):\n output_masks[i] = cv2.resize(masks[i,:,:],\n dsize=(self.width, self.height),\n interpolation=cv2.INTER_NEAREST)\n return img, bboxes, classes, output_masks\n\nclass ToTensor(object):\n def __call__(self, img, bboxes, classes, masks):\n if isinstance(img, np.ndarray):\n img = torch.Tensor(img.copy().transpose((2,0,1)))\n\n 
if isinstance(bboxes, np.ndarray):\n bboxes = torch.Tensor(bboxes)\n\n if isinstance(classes, np.ndarray):\n classes = torch.Tensor(classes)\n\n if isinstance(masks, np.ndarray):\n masks = torch.Tensor(masks.copy())\n\n return img, bboxes, classes, masks\n"
] |
[
[
"numpy.maximum",
"numpy.minimum",
"torch.Tensor",
"numpy.clip",
"numpy.random.choice",
"numpy.random.uniform",
"numpy.zeros",
"numpy.random.randint"
]
] |
HyperbolicTangent/HAR
|
[
"2fd03ff7b1fb138159079b991698184da2affd22"
] |
[
"input_pipeline/S2S.py"
] |
[
"import gin\r\nimport logging\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport zipfile\r\nimport os\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn import preprocessing\r\n\r\nAUTOTUNE = tf.data.experimental.AUTOTUNE\r\n\r\nfirst_time_run = False\r\n\r\ndef one_hot_coding(x):\r\n if x == 0:\r\n y = np.zeros(12)\r\n else:\r\n y = np.eye(12)[x-1] # 12 is number of class\r\n return y\r\n\r\n\r\n@gin.configurable\r\ndef load(name, file_path):\r\n if name == \"HAPT Data Set\":\r\n logging.info(f\"Preparing dataset: {name}...\")\r\n # zip file\r\n #zfile = zipfile.ZipFile(os.path.join(file_path, \"HAPT Data Set.zip\"))\r\n #zfile.extractall(path=os.path.join(file_path, \"HAPT_Data_Set\"))\r\n\r\n rawdata_path = os.path.join(file_path, \"HAPT Data Set\", \"RawData\")\r\n names_list = os.listdir(rawdata_path)\r\n\r\n # get all users data and combine them into 6-channel data\r\n ds = {}\r\n train_ds = []\r\n test_ds = []\r\n val_ds = []\r\n train_ds_aw = []\r\n test_ds_aw = []\r\n val_ds_aw = []\r\n # set precision of decimal with 18, i.e. 18 digits after decimal point are kept\r\n np.set_printoptions(precision=18)\r\n if first_time_run:\r\n for x in range(61):\r\n train_acc_path = os.path.join(rawdata_path, names_list[x])\r\n y = x + 61\r\n train_gyro_data_path = os.path.join(rawdata_path, names_list[y])\r\n exp_path = os.path.join(rawdata_path, \"ds_exp%d.txt\" % (x+1))\r\n # open txt file of acc data\r\n with open(train_acc_path, 'r') as fa:\r\n # open txt file of gyro data\r\n with open(train_gyro_data_path, 'r') as fb:\r\n # create a new txt file and save all data into it\r\n with open(exp_path, 'w') as fc:\r\n\r\n for line in fa:\r\n\r\n # remove line break of lines in acc data txt file\r\n fc.write(line.strip('\\n'))\r\n fc.write(\" \" + fb.readline())\r\n\r\n else:\r\n for x in range(43):\r\n exp_path = os.path.join(rawdata_path, \"ds_exp%d.txt\" % (x + 1))\r\n data = np.loadtxt(exp_path)\r\n for line in data:\r\n train_ds.append(line)\r\n train_ds = preprocessing.scale(train_ds) # z-score normalization\r\n # print(train_ds.mean(axis=0)) # get mean of every channel\r\n # print(train_ds.std(axis=0)) # get standard deviation of every channel\r\n train_ds = tf.data.Dataset.from_tensor_slices(train_ds)\r\n train_ds_windows = train_ds.window(size=250, shift=125, stride=1, drop_remainder=True)\r\n for window in train_ds_windows:\r\n for line in list(window.as_numpy_iterator()):\r\n train_ds_aw.append(line)\r\n\r\n for x in range(43, 55):\r\n exp_path = os.path.join(rawdata_path, \"ds_exp%d.txt\" % (x + 1))\r\n data = np.loadtxt(exp_path)\r\n for line in data:\r\n test_ds.append(line)\r\n test_ds = preprocessing.scale(test_ds) # z-score normalization\r\n test_ds = tf.data.Dataset.from_tensor_slices(test_ds)\r\n test_ds_windows = test_ds.window(size=250, shift=125, stride=1, drop_remainder=True)\r\n for window in test_ds_windows:\r\n for line in list(window.as_numpy_iterator()):\r\n test_ds_aw.append(line)\r\n\r\n for x in range(55, 61):\r\n exp_path = os.path.join(rawdata_path, \"ds_exp%d.txt\" % (x + 1))\r\n data = np.loadtxt(exp_path)\r\n for line in data:\r\n val_ds.append(line)\r\n val_ds = preprocessing.scale(val_ds) # z-score normalization\r\n val_ds = tf.data.Dataset.from_tensor_slices(val_ds)\r\n val_ds_windows = val_ds.window(size=250, shift=125, stride=1, drop_remainder=True)\r\n for window in val_ds_windows:\r\n for line in list(window.as_numpy_iterator()):\r\n val_ds_aw.append(line)\r\n\r\n # generate label dataset according to labels.txt\r\n 
label_path = os.path.join(rawdata_path, \"labels.txt\")\r\n label_ds = {}\r\n labels = np.loadtxt(label_path).astype(int)\r\n for x in range(61):\r\n first_row = True\r\n list2 = []\r\n for num in range(len(labels)):\r\n a_0 = labels[num-1][4]\r\n a = labels[num][3] - 1\r\n b = labels[num][4]\r\n c = labels[num][2]\r\n if labels[num][0] == x+1:\r\n if first_row:\r\n for y in range(a):\r\n list2.append(0)\r\n for y in range(a, b):\r\n list2.append(c)\r\n first_row = False\r\n else:\r\n if a_0 == a:\r\n for y in range(a, b):\r\n list2.append(c)\r\n else:\r\n for y in range(a_0, a):\r\n list2.append(0)\r\n for y in range(a, b):\r\n list2.append(c)\r\n if labels[num][0] == x+2 and labels[num-1][2] == 2:\r\n a = len(list2)\r\n exp_path = os.path.join(rawdata_path, \"ds_exp%d.txt\" % (x + 1))\r\n ds[x] = np.loadtxt(exp_path)\r\n b = ds[x].shape[0]\r\n # print(b)\r\n for y in range(a, b):\r\n list2.append(0)\r\n break\r\n if labels[num][0] == 61 and labels[num][4] == 18097:\r\n a = len(list2)\r\n exp_path = os.path.join(rawdata_path, \"ds_exp%d.txt\" % (x + 1))\r\n ds[x] = np.loadtxt(exp_path)\r\n b = ds[x].shape[0]\r\n # print(b)\r\n for y in range(a, b):\r\n list2.append(0)\r\n break\r\n # print(len(list2))\r\n label_ds[x] = np.array(list2)\r\n # generate label dataset for each train, test and validation dataset\r\n train_label = []\r\n test_label = []\r\n val_label = []\r\n for x in range(43):\r\n for line in label_ds[x]:\r\n train_label.append(line)\r\n train_label = tf.data.Dataset.from_tensor_slices(train_label)\r\n train_label_windows = train_label.window(size=250, shift=125, stride=1, drop_remainder=True)\r\n train_label_aw = []\r\n for window in train_label_windows:\r\n for line in list(window.as_numpy_iterator()):\r\n train_label_aw.append(line)\r\n\r\n for x in range(43, 55):\r\n for line in label_ds[x]:\r\n test_label.append(line)\r\n test_label = tf.data.Dataset.from_tensor_slices(test_label)\r\n test_label_windows = test_label.window(size=250, shift=125, stride=1, drop_remainder=True)\r\n test_label_aw = []\r\n for window in test_label_windows:\r\n for line in list(window.as_numpy_iterator()):\r\n test_label_aw.append(line)\r\n\r\n for x in range(55, 61):\r\n for line in label_ds[x]:\r\n val_label.append(line)\r\n val_label = tf.data.Dataset.from_tensor_slices(val_label)\r\n val_label_windows = val_label.window(size=250, shift=125, stride=1, drop_remainder=True)\r\n val_label_aw = []\r\n for window in val_label_windows:\r\n for line in list(window.as_numpy_iterator()):\r\n val_label_aw.append(line)\r\n\r\n # generate tfrecord file for every dataset\r\n def save_tfrecords(ds_name, data, label): # desfile is the path where the tfrecord file is saved\r\n desfile = os.path.join(file_path, 'S2S_2') + '/%s.tfrecords' % ds_name\r\n with tf.io.TFRecordWriter(desfile) as writer:\r\n for i in range(len(data)):\r\n if label[i] == 0:\r\n continue\r\n else:\r\n features = tf.train.Features(\r\n feature={\r\n 'data': tf.train.Feature(\r\n bytes_list=tf.train.BytesList(value=[data[i].astype(np.float64).tostring()])),\r\n 'label': tf.train.Feature(\r\n int64_list=tf.train.Int64List(value=[label[i]]))\r\n }\r\n )\r\n example = tf.train.Example(features=features)\r\n serialized = example.SerializeToString()\r\n writer.write(serialized)\r\n return desfile\r\n \r\n traintf_path = save_tfrecords('train', train_ds_aw, train_label_aw)\r\n testtf_path = save_tfrecords('test', test_ds_aw, test_label_aw)\r\n valtf_path = save_tfrecords('validation', val_ds_aw, val_label_aw)\r\n\r\n # traintf_path = r'D:\\Uni 
Stuttgart\\Deep learning lab\\Human Activity Recognition\\S2S_2\\train.tfrecords'\r\n # testtf_path = r'D:\\Uni Stuttgart\\Deep learning lab\\Human Activity Recognition\\S2S_2\\test.tfrecords'\r\n # valtf_path = r'D:\\Uni Stuttgart\\Deep learning lab\\Human Activity Recognition\\S2S_2\\validation.tfrecords'\r\n\r\n raw_train_ds = tf.data.TFRecordDataset(traintf_path)\r\n raw_test_ds = tf.data.TFRecordDataset(testtf_path)\r\n raw_val_ds = tf.data.TFRecordDataset(valtf_path)\r\n\r\n def _parse_function(example_proto):\r\n feature_description = {'data': tf.io.FixedLenFeature((), tf.string),\r\n 'label': tf.io.FixedLenFeature((), tf.int64)}\r\n parsed_features = tf.io.parse_single_example(example_proto, feature_description)\r\n data = tf.io.decode_raw(parsed_features['data'], tf.float64)\r\n\r\n data = tf.reshape(data, (250, 6))\r\n # label = tf.reshape(label, (-1, 12))\r\n return data, parsed_features['label']\r\n\r\n parsed_train_ds = raw_train_ds.map(_parse_function, num_parallel_calls=AUTOTUNE)\r\n parsed_val_ds = raw_val_ds.map(_parse_function, num_parallel_calls=AUTOTUNE)\r\n parsed_test_ds = raw_test_ds.map(_parse_function, num_parallel_calls=AUTOTUNE)\r\n\r\n # Visualization of some values in parsed_train_ds\r\n trydict = {}\r\n trydict2 = {}\r\n for index, (data, label) in enumerate(parsed_train_ds):\r\n trydict[index] = data\r\n trydict2[index] = label\r\n if index ==10:\r\n break\r\n # print(trydict[250])\r\n print(trydict[1])\r\n print(trydict2[3])\r\n print(trydict2[5])\r\n print(trydict2[8])\r\n\r\n return parsed_train_ds, parsed_val_ds, parsed_test_ds\r\n\r\n\r\nif __name__ == \"__main__\":\r\n load(\"HAPT Data Set\", r'D:\\Uni Stuttgart\\Deep learning lab\\Human Activity Recognition')"
] |
[
[
"tensorflow.io.TFRecordWriter",
"tensorflow.train.Example",
"tensorflow.data.TFRecordDataset",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.set_printoptions",
"numpy.eye",
"tensorflow.io.parse_single_example",
"tensorflow.io.decode_raw",
"tensorflow.reshape",
"tensorflow.io.FixedLenFeature",
"numpy.array",
"sklearn.preprocessing.scale",
"numpy.zeros",
"numpy.loadtxt",
"tensorflow.train.Int64List"
]
] |
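The HAPT loader above builds 250-sample windows with a 125-sample shift (50% overlap) via `tf.data.Dataset.window`. A minimal sketch of that windowing behaviour, assuming TensorFlow 2.x; the one-channel toy signal and all names here are illustrative, not from the repo:

```python
# Minimal sketch (not from the repo) of the 250/125 windowing the loader uses.
# Assumes TensorFlow 2.x; the 1-channel toy signal stands in for the 6-channel
# HAPT stream.
import numpy as np
import tensorflow as tf

signal = np.arange(1000.0).reshape(-1, 1)          # toy sensor stream
ds = tf.data.Dataset.from_tensor_slices(signal)

# window() yields a dataset of per-window datasets; flat_map + batch turns
# each window into one dense (250, 1) tensor.
windows = ds.window(size=250, shift=125, drop_remainder=True).flat_map(
    lambda w: w.batch(250)
)

for i, w in enumerate(windows.take(3)):
    print(i, w.shape, float(w[0, 0]))   # first samples 0, 125, 250 -> 50% overlap
```

Each window starts 125 samples after the previous one, which is what gives consecutive training windows their 50% overlap.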
andremoeller/distributed
|
[
"9622b8f9bef1855412e9b23265378e2da1f47f2f"
] |
[
"distributed/protocol/tests/test_numpy.py"
] |
[
"import sys\nfrom zlib import crc32\n\nimport numpy as np\nimport pytest\n\nfrom distributed.protocol import (\n serialize,\n deserialize,\n decompress,\n dumps,\n loads,\n to_serialize,\n msgpack,\n)\nfrom distributed.protocol.utils import BIG_BYTES_SHARD_SIZE\nfrom distributed.protocol.numpy import itemsize\nfrom distributed.protocol.compression import maybe_compress\nfrom distributed.system import MEMORY_LIMIT\nfrom distributed.utils import tmpfile, nbytes\nfrom distributed.utils_test import gen_cluster\n\n\ndef test_serialize():\n x = np.ones((5, 5))\n header, frames = serialize(x)\n assert header[\"type\"]\n assert len(frames) == 1\n\n if \"compression\" in header:\n frames = decompress(header, frames)\n result = deserialize(header, frames)\n assert (result == x).all()\n\n\n@pytest.mark.parametrize(\n \"x\",\n [\n np.ones(5),\n np.array(5),\n np.random.random((5, 5)),\n np.random.random((5, 5))[::2, :],\n np.random.random((5, 5))[:, ::2],\n np.asfortranarray(np.random.random((5, 5))),\n np.asfortranarray(np.random.random((5, 5)))[::2, :],\n np.asfortranarray(np.random.random((5, 5)))[:, ::2],\n np.random.random(5).astype(\"f4\"),\n np.random.random(5).astype(\">i8\"),\n np.random.random(5).astype(\"<i8\"),\n np.arange(5).astype(\"M8[us]\"),\n np.arange(5).astype(\"M8[ms]\"),\n np.arange(5).astype(\"m8\"),\n np.arange(5).astype(\"m8[s]\"),\n np.arange(5).astype(\"c16\"),\n np.arange(5).astype(\"c8\"),\n np.array([True, False, True]),\n np.ones(shape=5, dtype=[(\"a\", \"i4\"), (\"b\", \"M8[us]\")]),\n np.array([\"abc\"], dtype=\"S3\"),\n np.array([\"abc\"], dtype=\"U3\"),\n np.array([\"abc\"], dtype=object),\n np.ones(shape=(5,), dtype=(\"f8\", 32)),\n np.ones(shape=(5,), dtype=[(\"x\", \"f8\", 32)]),\n np.ones(shape=(5,), dtype=np.dtype([(\"a\", \"i1\"), (\"b\", \"f8\")], align=False)),\n np.ones(shape=(5,), dtype=np.dtype([(\"a\", \"i1\"), (\"b\", \"f8\")], align=True)),\n np.ones(shape=(5,), dtype=np.dtype([(\"a\", \"m8[us]\")], align=False)),\n # this dtype fails unpickling\n np.ones(shape=(5,), dtype=np.dtype([(\"a\", \"m8\")], align=False)),\n np.array([(1, \"abc\")], dtype=[(\"x\", \"i4\"), (\"s\", object)]),\n np.zeros(5000, dtype=[(\"x%d\" % i, \"<f8\") for i in range(4)]),\n np.zeros(5000, dtype=\"S32\"),\n np.zeros((1, 1000, 1000)),\n np.arange(12)[::2], # non-contiguous array\n np.ones(shape=(5, 6)).astype(dtype=[(\"total\", \"<f8\"), (\"n\", \"<f8\")]),\n np.broadcast_to(np.arange(3), shape=(10, 3)), # zero-strided array\n ],\n)\ndef test_dumps_serialize_numpy(x):\n header, frames = serialize(x)\n if \"compression\" in header:\n frames = decompress(header, frames)\n buffer_interface = memoryview\n for frame in frames:\n assert isinstance(frame, (bytes, buffer_interface))\n y = deserialize(header, frames)\n\n np.testing.assert_equal(x, y)\n if x.flags.c_contiguous or x.flags.f_contiguous:\n assert x.strides == y.strides\n\n\n@pytest.mark.parametrize(\n \"x\",\n [\n np.ma.masked_array([5, 6], mask=[True, False], fill_value=10, dtype=\"i4\"),\n np.ma.masked_array([5.0, 6.0], mask=[True, False], fill_value=10, dtype=\"f4\"),\n np.ma.masked_array(\n [5.0, 6.0], mask=[True, False], fill_value=np.nan, dtype=\"f8\"\n ),\n np.ma.masked_array(\n [5.0, 6.0], mask=np.ma.nomask, fill_value=np.nan, dtype=\"f8\"\n ),\n np.ma.masked_array(\n [True, False], mask=np.ma.nomask, fill_value=True, dtype=\"bool\"\n ),\n np.ma.masked_array([\"a\", \"b\"], mask=[True, False], fill_value=\"c\", dtype=\"O\"),\n ],\n)\ndef test_serialize_numpy_ma_masked_array(x):\n (y,) = 
loads(dumps([to_serialize(x)]))\n assert x.data.dtype == y.data.dtype\n np.testing.assert_equal(x.data, y.data)\n np.testing.assert_equal(x.mask, y.mask)\n np.testing.assert_equal(x.fill_value, y.fill_value)\n\n\ndef test_serialize_numpy_ma_masked():\n (y,) = loads(dumps([to_serialize(np.ma.masked)]))\n assert y is np.ma.masked\n\n\ndef test_dumps_serialize_numpy_custom_dtype():\n import builtins\n\n test_rational = pytest.importorskip(\"numpy.core.test_rational\")\n rational = test_rational.rational\n try:\n builtins.rational = (\n rational # Work around https://github.com/numpy/numpy/issues/9160\n )\n x = np.array([1], dtype=rational)\n header, frames = serialize(x)\n y = deserialize(header, frames)\n\n np.testing.assert_equal(x, y)\n finally:\n del builtins.rational\n\n\ndef test_memmap():\n with tmpfile(\"npy\") as fn:\n with open(fn, \"wb\") as f: # touch file\n pass\n x = np.memmap(fn, shape=(5, 5), dtype=\"i4\", mode=\"readwrite\")\n x[:] = 5\n\n header, frames = serialize(x)\n if \"compression\" in header:\n frames = decompress(header, frames)\n y = deserialize(header, frames)\n\n np.testing.assert_equal(x, y)\n\n\n@pytest.mark.slow\ndef test_dumps_serialize_numpy_large():\n if MEMORY_LIMIT < 2e9:\n pytest.skip(\"insufficient memory\")\n x = np.random.random(size=int(BIG_BYTES_SHARD_SIZE * 2 // 8)).view(\"u1\")\n assert x.nbytes == BIG_BYTES_SHARD_SIZE * 2\n frames = dumps([to_serialize(x)])\n dtype, shape = x.dtype, x.shape\n checksum = crc32(x)\n del x\n [y] = loads(frames)\n\n assert (y.dtype, y.shape) == (dtype, shape)\n assert crc32(y) == checksum, \"Arrays are unequal\"\n\n\n@pytest.mark.parametrize(\n \"dt,size\",\n [\n (\"f8\", 8),\n (\"i4\", 4),\n (\"c16\", 16),\n (\"b\", 1),\n (\"S3\", 3),\n (\"M8[us]\", 8),\n (\"M8[s]\", 8),\n (\"U3\", 12),\n ([(\"a\", \"i4\"), (\"b\", \"f8\")], 12),\n ((\"i4\", 100), 4),\n ([(\"a\", \"i4\", 100)], 8),\n ([(\"a\", \"i4\", 20), (\"b\", \"f8\")], 20 * 4 + 8),\n ([(\"a\", \"i4\", 200), (\"b\", \"f8\")], 8),\n ],\n)\ndef test_itemsize(dt, size):\n assert itemsize(np.dtype(dt)) == size\n\n\n@pytest.mark.skipif(sys.version_info[0] < 3, reason=\"numpy doesnt use memoryviews\")\ndef test_compress_numpy():\n pytest.importorskip(\"lz4\")\n x = np.ones(10000000, dtype=\"i4\")\n frames = dumps({\"x\": to_serialize(x)})\n assert sum(map(nbytes, frames)) < x.nbytes\n\n header = msgpack.loads(frames[2], raw=False, use_list=False, strict_map_key=False)\n try:\n import blosc # noqa: F401\n except ImportError:\n pass\n else:\n assert all(c == \"blosc\" for c in header[\"headers\"][(\"x\",)][\"compression\"])\n\n\ndef test_compress_memoryview():\n mv = memoryview(b\"0\" * 1000000)\n compression, compressed = maybe_compress(mv)\n if compression:\n assert len(compressed) < len(mv)\n\n\n@pytest.mark.skip\ndef test_dont_compress_uncompressable_data():\n blosc = pytest.importorskip(\"blosc\")\n x = np.random.randint(0, 255, size=100000).astype(\"uint8\")\n header, [data] = serialize(x)\n assert \"compression\" not in header\n assert data == x.data\n\n x = np.ones(1000000)\n header, [data] = serialize(x)\n assert header[\"compression\"] == [\"blosc\"]\n assert data != x.data\n\n x = np.ones(100)\n header, [data] = serialize(x)\n assert \"compression\" not in header\n if isinstance(data, memoryview):\n assert data.obj.ctypes.data == x.ctypes.data\n\n\n@gen_cluster(client=True, timeout=60)\nasync def test_dumps_large_blosc(c, s, a, b):\n x = c.submit(np.ones, BIG_BYTES_SHARD_SIZE * 2, dtype=\"u1\")\n await x\n\n\n@pytest.mark.skipif(sys.version_info[0] < 3, 
reason=\"numpy doesnt use memoryviews\")\ndef test_compression_takes_advantage_of_itemsize():\n pytest.importorskip(\"lz4\")\n blosc = pytest.importorskip(\"blosc\")\n x = np.arange(1000000, dtype=\"i8\")\n\n assert len(blosc.compress(x.data, typesize=8)) < len(\n blosc.compress(x.data, typesize=1)\n )\n\n _, a = serialize(x)\n aa = [maybe_compress(frame)[1] for frame in a]\n _, b = serialize(x.view(\"u1\"))\n bb = [maybe_compress(frame)[1] for frame in b]\n\n assert sum(map(nbytes, aa)) < sum(map(nbytes, bb))\n\n\ndef test_large_numpy_array():\n x = np.ones((100000000,), dtype=\"u4\")\n header, frames = serialize(x)\n assert sum(header[\"lengths\"]) == sum(map(nbytes, frames))\n\n\n@pytest.mark.parametrize(\n \"x\",\n [\n np.broadcast_to(np.arange(10), (20, 10)), # Some strides are 0\n np.broadcast_to(1, (3, 4, 2)), # All strides are 0\n np.broadcast_to(np.arange(100)[:1], 5), # x.base is larger than x\n np.broadcast_to(np.arange(5), (4, 5))[:, ::-1],\n ],\n)\n@pytest.mark.parametrize(\"writeable\", [True, False])\ndef test_zero_strided_numpy_array(x, writeable):\n assert 0 in x.strides\n x.setflags(write=writeable)\n header, frames = serialize(x)\n y = deserialize(header, frames)\n np.testing.assert_equal(x, y)\n # Ensure we transmit fewer bytes than the full array\n assert sum(map(nbytes, frames)) < x.nbytes\n # Ensure both x and y are have same write flag\n assert x.flags.writeable == y.flags.writeable\n\n\ndef test_non_zero_strided_array():\n x = np.arange(10)\n header, frames = serialize(x)\n assert \"broadcast_to\" not in header\n assert sum(map(nbytes, frames)) == x.nbytes\n\n\ndef test_serialize_writeable_array_readonly_base_object():\n # Regression test for https://github.com/dask/distributed/issues/3252\n\n x = np.arange(3)\n # Create array which doesn't own it's own memory\n y = np.broadcast_to(x, (3, 3))\n\n # Make y writeable and it's base object (x) read-only\n y.setflags(write=True)\n x.setflags(write=False)\n\n # Serialize / deserialize y\n z = deserialize(*serialize(y))\n np.testing.assert_equal(z, y)\n\n # Ensure z and y have the same flags (including WRITEABLE)\n assert z.flags == y.flags\n"
] |
[
[
"numpy.testing.assert_equal",
"numpy.random.random",
"numpy.arange",
"numpy.memmap",
"numpy.dtype",
"numpy.ones",
"numpy.broadcast_to",
"numpy.ma.masked_array",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
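A minimal round-trip sketch of the `serialize`/`deserialize` pair these tests exercise, assuming the `distributed` package is installed; the toy array is illustrative:

```python
# Round-trip sketch of the serialize/deserialize pair the tests above exercise.
import numpy as np
from distributed.protocol import serialize, deserialize

x = np.arange(12, dtype="i8").reshape(3, 4)
header, frames = serialize(x)     # header: metadata dict, frames: list of buffers
y = deserialize(header, frames)   # rebuilds the array from the frames

assert (x == y).all() and x.dtype == y.dtype and x.shape == y.shape
```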
miltonbd/panoptic_segmentation
|
[
"beb6211c2cc92cdbb7ae891d48d30996a1ec8150"
] |
[
"loader/isic_skin_lesion_loader.py"
] |
[
"import os\nimport collections\nimport torch\nimport torchvision\nimport numpy as np\nimport scipy.misc as m\nimport matplotlib.pyplot as plt\n\nfrom torch.utils import data\nfrom ptsemseg.augmentations import *\n\nclass IsicSkinLoader(data.Dataset):\n def __init__(self, root, split=\"train\", \n is_transform=False, img_size=None, augmentations=None, img_norm=True):\n self.root = root\n self.split = split\n self.img_size = [360, 480]\n self.is_transform = is_transform\n self.augmentations = augmentations\n self.img_norm = img_norm\n self.mean = np.array([104.00699, 116.66877, 122.67892])\n self.n_classes = 12\n self.files = collections.defaultdict(list)\n\n for split in [\"train\", \"test\", \"val\"]:\n file_list = os.listdir(root + '/' + split)\n self.files[split] = file_list\n\n def __len__(self):\n return len(self.files[self.split])\n\n def __getitem__(self, index):\n img_name = self.files[self.split][index]\n img_path = self.root + '/' + self.split + '/' + img_name\n lbl_path = self.root + '/' + self.split + 'annot/' + img_name\n\n img = m.imread(img_path)\n img = np.array(img, dtype=np.uint8)\n\n lbl = m.imread(lbl_path)\n lbl = np.array(lbl, dtype=np.int8)\n \n if self.augmentations is not None:\n img, lbl = self.augmentations(img, lbl)\n\n if self.is_transform:\n img, lbl = self.transform(img, lbl)\n\n return img, lbl\n\n def transform(self, img, lbl):\n img = m.imresize(img, (self.img_size[0], self.img_size[1])) # uint8 with RGB mode\n img = img[:, :, ::-1] # RGB -> BGR\n img = img.astype(np.float64)\n img -= self.mean\n if self.img_norm:\n # Resize scales images from 0 to 255, thus we need\n # to divide by 255.0\n img = img.astype(float) / 255.0\n # NHWC -> NCHW\n img = img.transpose(2, 0, 1)\n\n img = torch.from_numpy(img).float()\n lbl = torch.from_numpy(lbl).long()\n return img, lbl\n\n def decode_segmap(self, temp, plot=False):\n Sky = [128, 128, 128]\n Building = [128, 0, 0]\n Pole = [192, 192, 128]\n Road_marking = [255, 69, 0]\n Road = [128, 64, 128]\n Pavement = [60, 40, 222]\n Tree = [128, 128, 0]\n SignSymbol = [192, 128, 128]\n Fence = [64, 64, 128]\n Car = [64, 0, 128]\n Pedestrian = [64, 64, 0]\n Bicyclist = [0, 128, 192]\n Unlabelled = [0, 0, 0]\n\n label_colours = np.array([Sky, Building, Pole, Road, \n Pavement, Tree, SignSymbol, Fence, Car, \n Pedestrian, Bicyclist, Unlabelled])\n r = temp.copy()\n g = temp.copy()\n b = temp.copy()\n for l in range(0, self.n_classes):\n r[temp == l] = label_colours[l, 0]\n g[temp == l] = label_colours[l, 1]\n b[temp == l] = label_colours[l, 2]\n\n rgb = np.zeros((temp.shape[0], temp.shape[1], 3))\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n return rgb\n\nif __name__ == '__main__':\n local_path = '/home/meetshah1995/datasets/segnet/CamVid'\n augmentations = Compose([RandomRotate(10),\n RandomHorizontallyFlip()])\n \n dst = IsicSkinLoader(local_path, is_transform=True, augmentations=augmentations)\n bs = 4\n trainloader = data.DataLoader(dst, batch_size=bs)\n for i, data in enumerate(trainloader):\n imgs, labels = data\n imgs = imgs.numpy()[:, ::-1, :, :]\n imgs = np.transpose(imgs, [0,2,3,1])\n f, axarr = plt.subplots(bs, 2)\n for j in range(bs):\n axarr[j][0].imshow(imgs[j])\n axarr[j][1].imshow(dst.decode_segmap(labels.numpy()[j]))\n plt.show()\n a = raw_input()\n if a == 'ex':\n break\n else:\n plt.close()\n"
] |
[
[
"scipy.misc.imresize",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.subplots",
"torch.from_numpy",
"scipy.misc.imread",
"matplotlib.pyplot.close",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] |
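The per-class loop in `decode_segmap` above can be checked (and vectorised) with a colour lookup table. A standalone sketch with a toy three-colour palette, not the loader's actual CamVid palette:

```python
# Standalone sketch of the decode_segmap idea: map integer class labels to RGB
# through a colour lookup table. The three-colour toy palette is illustrative.
import numpy as np

label_colours = np.array(
    [[128, 128, 128], [128, 0, 0], [0, 128, 192]], dtype=np.float64
)

temp = np.random.randint(0, 3, size=(4, 5))   # toy label map with classes 0..2
rgb = label_colours[temp] / 255.0             # fancy indexing applies the palette
assert rgb.shape == (4, 5, 3)
```

Fancy indexing does in one step what the loader's per-class masking loop does with `temp == l`.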
mattking-smith/gpmp2
|
[
"48c41c142df062832ffbe9792f0cbecdb36485cc"
] |
[
"gpmp2_python/gpmp2_python/utils/signedDistanceField2D.py"
] |
[
"import numpy as np\nfrom gtsam import *\nfrom gpmp2 import *\nimport numpy as np\nfrom gtsam import *\nfrom gpmp2 import *\nimport math\nfrom scipy import ndimage\n\n\ndef signedDistanceField2D(ground_truth_map, cell_size):\n # SIGNEDDISTANCEFIELD2D 2D signed distance field\n # Given a ground truth 2D map defined in Matrix in 0-1,\n # calculate 2D signed distance field, which is defined as a matrix\n # map matrix and signed distance field matrix have the same resolution.\n #\n # Usage: field = SIGNEDDISTANCEFIELD2D(ground_truth_map, cell_siz)\n # @map evidence grid from dataset, map use 0 show open area, 1 show objects.\n # @cell_size cell sizeto given metric information\n #\n # Output:\n # @field sdf, row is Y, col is X\n\n # regularize unknow area to open area\n cur_map = ground_truth_map > 0.75\n cur_map = cur_map.astype(int)\n\n if np.amax(cur_map) is 0:\n return np.ones(ground_truth_map.shape) * 1000\n\n # inverse map\n inv_map = 1 - cur_map\n\n # get signed distance from map and inverse map\n # since bwdist(foo) = ndimage.distance_transform_edt(1-foo)\n map_dist = ndimage.distance_transform_edt(inv_map)\n inv_map_dist = ndimage.distance_transform_edt(cur_map)\n\n field = map_dist - inv_map_dist\n\n # metric\n field = field * cell_size\n field = field.astype(float)\n\n return field\n"
] |
[
[
"numpy.amax",
"scipy.ndimage.distance_transform_edt",
"numpy.ones"
]
] |
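A toy check of the construction in `signedDistanceField2D`: the field is the Euclidean distance outside obstacles minus the distance inside, scaled by cell size. Only NumPy/SciPy are needed; the 5x5 grid and the 0.1 m cell size are illustrative:

```python
# Toy check of signedDistanceField2D's construction: Euclidean distance outside
# obstacles minus distance inside, scaled by cell size.
import numpy as np
from scipy import ndimage

occ = np.zeros((5, 5), dtype=int)
occ[2, 2] = 1                      # one occupied cell
field = (
    ndimage.distance_transform_edt(1 - occ)
    - ndimage.distance_transform_edt(occ)
) * 0.1

print(field[2, 2])   # -0.1: negative inside the obstacle
print(field[2, 3])   #  0.1: one free cell away from it
```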
vandermeerlab/nept
|
[
"fcb0b83d30f4be2783f3e8a9b3c842e4eef4426b"
] |
[
"nept/loaders_neuralynx.py"
] |
[
"# -*- coding: utf-8 -*-\n# Adapted from nlxio written by Bernard Willards <https://github.com/bwillers/nlxio>\n\nimport numpy as np\nimport nept\n\n\ndef load_events(filename, labels):\n \"\"\"Loads neuralynx events\n\n Parameters\n ----------\n filename: str\n labels: dict\n With event name as the key and Neuralynx event string as the value.\n\n Returns\n -------\n timestamps: dict\n\n \"\"\"\n nev_data = load_nev(filename)\n\n idx = {key: [] for key in labels}\n for key in labels:\n for i, event in enumerate(nev_data[\"event_str\"]):\n if event.decode() == labels[key]:\n idx[key].append(i)\n\n timestamps = {label: [] for label in labels}\n\n times = nev_data[\"time\"].astype(float) * 1e-6\n\n for label in labels:\n timestamps[label] = times[idx[label]]\n\n return timestamps\n\n\ndef load_lfp(filename):\n \"\"\"Loads LFP as nept.LocalFieldPotential\n\n Parameters\n ----------\n filename: str\n\n Returns\n -------\n lfp: nept.LocalFieldPotential\n\n \"\"\"\n data, time = load_ncs(filename)\n\n return nept.LocalFieldPotential(data, time)\n\n\ndef load_position(filename, pxl_to_cm):\n \"\"\"Loads videotracking position as nept.Position\n\n Parameters\n ----------\n filename: str\n pxl_to_cm: tuple\n With (x, y) conversion factors\n\n Returns\n -------\n position: nept.Position\n\n \"\"\"\n nvt_data = load_nvt(filename)\n\n xy = np.hstack(\n np.array([nvt_data[\"x\"] / pxl_to_cm[0], nvt_data[\"y\"] / pxl_to_cm[1]])[\n ..., np.newaxis\n ]\n )\n\n return nept.Position(xy, nvt_data[\"time\"])\n\n\ndef load_neuralynx_header(filename):\n \"\"\"Loads a neuralynx header.\n\n Parameters\n ----------\n filename: str\n\n Returns\n -------\n header: byte str\n\n \"\"\"\n with open(filename, \"rb\") as f:\n\n # Neuralynx files have a 16kbyte header\n header = f.read(16 * 2 ** 10)\n\n return header\n\n\ndef load_ncs(filename):\n \"\"\"Loads a neuralynx .ncs electrode file.\n\n Parameters\n ----------\n filename: str\n\n Returns\n -------\n cscs: np.array\n Voltage trace (V)\n times: np.array\n Timestamps (microseconds)\n\n \"\"\"\n\n with open(filename, \"rb\") as f:\n\n # Neuralynx files have a 16kbyte header\n header = f.read(16 * 2 ** 10)\n\n # The format for a .ncs files according the the neuralynx docs is\n # uint64 - timestamp in microseconds\n # uint32 - channel number\n # uint32 - sample freq\n # uint32 - number of valid samples\n # int16 x 512 - actual csc samples\n dt = np.dtype(\n [\n (\"time\", \"<Q\"),\n (\"channel\", \"<i\"),\n (\"freq\", \"<i\"),\n (\"valid\", \"<i\"),\n (\"csc\", \"<h\", (512,)),\n ]\n )\n data = np.fromfile(f, dt)\n\n # unpack the csc matrix\n csc = data[\"csc\"].reshape((data[\"csc\"].size,))\n\n data_times = data[\"time\"] * 1e-6\n\n # find the frequency\n frequency = np.unique(data[\"freq\"])\n if len(frequency) > 1:\n raise IOError(\"only one frequency allowed\")\n frequency = frequency[0]\n\n # .ncs files have a timestamp for every ~512 data points.\n # Here, we assign timestamps for each data sample based on the sampling frequency\n # for each of the 512 data points. 
Sometimes a block will have fewer than 512 data entries,\n # number is set in data['valid'].\n this_idx = 0\n n_block = 512.0\n offsets = np.arange(0, n_block / frequency, 1.0 / frequency)\n times = np.zeros(csc.shape)\n for i, (time, n_valid) in enumerate(zip(data_times, data[\"valid\"])):\n times[this_idx : this_idx + n_valid] = time + offsets[:n_valid]\n this_idx += n_valid\n\n # now find analog_to_digital conversion factor in the header\n analog_to_digital = None\n for line in header.split(b\"\\n\"):\n if line.strip().startswith(b\"-ADBitVolts\"):\n analog_to_digital = np.array(float(line.split(b\" \")[1].decode()))\n\n if analog_to_digital is None:\n raise IOError(\"ADBitVolts not found in .ncs header for \" + filename)\n\n cscs = csc * analog_to_digital\n\n return cscs, times\n\n\ndef load_nev(filename):\n \"\"\"Loads a neuralynx .nev file.\n\n Parameters\n ----------\n filename: str\n\n Returns\n -------\n nev_data: dict\n With time (uint64), id (uint16), nttl (uint16), and event_str (charx128) as the most usable keys.\n\n \"\"\"\n\n with open(filename, \"rb\") as f:\n\n # There's nothing useful in the header for .nev files, so skip past it\n f.seek(2 ** 14)\n\n # An event record is as follows:\n # int16 - nstx - reserved\n # int16 - npkt_id - id of the originating system\n # int16 - npkt_data_size - this value should always be 2\n # uint64 - timestamp, microseconds\n # int16 - nevent_id - ID value for event\n # int16 - nttl - decimal TTL value read from the TTL input port\n # int16 - ncrc - record crc check, not used in consumer applications\n # int16 - ndummy1 - reserved\n # int16 - ndummy2 - reserved\n # int32x8 - dnExtra - extra bit values for this event\n # string(128) - event string\n dt = np.dtype(\n [\n (\"filler1\", \"<h\", 3),\n (\"time\", \"<Q\"),\n (\"id\", \"<h\"),\n (\"nttl\", \"<h\"),\n (\"filler2\", \"<h\", 3),\n (\"extra\", \"<i\", 8),\n (\"event_str\", np.dtype(\"a128\")),\n ]\n )\n nev_data = np.fromfile(f, dt)\n\n return nev_data\n\n\ndef load_ntt(filename):\n \"\"\"Loads a neuralynx .ntt tetrode spike file.\n\n Parameters\n ----------\n filename: str\n\n Returns\n -------\n timestamps: np.array\n Spike times as uint64 (us)\n spikes: np.array\n Spikes as (num_spikes, length_waveform, num_channels)\n frequency: float\n Sampling frequency in waveforms (Hz)\n\n Usage:\n timestamps, spikes, frequency = load_ntt('TT13.ntt')\n\n \"\"\"\n with open(filename, \"rb\") as f:\n\n # A tetrode spike record is as follows:\n # uint64 - timestamp bytes 0:8\n # uint32 - acquisition entity number bytes 8:12\n # uint32 - classified cell number bytes 12:16\n # 8 * uint32- params bytes 16:48\n # 32 * 4 * int16 - waveform points\n # hence total record size is 2432 bits, 304 bytes\n\n # header is 16kbyte, i.e. 
16 * 2^10 = 2^14\n header = f.read(16 * 2 ** 10)\n\n # Read the header and find the conversion factors / sampling frequency\n analog_to_digital = None\n frequency = None\n\n for line in header.split(b\"\\n\"):\n if line.strip().startswith(b\"-ADBitVolts\"):\n analog_to_digital = np.array(float(line.split(b\" \")[1].decode()))\n if line.strip().startswith(b\"-SamplingFrequency\"):\n frequency = float(line.split(b\" \")[1].decode())\n\n f.seek(2 ** 14) # start of the spike, records\n # Neuralynx write little endian for some dumb reason\n dt = np.dtype(\n [(\"time\", \"<Q\"), (\"filer\", \"<i\", 10), (\"spikes\", np.dtype(\"<h\"), (32, 4))]\n )\n data = np.fromfile(f, dt)\n\n if analog_to_digital is None:\n raise IOError(\"ADBitVolts not found in .ntt header for \" + filename)\n if frequency is None:\n raise IOError(\"Frequency not found in .ntt header for \" + filename)\n\n f.close()\n\n return data[\"time\"], data[\"spikes\"] * analog_to_digital, frequency\n\n\ndef load_nvt(filename, remove_empty=True):\n \"\"\"Loads a neuralynx .nvt file.\n\n Parameters\n ----------\n filename: str\n remove_empty: bool\n\n Returns\n -------\n nvt_data: dict\n With time, x, and y as keys.\n\n \"\"\"\n with open(filename, \"rb\") as f:\n\n # Neuralynx files have a 16kbyte header\n header = f.read(16 * 2 ** 10)\n\n # The format for .nvt files according the the neuralynx docs is\n # uint16 - beginning of the record\n # uint16 - ID for the system\n # uint16 - size of videorec in bytes\n # uint64 - timestamp in microseconds\n # uint32 x 400 - points with the color bitfield values\n # int16 - unused\n # int32 - extracted X location of target\n # int32 - extracted Y location of target\n # int32 - calculated head angle in degrees clockwise from the positive Y axis\n # int32 x 50 - colored targets using the same bitfield format used to extract colors earlier\n dt = np.dtype(\n [\n (\"filler1\", \"<h\", 3),\n (\"time\", \"<Q\"),\n (\"points\", \"<i\", 400),\n (\"filler2\", \"<h\"),\n (\"x\", \"<i\"),\n (\"y\", \"<i\"),\n (\"head_angle\", \"<i\"),\n (\"targets\", \"<i\", 50),\n ]\n )\n data = np.fromfile(f, dt)\n\n nvt_data = dict()\n nvt_data[\"time\"] = data[\"time\"] * 1e-6\n nvt_data[\"x\"] = np.array(data[\"x\"], dtype=float)\n nvt_data[\"y\"] = np.array(data[\"y\"], dtype=float)\n nvt_data[\"targets\"] = np.array(data[\"targets\"], dtype=float)\n\n empty_idx = (data[\"x\"] == 0) & (data[\"y\"] == 0)\n for key in nvt_data:\n if remove_empty:\n nvt_data[key] = nvt_data[key][~empty_idx]\n\n return nvt_data\n"
] |
[
[
"numpy.fromfile",
"numpy.unique",
"numpy.arange",
"numpy.dtype",
"numpy.array",
"numpy.zeros"
]
] |
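All of these loaders rely on the same pattern: describe one binary record with a structured `np.dtype`, then let `np.fromfile` parse the whole file at once. A self-contained sketch with a made-up two-field little-endian record (the field names are illustrative, not Neuralynx's):

```python
# Sketch of the structured-dtype parsing pattern used throughout these loaders:
# write a couple of fake records to disk, then read them back with np.fromfile.
import tempfile
import numpy as np

dt = np.dtype([("time", "<Q"), ("value", "<h")])   # 10-byte record
records = np.array([(1_000_000, 7), (2_000_000, -3)], dtype=dt)

with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as f:
    records.tofile(f)
    path = f.name

data = np.fromfile(path, dtype=dt)
print(data["time"] * 1e-6, data["value"])   # [1. 2.] [ 7 -3]
```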
PanyiDong/AutoML
|
[
"510727bd797e4f6fa213939c62d1d7601952e491"
] |
[
"My_AutoML/_imputation/_multiple.py"
] |
[
"\"\"\"\nFile: _multiple.py\nAuthor: Panyi Dong\nGitHub: https://github.com/PanyiDong/\nMathematics Department, University of Illinois at Urbana-Champaign (UIUC)\n\nProject: My_AutoML\nLatest Version: 0.2.0\nRelative Path: /My_AutoML/_imputation/_multiple.py\nFile Created: Tuesday, 5th April 2022 11:50:03 pm\nAuthor: Panyi Dong (panyid2@illinois.edu)\n\n-----\nLast Modified: Saturday, 16th April 2022 8:42:22 pm\nModified By: Panyi Dong (panyid2@illinois.edu)\n\n-----\nMIT License\n\nCopyright (c) 2022 - 2022, Panyi Dong\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport warnings\n\nfrom My_AutoML._utils import random_index, random_list\nfrom ._base import SimpleImputer\n\n\nclass ExpectationMaximization:\n\n \"\"\"\n Use Expectation Maximization (EM) to impute missing data[1]\n\n [1] Impyute.imputation.cs.em\n\n Parameters\n ----------\n iterations: maximum number of iterations for single imputation, default = 50\n\n threshold: threshold to early stop iterations, default = 0.01\n only early stop when iterations < self.iterations and change in the imputation < self.threshold\n\n seed: random seed, default = 1\n \"\"\"\n\n def __init__(self, iterations=50, threshold=0.01, seed=1):\n self.iterations = iterations\n self.threshold = threshold\n self.seed = seed\n\n self._fitted = False # whether the imputer has been fitted\n\n def fill(self, X):\n\n self.iterations = int(self.iterations)\n self.threshold = float(self.threshold)\n\n _X = X.copy(deep=True)\n n = _X.shape[0]\n\n if _X.isnull().values.any():\n _X = self._fill(_X)\n\n self._fitted = True\n\n return _X\n\n def _fill(self, X):\n\n features = list(X.columns)\n np.random.seed(self.seed)\n\n _missing_feature = [] # features contains missing values\n _missing_vector = [] # vector with missing values, to mark the missing index\n # create _missing_table with _missing_feature\n # missing index will be 1, existed index will be 0\n\n for _column in features:\n if X[_column].isnull().values.any():\n _missing_feature.append(_column)\n _missing_vector.append(\n X[_column].loc[X[_column].isnull()].index.astype(int)\n )\n\n _missing_vector = np.array(_missing_vector).T\n self._missing_table = pd.DataFrame(_missing_vector, columns=_missing_feature)\n\n for _column in list(self._missing_table.columns):\n for _index in self._missing_table[_column]:\n X.loc[_index, _column] = self._EM_iter(X, _index, _column)\n\n return X\n\n def _EM_iter(self, X, index, column):\n\n _mark = 1\n for _ in range(self.iterations):\n _mu = 
np.nanmean(X.loc[:, column])\n _std = np.nanstd(X.loc[:, column])\n _tmp = np.random.normal(loc=_mu, scale=_std)\n _delta = np.abs(_tmp - _mark) / _mark\n if _delta < self.threshold and self.iterations > 10:\n return _tmp\n X.loc[index, column] = _tmp\n _mark = _tmp\n return _tmp\n\n\nclass KNNImputer:\n\n \"\"\"\n Use KNN to impute the missing values, further update: use cross validation to select best k [1]\n\n [1] Stekhoven, D.J. and Bühlmann, P., 2012. MissForest—non-parametric missing value imputation\n for mixed-type data. Bioinformatics, 28(1), pp.112-118.\n\n Parameters\n ----------\n n_neighbors: list of k, default = None\n default will set to 1:10\n\n method: method to initially impute missing values, default = \"mean\"\n\n fold: cross validation number of folds, default = 10\n\n uni_class: unique class to be considered as categorical columns, default = 31\n\n seed: random seed, default = 1\n \"\"\"\n\n def __init__(\n self,\n n_neighbors=None,\n method=\"mean\",\n fold=10,\n uni_class=31,\n seed=1,\n ):\n self.n_neighbors = n_neighbors\n self.method = method\n self.fold = fold\n self.uni_class = uni_class\n self.seed = seed\n\n self._fitted = False # whether the imputer has been fitted\n\n def fill(self, X):\n\n features = list(X.columns)\n for _column in features:\n if len(X[_column].unique()) <= min(0.1 * len(X), self.uni_class):\n raise ValueError(\"KNN Imputation not supported for categorical data!\")\n\n _X = X.copy(deep=True)\n if _X.isnull().values.any():\n _X = self._fill(_X)\n else:\n warnings.warn(\"No nan values found, no change.\")\n\n self._fitted = True\n\n return _X\n\n def _fill(self, X):\n\n features = list(X.columns)\n\n self._missing_feature = [] # features containing missing values\n self._missing_vector = (\n []\n ) # vector with missing values, to mark the missing index\n # create _missing_table with _missing_feature\n # missing index will be 1, existed index will be 0\n\n for _column in features:\n if X[_column].isnull().values.any():\n self._missing_feature.append(_column)\n self._missing_vector.append(\n X[_column].loc[X[_column].isnull()].index.astype(int)\n )\n\n self._missing_vector = np.array(self._missing_vector).T\n self._missing_table = pd.DataFrame(\n self._missing_vector, columns=self._missing_feature\n )\n\n X = SimpleImputer(method=self.method).fill(\n X\n ) # initial filling for missing values\n\n random_features = random_list(\n self._missing_feature, self.seed\n ) # the order to regress on missing features\n # _index = random_index(len(X.index)) # random index for cross validation\n _err = []\n\n # if assigned n_neighbors, use it, otherwise use k-fold cross validation\n if self.n_neighbors is None:\n fold_size = int(len(X.index) / self.fold)\n for i in range(self.fold):\n # take the i-th fold as the test split, not always the first one\n _test = X.iloc[i * fold_size : (i + 1) * fold_size, :]\n # drop on a copy so X itself is not mutated between folds\n _train = X.drop(labels=_test.index, axis=0)\n _err.append(self._cross_validation_knn(_train, _test, random_features))\n\n _err = np.mean(np.array(_err), axis=0) # mean of cross validation error\n # argmin returns a scalar index; candidate k values start at 1\n self.optimal_k = int(np.array(_err).argmin()) + 1 # optimal k\n\n X = self._knn_impute(X, random_features, self.optimal_k)\n else:\n X = self._knn_impute(X, random_features, self.n_neighbors)\n\n return X\n\n def _cross_validation_knn(\n self, _train, _test, random_features\n ): # cross validation to return error\n\n from sklearn.neighbors import KNeighborsRegressor\n\n if self.n_neighbors is None:\n n_neighbors = [i + 1 for i in range(10)]\n else:\n n_neighbors = (\n self.n_neighbors\n if 
isinstance(self.n_neighbors, list)\n else [self.n_neighbors]\n )\n\n _test_mark = _test.copy(deep=True)\n _err = []\n\n for _k in n_neighbors:\n _test = _test_mark.copy(deep=True)\n for _feature in random_features:\n _subfeatures = list(_train.columns)\n _subfeatures.remove(_feature)\n\n fit_model = KNeighborsRegressor(n_neighbors=_k)\n fit_model.fit(_train.loc[:, _subfeatures], _train.loc[:, _feature])\n _test.loc[:, _feature] = fit_model.predict(_test.loc[:, _subfeatures])\n _err.append(((_test - _test_mark) ** 2).sum())\n\n return _err\n\n def _knn_impute(self, X, random_features, k):\n\n from sklearn.neighbors import KNeighborsRegressor\n\n features = list(X.columns)\n for _column in random_features:\n _subfeature = features.copy()\n _subfeature.remove(_column)\n X.loc[self._missing_table[_column], _column] = np.nan\n fit_model = KNeighborsRegressor(n_neighbors=k)\n fit_model.fit(\n X.loc[~X[_column].isnull(), _subfeature],\n X.loc[~X[_column].isnull(), _column],\n )\n X.loc[X[_column].isnull(), _column] = fit_model.predict(\n X.loc[X[_column].isnull(), _subfeature]\n )\n\n return X\n\n\nclass MissForestImputer:\n\n \"\"\"\n Run Random Forest to impute the missing values [1]\n\n [1] Stekhoven, D.J. and Bühlmann, P., 2012. MissForest—non-parametric missing\n value imputation for mixed-type data. Bioinformatics, 28(1), pp.112-118.\n\n Parameters\n ----------\n threshold: threshold to terminate iterations, default = 0\n At default, if difference between iterations increases, the iteration stops\n\n method: initial imputation method for missing values, default = 'mean'\n\n uni_class: column with unique classes less than uni_class will be considered as categorical, default = 31\n \"\"\"\n\n def __init__(self, threshold=0, method=\"mean\", uni_class=31):\n self.threshold = threshold\n self.method = method\n self.uni_class = uni_class\n\n self._fitted = False # whether the imputer has been fitted\n\n def _RFImputer(self, X):\n\n from sklearn.ensemble import RandomForestRegressor\n\n _delta = [] # criteria of termination\n\n while True:\n for _column in list(self._missing_table.columns):\n X_old = X.copy(deep=True)\n _subfeature = list(X_old.columns)\n _subfeature.remove(str(_column))\n _missing_index = self._missing_table[_column].tolist()\n RegModel = RandomForestRegressor()\n RegModel.fit(\n X.loc[~X.index.astype(int).isin(_missing_index), _subfeature],\n X.loc[~X.index.astype(int).isin(_missing_index), _column],\n )\n _tmp_column = RegModel.predict(\n X.loc[X.index.astype(int).isin(_missing_index), _subfeature]\n )\n X.loc[X.index.astype(int).isin(_missing_index), _column] = _tmp_column\n _delta.append(self._delta_cal(X, X_old))\n if len(_delta) >= 2 and _delta[-1] > _delta[-2]:\n break\n if len(_delta) >= 2 and _delta[-1] > _delta[-2]:\n break\n\n return X\n\n # calcualte the difference between data newly imputed and before imputation\n def _delta_cal(self, X_new, X_old):\n\n if (X_new.shape[0] != X_old.shape[0]) or (X_new.shape[1] != X_old.shape[1]):\n raise ValueError(\"New and old data must have same size, get different!\")\n\n _numerical_features = []\n _categorical_features = []\n for _column in list(self._missing_table.columns):\n if len(X_old[_column].unique()) <= self.uni_class:\n _categorical_features.append(_column)\n else:\n _numerical_features.append(_column)\n\n _N_nume = 0\n _N_deno = 0\n _F_nume = 0\n _F_deno = 0\n\n if len(_numerical_features) > 0:\n for _column in _numerical_features:\n _N_nume += ((X_new[_column] - X_old[_column]) ** 2).sum()\n _N_deno += (X_new[_column] 
** 2).sum()\n\n if len(_categorical_features) > 0:\n for _column in _categorical_features:\n _F_nume += (X_new[_column] != X_old[_column]).astype(int).sum()\n _F_deno += len(self._missing_table[_column])\n\n if len(_numerical_features) > 0 and len(_categorical_features) > 0:\n return _N_nume / _N_deno + _F_nume / _F_deno\n elif len(_numerical_features) > 0:\n return _N_nume / _N_deno\n elif len(_categorical_features) > 0:\n return _F_nume / _F_deno\n\n def fill(self, X):\n\n _X = X.copy(deep=True)\n if _X.isnull().values.any():\n _X = self._fill(_X)\n else:\n warnings.warn(\"No nan values found, no change.\")\n\n self._fitted = True\n\n return _X\n\n def _fill(self, X):\n\n features = list(X.columns)\n\n for _column in features:\n if (X[_column].dtype == object) or (str(X[_column].dtype) == \"category\"):\n raise ValueError(\n \"MICE can only handle numerical filling, run encoding first!\"\n )\n\n _missing_feature = [] # features contains missing values\n _missing_vector = [] # vector with missing values, to mark the missing index\n # create _missing_table with _missing_feature\n # missing index will be 1, existed index will be 0\n _missing_count = [] # counts for missing values\n\n for _column in features:\n if X[_column].isnull().values.any():\n _missing_feature.append(_column)\n _missing_vector.append(X.loc[X[_column].isnull()].index.astype(int))\n _missing_count.append(X[_column].isnull().astype(int).sum())\n\n # reorder the missing features by missing counts increasing\n _order = np.array(_missing_count).argsort().tolist()\n _missing_count = np.array(_missing_count)[_order].tolist()\n _missing_feature = np.array(_missing_feature)[_order].tolist()\n _missing_vector = np.array(_missing_vector)[_order].T.tolist()\n\n self._missing_table = pd.DataFrame(_missing_vector, columns=_missing_feature)\n\n X = SimpleImputer(method=self.method).fill(\n X\n ) # initial filling for missing values\n X = self._RFImputer(X)\n\n return X\n\n\nclass MICE:\n\n \"\"\"\n Multiple Imputation by chained equations (MICE)\n using single imputation to initialize the imputation step, and iteratively build regression/\n classification model to impute features with missing values [1]\n\n [1] Azur, M.J., Stuart, E.A., Frangakis, C. and Leaf, P.J., 2011. Multiple imputation by\n chained equations: what is it and how does it work?. 
International journal of methods in\n psychiatric research, 20(1), pp.40-49.\n\n Parameters\n ----------\n cycle: how many runs of regression/imputation to build the complete data, default = 10\n\n method: the method to initially fill nan values, default = 'mean'\n supproted methods ['mean', 'zero', 'median', 'most frequent', constant]\n 'mean' : fill columns with nan values using mean of non nan values\n 'zero': fill columns with nan values using 0\n 'median': fill columns with nan values using median of non nan values\n 'most frequent': fill columns with nan values using most frequent of non nan values\n constant: fill columns with nan values using predefined values\n\n seed: random seed, default = 1\n every random draw from the minority class will increase the random seed by 1\n \"\"\"\n\n def __init__(self, cycle=10, method=\"mean\", seed=1):\n self.method = method\n self.cycle = cycle\n self.seed = seed\n\n self._fitted = False # whether the imputer has been fitted\n\n def fill(self, X):\n\n self.cycle = int(self.cycle)\n\n _X = X.copy(deep=True)\n\n if _X.isnull().values.any():\n _X = self._fill(_X)\n else:\n warnings.warn(\"No nan values found, no change.\")\n\n self._fitted = True\n\n return _X\n\n def _fill(self, X):\n\n features = list(X.columns)\n\n for _column in features:\n if (X[_column].dtype == object) or (str(X[_column].dtype) == \"category\"):\n raise ValueError(\n \"MICE can only handle numerical filling, run encoding first!\"\n )\n\n self._missing_feature = [] # features contains missing values\n self._missing_vector = (\n []\n ) # vector with missing values, to mark the missing index\n # create _missing_table with _missing_feature\n # missing index will be 1, existed index will be 0\n\n for _column in features:\n if X[_column].isnull().values.any():\n self._missing_feature.append(_column)\n self._missing_vector.append(\n X.loc[X[_column].isnull()].index.astype(int)\n )\n\n self._missing_vector = np.array(self._missing_vector).T\n self._missing_table = pd.DataFrame(\n self._missing_vector, columns=self._missing_feature\n )\n\n X = SimpleImputer(method=self.method).fill(\n X\n ) # initial filling for missing values\n\n random_features = random_list(\n self._missing_feature, self.seed\n ) # the order to regress on missing features\n\n for _ in range(self.cycle):\n X = self._cycle_impute(X, random_features)\n\n return X\n\n def _cycle_impute(self, X, random_features):\n\n from sklearn.linear_model import LinearRegression, LogisticRegression, LassoCV\n\n features = list(X.columns)\n\n for _column in random_features:\n _subfeature = features\n _subfeature.remove(_column)\n _missing_index = self._missing_table[_column].tolist()\n X.loc[X.index.astype(int).isin(_missing_index), _column] = np.nan\n if len(X[_column].unique()) == 2:\n fit_model = LogisticRegression()\n elif len(features) <= 15:\n fit_model = LinearRegression()\n else:\n fit_model = LassoCV()\n fit_model.fit(\n X.loc[~X[_column].isnull(), _subfeature],\n X.loc[~X[_column].isnull(), _column],\n )\n X.loc[X[_column].isnull(), _column] = fit_model.predict(\n X.loc[X[_column].isnull(), _subfeature]\n )\n\n return X\n"
] |
[
[
"sklearn.ensemble.RandomForestRegressor",
"numpy.abs",
"numpy.random.seed",
"sklearn.linear_model.LogisticRegression",
"pandas.DataFrame",
"sklearn.neighbors.KNeighborsRegressor",
"numpy.random.normal",
"numpy.nanmean",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.LassoCV",
"numpy.nanstd",
"numpy.array"
]
] |
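A minimal sketch of one chained-equations cycle like `MICE._cycle_impute` above: single-impute first, then re-predict a column's missing cells from the other columns. Toy data; the column names and the single `LinearRegression` model are illustrative:

```python
# One-cycle sketch of the chained-equations idea behind MICE: mean-initialise,
# then regress the incomplete column on the complete ones and refill its holes.
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(1)
df = pd.DataFrame({"a": rng.normal(size=50), "b": rng.normal(size=50)})
df["c"] = 2 * df["a"] - df["b"] + rng.normal(scale=0.1, size=50)
df.loc[::7, "c"] = np.nan                    # knock out every 7th value

missing = df["c"].isnull()
df["c"] = df["c"].fillna(df["c"].mean())     # initial single imputation

model = LinearRegression().fit(df.loc[~missing, ["a", "b"]], df.loc[~missing, "c"])
df.loc[missing, "c"] = model.predict(df.loc[missing, ["a", "b"]])
assert not df["c"].isnull().any()
```

Running this refit-and-refill step repeatedly, over each incomplete column in turn, is what the `cycle` loop in the class above does.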
meipham/transformers
|
[
"6d873bb7c72495c594791b037d774552353ad95e"
] |
[
"src/transformers/models/roberta/modeling_roberta.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch RoBERTa model. \"\"\"\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint\nfrom torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss\n\nfrom ...activations import ACT2FN, gelu\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_roberta import RobertaConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"RobertaConfig\"\n_TOKENIZER_FOR_DOC = \"RobertaTokenizer\"\n\nROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"roberta-base\",\n \"roberta-large\",\n \"roberta-large-mnli\",\n \"distilroberta-base\",\n \"roberta-base-openai-detector\",\n \"roberta-large-openai-detector\",\n # See all RoBERTa models at https://huggingface.co/models?filter=roberta\n]\n\n\nclass RobertaEmbeddings(nn.Module):\n \"\"\"\n Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.\n \"\"\"\n\n # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n\n # End copy\n self.padding_idx = config.pad_token_id\n self.position_embeddings = nn.Embedding(\n config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx\n )\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if position_ids is None:\n if input_ids is not None:\n # 
Create the position ids from the input token ids. Any padded tokens remain padded.\n position_ids = create_position_ids_from_input_ids(\n input_ids, self.padding_idx, past_key_values_length\n ).to(input_ids.device)\n else:\n position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)\n\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n \"\"\"\n We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.\n\n Args:\n inputs_embeds: torch.Tensor\n\n Returns: torch.Tensor\n \"\"\"\n input_shape = inputs_embeds.size()[:-1]\n sequence_length = input_shape[1]\n\n position_ids = torch.arange(\n self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device\n )\n return position_ids.unsqueeze(0).expand(input_shape)\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta\nclass RobertaSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = 
encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n seq_length = hidden_states.size()[1]\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if 
we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput\nclass RobertaSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta\nclass RobertaAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = RobertaSelfAttention(config)\n self.output = RobertaSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate\nclass RobertaIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput\nclass RobertaOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, 
eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta\nclass RobertaLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = RobertaAttention(config)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n self.crossattention = RobertaAttention(config)\n self.intermediate = RobertaIntermediate(config)\n self.output = RobertaOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n past_key_value=self_attn_past_key_value,\n )\n attention_output = self_attention_outputs[0]\n\n # if decoder, the last output is tuple of self-attn cache\n if self.is_decoder:\n outputs = self_attention_outputs[1:-1]\n present_key_value = self_attention_outputs[-1]\n else:\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n cross_attn_present_key_value = None\n if self.is_decoder and encoder_hidden_states is not None:\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n\n # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n cross_attn_past_key_value,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights\n\n # add cross-attn cache to positions 3,4 of present_key_value tuple\n cross_attn_present_key_value = cross_attention_outputs[-1]\n present_key_value = present_key_value + cross_attn_present_key_value\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n\n # if decoder, return the attn key/values as the last output\n if self.is_decoder:\n outputs = outputs + (present_key_value,)\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta\nclass RobertaEncoder(nn.Module):\n def __init__(self, 
config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n\n next_decoder_cache = () if use_cache else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n if use_cache:\n logger.warn(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, past_key_value, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_decoder_cache,\n all_hidden_states,\n all_self_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPooler\nclass RobertaPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass RobertaPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = RobertaConfig\n base_model_prefix = \"roberta\"\n\n # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n 
if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nROBERTA_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nROBERTA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. 
See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaModel(RobertaPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz\n Kaiser and Illia Polosukhin.\n\n To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n\n .. _`Attention is all you need`: https://arxiv.org/abs/1706.03762\n\n \"\"\"\n\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = RobertaEmbeddings(config)\n self.encoder = RobertaEncoder(config)\n\n self.pooler = RobertaPooler(config) if add_pooling_layer else None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"roberta-base\",\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n # Copied from transformers.models.bert.modeling_bert.BertModel.forward\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. 
This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for\n tokens that are NOT MASKED, ``0`` for MASKED tokens.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n\n if not self.config.is_decoder:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x 
num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. \"\"\", ROBERTA_START_DOCSTRING\n)\nclass RobertaForCausalLM(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`\")\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in\n ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n Returns:\n\n Example::\n\n >>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig\n >>> import torch\n\n >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n >>> config = RobertaConfig.from_pretrained(\"roberta-base\")\n >>> config.is_decoder = True\n >>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"past_key_values\": past}\n\n def 
_reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past\n\n\n@add_start_docstrings(\"\"\"RoBERTa Model with a `language modeling` head on top. \"\"\", ROBERTA_START_DOCSTRING)\nclass RobertaForMaskedLM(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"roberta-base\",\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n mask=\"<mask>\",\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaLMHead(nn.Module):\n \"\"\"Roberta Head for masked language modeling.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, features, **kwargs):\n x = self.dense(features)\n x = gelu(x)\n x = self.layer_norm(x)\n\n # project back to size of vocabulary with bias\n x = self.decoder(x)\n\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForSequenceClassification(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.classifier = RobertaClassificationHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"roberta-base\",\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForMultipleChoice(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.roberta = RobertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"roberta-base\",\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n token_type_ids=None,\n attention_mask=None,\n labels=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See\n :obj:`input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n flat_inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.roberta(\n flat_input_ids,\n position_ids=flat_position_ids,\n token_type_ids=flat_token_type_ids,\n attention_mask=flat_attention_mask,\n head_mask=head_mask,\n inputs_embeds=flat_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForTokenClassification(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"roberta-base\",\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForQuestionAnswering(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"roberta-base\",\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\ndef create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n \"\"\"\n Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\n are ignored. This is modified from fairseq's `utils.make_positions`.\n\n Args:\n x: torch.Tensor x:\n\n Returns: torch.Tensor\n \"\"\"\n # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.\n mask = input_ids.ne(padding_idx).int()\n incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask\n return incremental_indices.long() + padding_idx\n"
] |
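(Editor's note, outside the record data: the "relative_key" branch of RobertaSelfAttention in the code above is dense, so here is a minimal standalone sketch of just that scoring step. The toy dimensions — batch, heads, seq_len, head_dim, max_position_embeddings — are assumptions for illustration, not values from the file.)

import torch
import torch.nn as nn

# Toy dimensions (assumed for illustration only).
batch, heads, seq_len, head_dim = 2, 4, 8, 16
max_position_embeddings = 32

query_layer = torch.randn(batch, heads, seq_len, head_dim)
key_layer = torch.randn(batch, heads, seq_len, head_dim)

# Signed distance between every query position l and key position r.
position_ids_l = torch.arange(seq_len, dtype=torch.long).view(-1, 1)
position_ids_r = torch.arange(seq_len, dtype=torch.long).view(1, -1)
distance = position_ids_l - position_ids_r  # shape (seq_len, seq_len)

# Shift distances into [0, 2*max_position_embeddings - 2] so they can index an embedding.
distance_embedding = nn.Embedding(2 * max_position_embeddings - 1, head_dim)
positional_embedding = distance_embedding(distance + max_position_embeddings - 1)

# Content-content scores plus content-position scores, as in the "relative_key" variant:
# einsum("bhld,lrd->bhlr") contracts the head dimension d, yielding one extra score
# per (query position l, key position r) pair.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
print(attention_scores.shape)  # torch.Size([2, 4, 8, 8])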
[
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.zeros",
"torch.cat",
"torch.einsum",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.matmul",
"torch.tanh",
"torch.nn.BCEWithLogitsLoss",
"torch.tensor",
"torch.arange",
"torch.cumsum",
"torch.nn.MSELoss"
]
] |
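(Editor's note, outside the record data: create_position_ids_from_input_ids, at the end of the file above, is easy to sanity-check in isolation because it is self-contained. The sketch below copies its body verbatim and runs it on a made-up toy batch; padding_idx=1 matches RoBERTa's usual pad id, while the other token ids are hypothetical.)

import torch

def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    # Body copied from the file above: padding positions keep padding_idx,
    # real tokens count up from padding_idx + 1.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx

input_ids = torch.tensor([[5, 6, 7, 1, 1]])  # 1 is the pad id; other ids are made up
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 1, 1]]) -- real tokens get 2, 3, 4; padding stays at 1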
TranHuyHoang18/GCP_TF-
|
[
"486545591dfdcfe628532c2f6640d1b9a9652522"
] |
[
"official/nlp/modeling/layers/text_layers.py"
] |
[
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Keras Layers for BERT-specific preprocessing.\"\"\"\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom absl import logging\nimport tensorflow as tf\n\ntry:\n import tensorflow_text as text # pylint: disable=g-import-not-at-top\nexcept ImportError:\n text = None\n\n\ndef _check_if_tf_text_installed():\n if text is None:\n raise ImportError(\"import tensorflow_text failed, please install \"\n \"'tensorflow-text-nightly'.\")\n\n\ndef round_robin_truncate_inputs(\n inputs: Union[tf.RaggedTensor, List[tf.RaggedTensor]],\n limit: Union[int, tf.Tensor],\n) -> Union[tf.RaggedTensor, List[tf.RaggedTensor]]:\n \"\"\"Truncates a list of batched segments to fit a per-example length limit.\n\n Available space is assigned one token at a time in a round-robin fashion\n to the inputs that still need some, until the limit is reached.\n (Or equivalently: the longest input is truncated by one token until the total\n length of inputs fits the limit.) Examples that fit the limit as passed in\n remain unchanged.\n\n Args:\n inputs: A list of rank-2 RaggedTensors. The i-th example is given by\n the i-th row in each list element, that is, `inputs[:][i, :]`.\n limit: The largest permissible number of tokens in total across one example.\n\n Returns:\n A list of rank-2 RaggedTensors at corresponding indices with the inputs,\n in which the rows of each RaggedTensor have been truncated such that\n the total number of tokens in each example does not exceed the `limit`.\n \"\"\"\n if not isinstance(inputs, (list, tuple)):\n return round_robin_truncate_inputs([inputs], limit)[0]\n limit = tf.cast(limit, tf.int64)\n if not all(rt.shape.rank == 2 for rt in inputs):\n raise ValueError(\"All inputs must have shape [batch_size, (items)]\")\n if len(inputs) == 1:\n return [_truncate_row_lengths(inputs[0], limit)]\n elif len(inputs) == 2:\n size_a, size_b = [rt.row_lengths() for rt in inputs]\n # Here's a brain-twister: This does round-robin assignment of quota\n # to both inputs until the limit is reached. 
Hint: consider separately\n # the cases of zero, one, or two inputs exceeding half the limit.\n floor_half = limit // 2\n ceil_half = limit - floor_half\n quota_a = tf.minimum(size_a, ceil_half + tf.nn.relu(floor_half - size_b))\n quota_b = tf.minimum(size_b, floor_half + tf.nn.relu(ceil_half - size_a))\n return [_truncate_row_lengths(inputs[0], quota_a),\n _truncate_row_lengths(inputs[1], quota_b)]\n else:\n raise ValueError(\"Must pass 1 or 2 inputs\")\n\n\ndef _truncate_row_lengths(ragged_tensor: tf.RaggedTensor,\n new_lengths: tf.Tensor) -> tf.RaggedTensor:\n \"\"\"Truncates the rows of `ragged_tensor` to the given row lengths.\"\"\"\n new_lengths = tf.broadcast_to(new_lengths,\n ragged_tensor.bounding_shape()[0:1])\n def fn(x):\n row, new_length = x\n return row[0:new_length]\n fn_dtype = tf.RaggedTensorSpec(dtype=ragged_tensor.dtype,\n ragged_rank=ragged_tensor.ragged_rank - 1)\n result = tf.map_fn(fn, (ragged_tensor, new_lengths), dtype=fn_dtype)\n # Work around broken shape propagation: without this, result has unknown rank.\n flat_values_shape = [None] * ragged_tensor.flat_values.shape.rank\n result = result.with_flat_values(\n tf.ensure_shape(result.flat_values, flat_values_shape))\n\n return result\n\n\nclass BertTokenizer(tf.keras.layers.Layer):\n \"\"\"Wraps BertTokenizer with pre-defined vocab as a Keras Layer.\n\n Attributes:\n tokenize_with_offsets: If true, calls\n `text.BertTokenizer.tokenize_with_offsets()` instead of plain\n `text.BertTokenizer.tokenize()` and outputs a triple of\n (tokens, start_offsets, limit_offsets).\n raw_table_access: An object with methods .lookup(keys) and .size()\n that operate on the raw lookup table of tokens. It can be used to\n look up special token symbols like [MASK].\n \"\"\"\n\n def __init__(self, *,\n vocab_file: str,\n lower_case: bool,\n tokenize_with_offsets: bool = False,\n **kwargs):\n \"\"\"Initialize a `BertTokenizer` layer.\n\n Args:\n vocab_file: A Python string with the path of the vocabulary file.\n This is a text file with newline-separated wordpiece tokens.\n This layer initializes a lookup table from it that gets used with\n `text.BertTokenizer`.\n lower_case: A Python boolean forwarded to `text.BertTokenizer`.\n If true, input text is converted to lower case (where applicable)\n before tokenization. This must be set to match the way in which\n the vocab_file was created.\n tokenize_with_offsets: A Python boolean. If true, this layer calls\n `text.BertTokenizer.tokenize_with_offsets()` instead of plain\n `text.BertTokenizer.tokenize()` and outputs a triple of\n (tokens, start_offsets, limit_offsets)\n instead of just tokens.\n **kwargs: standard arguments to Layer().\n\n Raises:\n ImportError: if importing `tensorflow_text` failed.\n \"\"\"\n _check_if_tf_text_installed()\n\n self.tokenize_with_offsets = tokenize_with_offsets\n # TODO(b/177326279): Stop storing the vocab table initializer as an\n # attribute when https://github.com/tensorflow/tensorflow/issues/46456\n # has been fixed in the TensorFlow versions of the TF Hub users that load\n # a SavedModel created from this layer. 
Due to that issue, loading such a\n # SavedModel forgets to add .vocab_table._initializer as a trackable\n # dependency of .vocab_table, so that saving it again to a second SavedModel\n # (e.g., the final model built using TF Hub) does not properly track\n # the ._vocab_table._initializer._filename as an Asset.\n self._vocab_table, self._vocab_initializer_donotuse = (\n self._create_vocab_table_and_initializer(vocab_file))\n self._special_tokens_dict = self._create_special_tokens_dict(\n self._vocab_table, vocab_file)\n super().__init__(**kwargs)\n self._bert_tokenizer = text.BertTokenizer(\n self._vocab_table, lower_case=lower_case)\n\n @property\n def vocab_size(self):\n return self._vocab_table.size()\n\n def _create_vocab_table_and_initializer(self, vocab_file):\n vocab_initializer = tf.lookup.TextFileInitializer(\n vocab_file,\n key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE,\n value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER)\n vocab_table = tf.lookup.StaticHashTable(vocab_initializer, default_value=-1)\n return vocab_table, vocab_initializer\n\n def call(self, inputs: tf.Tensor):\n \"\"\"Calls `text.BertTokenizer` on inputs.\n\n Args:\n inputs: A string Tensor of shape [batch_size].\n\n Returns:\n One or three of `RaggedTensors` if `tokenize_with_offsets` is False or\n True, respectively. These are\n tokens: A `RaggedTensor` of shape [batch_size, (words), (pieces_per_word)]\n and type int32. tokens[i,j,k] contains the k-th wordpiece of the\n j-th word in the i-th input.\n start_offsets, limit_offsets: If `tokenize_with_offsets` is True,\n RaggedTensors of type int64 with the same indices as tokens.\n Element [i,j,k] contains the byte offset at the start, or past the\n end, resp., for the k-th wordpiece of the j-th word in the i-th input.\n \"\"\"\n # Prepare to reshape the result to work around broken shape inference.\n batch_size = tf.shape(inputs)[0]\n def _reshape(rt):\n values = rt.values\n row_splits = rt.row_splits\n row_splits = tf.reshape(row_splits, [batch_size + 1])\n return tf.RaggedTensor.from_row_splits(values, row_splits)\n\n # Call the tokenizer.\n if self.tokenize_with_offsets:\n tokens, start_offsets, limit_offsets = (\n self._bert_tokenizer.tokenize_with_offsets(inputs))\n tokens = tf.cast(tokens, dtype=tf.int32)\n return _reshape(tokens), _reshape(start_offsets), _reshape(limit_offsets)\n else:\n tokens = self._bert_tokenizer.tokenize(inputs)\n tokens = tf.cast(tokens, dtype=tf.int32)\n return _reshape(tokens)\n\n def get_config(self):\n # Skip in tf.saved_model.save(); fail if called direcly.\n # TODO(arnoegw): Implement when switching to MutableHashTable, which gets\n # initialized from the checkpoint and not from a vocab file.\n # We cannot just put the original, user-supplied vocab file name into\n # the config, because the path has to change as the SavedModel is copied\n # around.\n raise NotImplementedError(\"Not implemented yet.\")\n\n def get_special_tokens_dict(self):\n \"\"\"Returns dict of token ids, keyed by standard names for their purpose.\n\n Returns:\n A dict from Python strings to Python integers. Each key is a standard\n name for a special token describing its use. (For example, \"padding_id\"\n is what BERT traditionally calls \"[PAD]\" but others may call \"<pad>\".)\n The corresponding value is the integer token id. 
If a special token\n is not found, its entry is omitted from the dict.\n\n The supported keys and tokens are:\n * start_of_sequence_id: looked up from \"[CLS]\"\n * end_of_segment_id: looked up from \"[SEP]\"\n * padding_id: looked up form \"[PAD]\"\n * mask_id: looked up from \"[MASK]\"\n * vocab_size: one past the largest token id used\n \"\"\"\n return self._special_tokens_dict\n\n def _create_special_tokens_dict(self, vocab_table, vocab_file):\n special_tokens = dict(start_of_sequence_id=\"[CLS]\",\n end_of_segment_id=\"[SEP]\",\n padding_id=\"[PAD]\",\n mask_id=\"[MASK]\")\n with tf.init_scope():\n if tf.executing_eagerly():\n special_token_ids = vocab_table.lookup(\n tf.constant(list(special_tokens.values()), tf.string))\n vocab_size = vocab_table.size()\n else:\n # A blast from the past: non-eager init context while building Model.\n # This can happen with Estimator or tf.compat.v1.disable_v2_behavior().\n logging.warning(\n \"Non-eager init context; computing \"\n \"BertTokenizer's special_tokens_dict in tf.compat.v1.Session\")\n with tf.Graph().as_default():\n local_vocab_table, _ = self._create_vocab_table_and_initializer(\n vocab_file)\n special_token_ids_tensor = local_vocab_table.lookup(\n tf.constant(list(special_tokens.values()), tf.string))\n vocab_size_tensor = local_vocab_table.size()\n init_ops = [tf.compat.v1.initialize_all_tables()]\n with tf.compat.v1.Session() as sess:\n sess.run(init_ops)\n special_token_ids, vocab_size = sess.run(\n [special_token_ids_tensor, vocab_size_tensor])\n result = dict(\n vocab_size=int(vocab_size) # Numpy to Python.\n )\n for k, v in zip(special_tokens, special_token_ids):\n v = int(v)\n if v >= 0:\n result[k] = v\n else:\n logging.warning(\"Could not find %s as token \\\"%s\\\" in vocab file %s\",\n k, special_tokens[k], vocab_file)\n return result\n\n\nclass SentencepieceTokenizer(tf.keras.layers.Layer):\n \"\"\"Wraps tf_text.SentencepieceTokenizer as a Keras Layer.\n\n Attributes:\n tokenize_with_offsets: If true, calls\n SentencepieceTokenizer.tokenize_with_offsets()\n instead of plain .tokenize() and outputs a triple of\n (tokens, start_offsets, limit_offsets).\n \"\"\"\n\n def __init__(self,\n *,\n lower_case: bool,\n model_file_path: Optional[str] = None,\n model_serialized_proto: Optional[str] = None,\n tokenize_with_offsets: bool = False,\n nbest_size: int = 0,\n alpha: float = 1.0,\n strip_diacritics: bool = False,\n **kwargs):\n \"\"\"Initializes a SentencepieceTokenizer layer.\n\n Args:\n lower_case: A Python boolean indicating whether to lowercase the string\n before tokenization. NOTE: New models are encouraged to build `*_cf`\n (case folding) normalization into the Sentencepiece model itself and\n avoid this extra step.\n model_file_path: A Python string with the path of the sentencepiece model.\n Exactly one of `model_file_path` and `model_serialized_proto` can be\n specified. In either case, the Keras model config for this layer will\n store the actual proto (not a filename passed here).\n model_serialized_proto: The sentencepiece model serialized proto string.\n tokenize_with_offsets: A Python boolean. If true, this layer calls\n SentencepieceTokenizer.tokenize_with_offsets() instead of\n plain .tokenize() and outputs a triple of\n (tokens, start_offsets, limit_offsets) insead of just tokens.\n Note that when following `strip_diacritics` is set to True, returning\n offsets is not supported now.\n nbest_size: A scalar for sampling:\n nbest_size = {0,1}: No sampling is performed. 
(default)\n nbest_size > 1: samples from the nbest_size results.\n nbest_size < 0: assuming that nbest_size is infinite and samples\n from all hypotheses (lattice) using the\n forward-filtering-and-backward-sampling algorithm.\n alpha: A scalar for a smoothing parameter. Inverse temperature for\n probability rescaling.\n strip_diacritics: Whether to strip diacritics or not. Note that stripping\n diacritics requires additional text normalization and dropping bytes,\n which makes it impossible to keep track of the offsets now. Hence\n when `strip_diacritics` is set to True, we don't yet support\n `tokenize_with_offsets`. NOTE: New models are encouraged to put this\n into custom normalization rules for the Sentencepiece model itself to\n avoid this extra step and the limitation regarding offsets.\n **kwargs: standard arguments to Layer().\n\n Raises:\n ImportError: if importing tensorflow_text failed.\n \"\"\"\n _check_if_tf_text_installed()\n super().__init__(**kwargs)\n if bool(model_file_path) == bool(model_serialized_proto):\n raise ValueError(\"Exactly one of `model_file_path` and \"\n \"`model_serialized_proto` must be specified.\")\n # TODO(b/181866850): Support tokenize_with_offsets for strip_diacritics=True\n if tokenize_with_offsets and strip_diacritics:\n raise ValueError(\"`tokenize_with_offsets` is not supported when \"\n \"`strip_diacritics` is set to True.\")\n if model_file_path:\n self._model_serialized_proto = tf.io.gfile.GFile(model_file_path,\n \"rb\").read()\n else:\n self._model_serialized_proto = model_serialized_proto\n\n self._lower_case = lower_case\n self.tokenize_with_offsets = tokenize_with_offsets\n self._nbest_size = nbest_size\n self._alpha = alpha\n self._strip_diacritics = strip_diacritics\n self._tokenizer = self._create_tokenizer()\n self._special_tokens_dict = self._create_special_tokens_dict()\n\n def _create_tokenizer(self):\n return text.SentencepieceTokenizer(\n model=self._model_serialized_proto,\n out_type=tf.int32,\n nbest_size=self._nbest_size,\n alpha=self._alpha)\n\n @property\n def vocab_size(self):\n return self._tokenizer.vocab_size()\n\n def call(self, inputs: tf.Tensor):\n \"\"\"Calls text.SentencepieceTokenizer on inputs.\n\n Args:\n inputs: A string Tensor of shape [batch_size].\n\n Returns:\n One or three of RaggedTensors if tokenize_with_offsets is False or True,\n respectively. 
These are\n tokens: A RaggedTensor of shape [batch_size, (pieces)] and type int32.\n tokens[i,j] contains the j-th piece in the i-th input.\n start_offsets, limit_offsets: If tokenize_with_offsets is True,\n RaggedTensors of type int64 with the same indices as tokens.\n Element [i,j] contains the byte offset at the start, or past the\n end, resp., for the j-th piece in the i-th input.\n \"\"\"\n if self._strip_diacritics:\n if self.tokenize_with_offsets:\n raise ValueError(\"`tokenize_with_offsets` is not supported yet when \"\n \"`strip_diacritics` is set to True (b/181866850).\")\n inputs = text.normalize_utf8(inputs, \"NFD\")\n inputs = tf.strings.regex_replace(inputs, r\"\\p{Mn}\", \"\")\n\n if self._lower_case:\n inputs = text.case_fold_utf8(inputs)\n\n # Prepare to reshape the result to work around broken shape inference.\n batch_size = tf.shape(inputs)[0]\n def _reshape(rt):\n values = rt.values\n row_splits = rt.row_splits\n row_splits = tf.reshape(row_splits, [batch_size + 1])\n return tf.RaggedTensor.from_row_splits(values, row_splits)\n\n # Call the tokenizer.\n if self.tokenize_with_offsets:\n tokens, start_offsets, limit_offsets = (\n self._tokenizer.tokenize_with_offsets(inputs))\n return _reshape(tokens), _reshape(start_offsets), _reshape(limit_offsets)\n else:\n tokens = self._tokenizer.tokenize(inputs)\n return _reshape(tokens)\n\n def get_config(self):\n raise NotImplementedError(\"b/170480226\")\n # TODO(b/170480226): Uncomment and improve to fix the bug.\n # config = {\n # \"model_serialized_proto\": self._model_serialized_proto,\n # \"lower_case\": self._lower_case,\n # \"tokenize_with_offsets\": self.tokenize_with_offsets,\n # \"nbest_size\": self._nbest_size,\n # \"alpha\": self._alpha,\n # \"strip_diacritics\": self._strip_diacritics,\n # }\n # base_config = super(SentencepieceTokenizer, self).get_config()\n # base_config.update(config)\n # return base_config\n\n def get_special_tokens_dict(self):\n \"\"\"Returns dict of token ids, keyed by standard names for their purpose.\n\n Returns:\n A dict from Python strings to Python integers. Each key is a standard\n name for a special token describing its use. (For example, \"padding_id\"\n is what Sentencepiece calls \"<pad>\" but others may call \"[PAD]\".)\n The corresponding value is the integer token id. 
If a special token\n is not found, its entry is omitted from the dict.\n\n The supported keys and tokens are:\n * start_of_sequence_id: looked up from \"[CLS]\"\n * end_of_segment_id: looked up from \"[SEP]\"\n * padding_id: looked up from \"<pad>\"\n * mask_id: looked up from \"[MASK]\"\n * vocab_size: one past the largest token id used\n \"\"\"\n return self._special_tokens_dict\n\n def _create_special_tokens_dict(self):\n special_tokens = dict(\n start_of_sequence_id=b\"[CLS]\",\n end_of_segment_id=b\"[SEP]\",\n padding_id=b\"<pad>\",\n mask_id=b\"[MASK]\")\n with tf.init_scope():\n if tf.executing_eagerly():\n special_token_ids = self._tokenizer.string_to_id(\n tf.constant(list(special_tokens.values()), tf.string))\n inverse_tokens = self._tokenizer.id_to_string(special_token_ids)\n vocab_size = self._tokenizer.vocab_size()\n else:\n # A blast from the past: non-eager init context while building Model.\n # This can happen with Estimator or tf.compat.v1.disable_v2_behavior().\n logging.warning(\n \"Non-eager init context; computing SentencepieceTokenizer's \"\n \"special_tokens_dict in tf.compat.v1.Session\")\n with tf.Graph().as_default():\n local_tokenizer = self._create_tokenizer()\n special_token_ids_tensor = local_tokenizer.string_to_id(\n tf.constant(list(special_tokens.values()), tf.string))\n inverse_tokens_tensor = local_tokenizer.id_to_string(\n special_token_ids_tensor)\n vocab_size_tensor = local_tokenizer.vocab_size()\n with tf.compat.v1.Session() as sess:\n special_token_ids, inverse_tokens, vocab_size = sess.run(\n [special_token_ids_tensor, inverse_tokens_tensor,\n vocab_size_tensor])\n result = dict(\n vocab_size=int(vocab_size) # Numpy to Python.\n )\n for name, token_id, inverse_token in zip(special_tokens,\n special_token_ids,\n inverse_tokens):\n if special_tokens[name] == inverse_token:\n result[name] = int(token_id)\n else:\n logging.warning(\n \"Could not find %s as token \\\"%s\\\" in sentencepiece model, \"\n \"got \\\"%s\\\"\", name, special_tokens[name], inverse_token)\n return result\n\n\nclass BertPackInputs(tf.keras.layers.Layer):\n \"\"\"Packs tokens into model inputs for BERT.\"\"\"\n\n def __init__(self,\n seq_length,\n *,\n start_of_sequence_id=None,\n end_of_segment_id=None,\n padding_id=None,\n special_tokens_dict=None,\n truncator=\"round_robin\",\n **kwargs):\n \"\"\"Initializes with a target seq_length, relevant token ids and truncator.\n\n Args:\n seq_length: The desired output length. Must not exceed the max_seq_length\n that was fixed at training time for the BERT model receiving the inputs.\n start_of_sequence_id: The numeric id of the token that is to be placed\n at the start of each sequence (called \"[CLS]\" for BERT).\n end_of_segment_id: The numeric id of the token that is to be placed\n at the end of each input segment (called \"[SEP]\" for BERT).\n padding_id: The numeric id of the token that is to be placed into the\n unused positions after the last segment in the sequence\n (called \"[PAD]\" for BERT).\n special_tokens_dict: Optionally, a dict from Python strings to Python\n integers that contains values for start_of_sequence_id,\n end_of_segment_id and padding_id. (Further values in the dict are\n silenty ignored.) If this is passed, separate *_id arguments must be\n omitted.\n truncator: The algorithm to truncate a list of batched segments to fit a\n per-example length limit. 
The value can be either \"round_robin\" or\n \"waterfall\":\n (1) For \"round_robin\" algorithm, available space is assigned\n one token at a time in a round-robin fashion to the inputs that still\n need some, until the limit is reached. It currently only supports\n one or two segments.\n (2) For \"waterfall\" algorithm, the allocation of the budget is done\n using a \"waterfall\" algorithm that allocates quota in a\n left-to-right manner and fills up the buckets until we run out of\n budget. It support arbitrary number of segments.\n\n **kwargs: standard arguments to Layer().\n\n Raises:\n ImportError: if importing tensorflow_text failed.\n \"\"\"\n _check_if_tf_text_installed()\n super().__init__(**kwargs)\n self.seq_length = seq_length\n if truncator not in (\"round_robin\", \"waterfall\"):\n raise ValueError(\"Only 'round_robin' and 'waterfall' algorithms are \"\n \"supported, but got %s\" % truncator)\n self.truncator = truncator\n self._init_token_ids(\n start_of_sequence_id=start_of_sequence_id,\n end_of_segment_id=end_of_segment_id,\n padding_id=padding_id,\n special_tokens_dict=special_tokens_dict)\n\n def _init_token_ids(\n self, *,\n start_of_sequence_id,\n end_of_segment_id,\n padding_id,\n special_tokens_dict):\n usage = (\"Must pass either all of start_of_sequence_id, end_of_segment_id, \"\n \"padding_id as arguments, or else a special_tokens_dict \"\n \"with those keys.\")\n special_tokens_args = [start_of_sequence_id, end_of_segment_id, padding_id]\n if special_tokens_dict is None:\n if any(x is None for x in special_tokens_args):\n return ValueError(usage)\n self.start_of_sequence_id = int(start_of_sequence_id)\n self.end_of_segment_id = int(end_of_segment_id)\n self.padding_id = int(padding_id)\n else:\n if any(x is not None for x in special_tokens_args):\n return ValueError(usage)\n self.start_of_sequence_id = int(\n special_tokens_dict[\"start_of_sequence_id\"])\n self.end_of_segment_id = int(special_tokens_dict[\"end_of_segment_id\"])\n self.padding_id = int(special_tokens_dict[\"padding_id\"])\n\n def get_config(self) -> Dict[str, Any]:\n config = super().get_config()\n config[\"seq_length\"] = self.seq_length\n config[\"start_of_sequence_id\"] = self.start_of_sequence_id\n config[\"end_of_segment_id\"] = self.end_of_segment_id\n config[\"padding_id\"] = self.padding_id\n config[\"truncator\"] = self.truncator\n return config\n\n def call(self, inputs: Union[tf.RaggedTensor, List[tf.RaggedTensor]]):\n \"\"\"Adds special tokens to pack a list of segments into BERT input Tensors.\n\n Args:\n inputs: A Python list of one or two RaggedTensors, each with the batched\n values one input segment. 
The j-th segment of the i-th input example\n consists of slice `inputs[j][i, ...]`.\n\n Returns:\n A nest of Tensors for use as input to the BERT TransformerEncoder.\n \"\"\"\n # BertPackInputsSavedModelWrapper relies on only calling bert_pack_inputs()\n return BertPackInputs.bert_pack_inputs(\n inputs, self.seq_length,\n start_of_sequence_id=self.start_of_sequence_id,\n end_of_segment_id=self.end_of_segment_id,\n padding_id=self.padding_id,\n truncator=self.truncator)\n\n @staticmethod\n def bert_pack_inputs(inputs: Union[tf.RaggedTensor, List[tf.RaggedTensor]],\n seq_length: Union[int, tf.Tensor],\n start_of_sequence_id: Union[int, tf.Tensor],\n end_of_segment_id: Union[int, tf.Tensor],\n padding_id: Union[int, tf.Tensor],\n truncator=\"round_robin\"):\n \"\"\"Freestanding equivalent of the BertPackInputs layer.\"\"\"\n _check_if_tf_text_installed()\n # Sanitize inputs.\n if not isinstance(inputs, (list, tuple)):\n inputs = [inputs]\n if not inputs:\n raise ValueError(\"At least one input is required for packing\")\n input_ranks = [rt.shape.rank for rt in inputs]\n if None in input_ranks or len(set(input_ranks)) > 1:\n raise ValueError(\"All inputs for packing must have the same known rank, \"\n \"found ranks \" + \",\".join(input_ranks))\n # Flatten inputs to [batch_size, (tokens)].\n if input_ranks[0] > 2:\n inputs = [rt.merge_dims(1, -1) for rt in inputs]\n # In case inputs weren't truncated (as they should have been),\n # fall back to some ad-hoc truncation.\n num_special_tokens = len(inputs) + 1\n if truncator == \"round_robin\":\n trimmed_segments = round_robin_truncate_inputs(\n inputs, seq_length - num_special_tokens)\n elif truncator == \"waterfall\":\n trimmed_segments = text.WaterfallTrimmer(\n seq_length - num_special_tokens).trim(inputs)\n else:\n raise ValueError(\"Unsupported truncator: %s\" % truncator)\n # Combine segments.\n segments_combined, segment_ids = text.combine_segments(\n trimmed_segments,\n start_of_sequence_id=start_of_sequence_id,\n end_of_segment_id=end_of_segment_id)\n # Pad to dense Tensors.\n input_word_ids, _ = text.pad_model_inputs(segments_combined, seq_length,\n pad_value=padding_id)\n input_type_ids, input_mask = text.pad_model_inputs(segment_ids, seq_length,\n pad_value=0)\n # Work around broken shape inference.\n output_shape = tf.stack([\n inputs[0].nrows(out_type=tf.int32), # batch_size\n tf.cast(seq_length, dtype=tf.int32)])\n def _reshape(t):\n return tf.reshape(t, output_shape)\n # Assemble nest of input tensors as expected by BERT TransformerEncoder.\n return dict(input_word_ids=_reshape(input_word_ids),\n input_mask=_reshape(input_mask),\n input_type_ids=_reshape(input_type_ids))\n"
] |
[
[
"tensorflow.ensure_shape",
"tensorflow.strings.regex_replace",
"tensorflow.lookup.TextFileInitializer",
"tensorflow.nn.relu",
"tensorflow.executing_eagerly",
"tensorflow.Graph",
"tensorflow.shape",
"tensorflow.io.gfile.GFile",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.compat.v1.initialize_all_tables",
"tensorflow.compat.v1.Session",
"tensorflow.map_fn",
"tensorflow.RaggedTensor.from_row_splits",
"tensorflow.init_scope",
"tensorflow.RaggedTensorSpec",
"tensorflow.lookup.StaticHashTable"
]
] |
AaronDJohnson/enterprise
|
[
"d964464bba699a9455897d890a97c40f25c5b004"
] |
[
"enterprise/signals/gp_signals.py"
] |
[
"# gp_signals.py\n\"\"\"Contains class factories for Gaussian Process (GP) signals.\nGP signals are defined as the class of signals that have a basis\nfunction matrix and basis prior vector..\n\"\"\"\n\nimport functools\nimport itertools\nimport logging\n\nimport numpy as np\nimport scipy.sparse as sps\nfrom sksparse.cholmod import cholesky\n\nfrom enterprise.signals import parameter, selections, signal_base, utils\nfrom enterprise.signals.parameter import function\nfrom enterprise.signals.selections import Selection\nfrom enterprise.signals.utils import KernelMatrix\n\n# logging.basicConfig(format=\"%(levelname)s: %(name)s: %(message)s\", level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef BasisGP(\n priorFunction,\n basisFunction,\n coefficients=False,\n combine=True,\n selection=Selection(selections.no_selection),\n name=\"\",\n):\n \"\"\"Class factory for generic GPs with a basis matrix.\"\"\"\n\n class BasisGP(signal_base.Signal):\n signal_type = \"basis\"\n signal_name = name\n signal_id = name\n\n basis_combine = combine\n\n def __init__(self, psr):\n super(BasisGP, self).__init__(psr)\n self.name = self.psrname + \"_\" + self.signal_id\n self._do_selection(psr, priorFunction, basisFunction, coefficients, selection)\n\n def _do_selection(self, psr, priorfn, basisfn, coefficients, selection):\n sel = selection(psr)\n\n self._keys = sorted(sel.masks.keys())\n self._masks = [sel.masks[key] for key in self._keys]\n self._prior, self._bases = {}, {}\n self._params, self._coefficients = {}, {}\n\n for key, mask in zip(self._keys, self._masks):\n pnames = [psr.name, name, key]\n pname = \"_\".join([n for n in pnames if n])\n\n self._prior[key] = priorfn(pname, psr=psr)\n self._bases[key] = basisfn(pname, psr=psr)\n\n for par in itertools.chain(self._prior[key]._params.values(), self._bases[key]._params.values()):\n self._params[par.name] = par\n\n if coefficients:\n # we can only create GPCoefficients parameters if the basis\n # can be constructed with default arguments\n # (and does not change size)\n self._construct_basis()\n\n for key in self._keys:\n pname = \"_\".join([n for n in [psr.name, name, key] if n])\n\n chain = itertools.chain(self._prior[key]._params.values(), self._bases[key]._params.values())\n priorargs = {par.name: self._params[par.name] for par in chain}\n\n logprior = parameter.Function(functools.partial(self._get_coefficient_logprior, key), **priorargs)\n\n size = self._slices[key].stop - self._slices[key].start\n\n cpar = parameter.GPCoefficients(logprior=logprior, size=size)(pname + \"_coefficients\")\n\n self._coefficients[key] = cpar\n self._params[cpar.name] = cpar\n\n @property\n def basis_params(self):\n \"\"\"Get any varying basis parameters.\"\"\"\n ret = []\n for basis in self._bases.values():\n ret.extend([pp.name for pp in basis.params])\n return ret\n\n # since this function has side-effects, it can only be cached\n # with limit=1, so it will run again if called with params different\n # than the last time\n @signal_base.cache_call(\"basis_params\", limit=1)\n def _construct_basis(self, params={}):\n basis, self._labels = {}, {}\n for key, mask in zip(self._keys, self._masks):\n basis[key], self._labels[key] = self._bases[key](params=params, mask=mask)\n\n nc = sum(F.shape[1] for F in basis.values())\n self._basis = np.zeros((len(self._masks[0]), nc))\n\n # TODO: should this be defined here? 
it will cache phi\n self._phi = KernelMatrix(nc)\n\n self._slices = {}\n nctot = 0\n for key, mask in zip(self._keys, self._masks):\n Fmat = basis[key]\n nn = Fmat.shape[1]\n self._basis[mask, nctot : nn + nctot] = Fmat\n self._slices.update({key: slice(nctot, nn + nctot)})\n nctot += nn\n\n # this class does different things (and gets different method\n # definitions) if the user wants it to model GP coefficients\n # (e.g., for a hierarchical likelihood) or if they do not\n if coefficients:\n\n def _get_coefficient_logprior(self, key, c, **params):\n self._construct_basis(params)\n\n phi = self._prior[key](self._labels[key], params=params)\n\n if phi.ndim == 1:\n return -0.5 * np.sum(c * c / phi) - 0.5 * np.sum(np.log(phi)) - 0.5 * len(phi) * np.log(2 * np.pi)\n # note: (2*pi)^(n/2) is not in signal_base likelihood\n else:\n # TO DO: this code could be embedded in KernelMatrix\n phiinv, logdet = KernelMatrix(phi).inv(logdet=True)\n return -0.5 * np.dot(c, np.dot(phiinv, c)) - 0.5 * logdet - 0.5 * phi.shape[0] * np.log(2 * np.pi)\n\n # MV: could assign this to a data member at initialization\n @property\n def delay_params(self):\n return [pp.name for pp in self.params if \"_coefficients\" in pp.name]\n\n @signal_base.cache_call([\"basis_params\", \"delay_params\"])\n def get_delay(self, params={}):\n self._construct_basis(params)\n\n c = np.zeros(self._basis.shape[1])\n for key, slc in self._slices.items():\n p = self._coefficients[key]\n c[slc] = params[p.name] if p.name in params else p.value\n\n return np.dot(self._basis, c)\n\n def get_basis(self, params={}):\n return None\n\n def get_phi(self, params):\n return None\n\n def get_phiinv(self, params):\n return None\n\n else:\n\n @property\n def delay_params(self):\n return []\n\n def get_delay(self, params={}):\n return 0\n\n def get_basis(self, params={}):\n self._construct_basis(params)\n\n return self._basis\n\n def get_phi(self, params):\n self._construct_basis(params)\n\n for key, slc in self._slices.items():\n phislc = self._prior[key](self._labels[key], params=params)\n self._phi = self._phi.set(phislc, slc)\n return self._phi\n\n def get_phiinv(self, params):\n return self.get_phi(params).inv()\n\n return BasisGP\n\n\ndef FourierBasisGP(\n spectrum,\n coefficients=False,\n combine=True,\n components=20,\n selection=Selection(selections.no_selection),\n Tspan=None,\n modes=None,\n name=\"red_noise\",\n pshift=False,\n pseed=None,\n):\n \"\"\"Convenience function to return a BasisGP class with a\n fourier basis.\"\"\"\n\n basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, modes=modes, pshift=pshift, pseed=pseed)\n BaseClass = BasisGP(spectrum, basis, coefficients=coefficients, combine=combine, selection=selection, name=name)\n\n class FourierBasisGP(BaseClass):\n signal_type = \"basis\"\n signal_name = \"red noise\"\n signal_id = name\n\n return FourierBasisGP\n\n\ndef TimingModel(coefficients=False, name=\"linear_timing_model\", use_svd=False, normed=True):\n \"\"\"Class factory for marginalized linear timing model signals.\"\"\"\n\n if normed is True:\n basis = utils.normed_tm_basis()\n elif isinstance(normed, np.ndarray):\n basis = utils.normed_tm_basis(norm=normed)\n elif use_svd is True:\n if normed is not True:\n msg = \"use_svd == True is incompatible with normed != True\"\n raise ValueError(msg)\n basis = utils.svd_tm_basis()\n else:\n basis = utils.unnormed_tm_basis()\n\n prior = utils.tm_prior()\n BaseClass = BasisGP(prior, basis, coefficients=coefficients, name=name)\n\n class 
TimingModel(BaseClass):\n signal_type = \"basis\"\n signal_name = \"linear timing model\"\n signal_id = name + \"_svd\" if use_svd else name\n\n if coefficients:\n\n def _get_coefficient_logprior(self, key, c, **params):\n # MV: probably better to avoid this altogether\n # than to use 1e40 as in get_phi\n return 0\n\n return TimingModel\n\n\n@function\ndef ecorr_basis_prior(weights, log10_ecorr=-8):\n \"\"\"Returns the ecorr prior.\n :param weights: A vector or weights for the ecorr prior.\n \"\"\"\n return weights * 10 ** (2 * log10_ecorr)\n\n\ndef EcorrBasisModel(\n log10_ecorr=parameter.Uniform(-10, -5),\n coefficients=False,\n selection=Selection(selections.no_selection),\n name=\"basis_ecorr\",\n):\n \"\"\"Convenience function to return a BasisGP class with a\n quantized ECORR basis.\"\"\"\n\n basis = utils.create_quantization_matrix()\n prior = ecorr_basis_prior(log10_ecorr=log10_ecorr)\n BaseClass = BasisGP(prior, basis, coefficients=coefficients, selection=selection, name=name)\n\n class EcorrBasisModel(BaseClass):\n signal_type = \"basis\"\n signal_name = \"basis ecorr\"\n signal_id = name\n\n return EcorrBasisModel\n\n\ndef BasisCommonGP(priorFunction, basisFunction, orfFunction, coefficients=False, combine=True, name=\"\"):\n class BasisCommonGP(signal_base.CommonSignal):\n signal_type = \"common basis\"\n signal_name = \"common\"\n signal_id = name\n\n basis_combine = combine\n\n _orf = orfFunction(name)\n _prior = priorFunction(name)\n\n def __init__(self, psr):\n super(BasisCommonGP, self).__init__(psr)\n self.name = self.psrname + \"_\" + self.signal_id\n\n pname = \"_\".join([psr.name, name])\n self._bases = basisFunction(pname, psr=psr)\n\n self._params, self._coefficients = {}, {}\n\n for par in itertools.chain(\n self._prior._params.values(), self._orf._params.values(), self._bases._params.values()\n ):\n self._params[par.name] = par\n\n self._psrpos = psr.pos\n\n if coefficients:\n self._construct_basis()\n\n # if we're given an instantiated coefficient vector\n # that's what we will use\n if isinstance(coefficients, parameter.Parameter):\n self._coefficients[\"\"] = coefficients\n self._params[coefficients.name] = coefficients\n\n return\n\n chain = itertools.chain(\n self._prior._params.values(), self._orf._params.values(), self._bases._params.values()\n )\n priorargs = {par.name: self._params[par.name] for par in chain}\n\n logprior = parameter.Function(self._get_coefficient_logprior, **priorargs)\n\n size = self._basis.shape[1]\n\n cpar = parameter.GPCoefficients(logprior=logprior, size=size)(pname + \"_coefficients\")\n\n self._coefficients[\"\"] = cpar\n self._params[cpar.name] = cpar\n\n @property\n def basis_params(self):\n \"\"\"Get any varying basis parameters.\"\"\"\n return [pp.name for pp in self._bases.params]\n\n # since this function has side-effects, it can only be cached\n # with limit=1, so it will run again if called with params different\n # than the last time\n @signal_base.cache_call(\"basis_params\", limit=1)\n def _construct_basis(self, params={}):\n self._basis, self._labels = self._bases(params=params)\n\n if coefficients:\n\n def _get_coefficient_logprior(self, c, **params):\n # MV: for correlated GPs, the prior needs to use\n # the coefficients for all GPs together;\n # this may require parameter groups\n\n raise NotImplementedError(\"Need to implement common prior \" + \"for BasisCommonGP coefficients\")\n\n @property\n def delay_params(self):\n return [pp.name for pp in self.params if \"_coefficients\" in pp.name]\n\n 
@signal_base.cache_call([\"basis_params\", \"delay_params\"])\n def get_delay(self, params={}):\n self._construct_basis(params)\n\n p = self._coefficients[\"\"]\n c = params[p.name] if p.name in params else p.value\n return np.dot(self._basis, c)\n\n def get_basis(self, params={}):\n return None\n\n def get_phi(self, params):\n return None\n\n def get_phicross(cls, signal1, signal2, params):\n return None\n\n def get_phiinv(self, params):\n return None\n\n else:\n\n @property\n def delay_params(self):\n return []\n\n def get_delay(self, params={}):\n return 0\n\n def get_basis(self, params={}):\n self._construct_basis(params)\n\n return self._basis\n\n def get_phi(self, params):\n self._construct_basis(params)\n\n prior = BasisCommonGP._prior(self._labels, params=params)\n orf = BasisCommonGP._orf(self._psrpos, self._psrpos, params=params)\n\n return prior * orf\n\n @classmethod\n def get_phicross(cls, signal1, signal2, params):\n prior = BasisCommonGP._prior(signal1._labels, params=params)\n orf = BasisCommonGP._orf(signal1._psrpos, signal2._psrpos, params=params)\n\n return prior * orf\n\n return BasisCommonGP\n\n\ndef FourierBasisCommonGP(\n spectrum,\n orf,\n coefficients=False,\n combine=True,\n components=20,\n Tspan=None,\n modes=None,\n name=\"common_fourier\",\n pshift=False,\n pseed=None,\n):\n\n if coefficients and Tspan is None:\n raise ValueError(\n \"With coefficients=True, FourierBasisCommonGP \" + \"requires that you specify Tspan explicitly.\"\n )\n\n basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, modes=modes, pshift=pshift, pseed=pseed)\n BaseClass = BasisCommonGP(spectrum, basis, orf, coefficients=coefficients, combine=combine, name=name)\n\n class FourierBasisCommonGP(BaseClass):\n signal_type = \"common basis\"\n signal_name = \"common red noise\"\n signal_id = name\n\n _Tmin, _Tmax = [], []\n\n def __init__(self, psr):\n super(FourierBasisCommonGP, self).__init__(psr)\n\n if Tspan is None:\n FourierBasisCommonGP._Tmin.append(psr.toas.min())\n FourierBasisCommonGP._Tmax.append(psr.toas.max())\n\n # since this function has side-effects, it can only be cached\n # with limit=1, so it will run again if called with params different\n # than the last time\n @signal_base.cache_call(\"basis_params\", 1)\n def _construct_basis(self, params={}):\n span = Tspan if Tspan is not None else max(FourierBasisCommonGP._Tmax) - min(FourierBasisCommonGP._Tmin)\n self._basis, self._labels = self._bases(params=params, Tspan=span)\n\n return FourierBasisCommonGP\n\n\n# for simplicity, we currently do not handle Tspan automatically\ndef FourierBasisCommonGP_ephem(spectrum, components, Tspan, name=\"ephem_gp\"):\n basis = utils.createfourierdesignmatrix_ephem(nmodes=components, Tspan=Tspan)\n orf = utils.monopole_orf()\n\n return BasisCommonGP(spectrum, basis, orf, name=name)\n\n\ndef FourierBasisCommonGP_physicalephem(\n frame_drift_rate=1e-9,\n d_jupiter_mass=1.54976690e-11,\n d_saturn_mass=8.17306184e-12,\n d_uranus_mass=5.71923361e-11,\n d_neptune_mass=7.96103855e-11,\n jup_orb_elements=0.05,\n sat_orb_elements=0.5,\n model=\"setIII\",\n coefficients=False,\n name=\"phys_ephem_gp\",\n):\n \"\"\"\n Class factory for physical ephemeris corrections as a common GP.\n Individual perturbations can be excluded by setting the corresponding\n prior sigma to None.\n\n :param frame_drift_rate: Gaussian sigma for frame drift rate\n :param d_jupiter_mass: Gaussian sigma for Jupiter mass perturbation\n :param d_saturn_mass: Gaussian sigma for Saturn mass perturbation\n 
:param d_uranus_mass: Gaussian sigma for Uranus mass perturbation\n :param d_neptune_mass: Gaussian sigma for Neptune mass perturbation\n :param jup_orb_elements: Gaussian sigma for Jupiter orbital elem. perturb.\n :param sat_orb_elements: Gaussian sigma for Saturn orbital elem. perturb.\n :param model: vector basis used by Jupiter and Saturn perturb.;\n see PhysicalEphemerisSignal, defaults to \"setIII\"\n :param coefficients: if True, treat GP coefficients as enterprise\n parameters; if False, marginalize over them\n\n :return: BasisCommonGP representing ephemeris perturbations\n \"\"\"\n\n basis = utils.createfourierdesignmatrix_physicalephem(\n frame_drift_rate=frame_drift_rate,\n d_jupiter_mass=d_jupiter_mass,\n d_saturn_mass=d_saturn_mass,\n d_uranus_mass=d_uranus_mass,\n d_neptune_mass=d_neptune_mass,\n jup_orb_elements=jup_orb_elements,\n sat_orb_elements=sat_orb_elements,\n model=model,\n )\n\n spectrum = utils.physicalephem_spectrum()\n orf = utils.monopole_orf()\n\n return BasisCommonGP(spectrum, basis, orf, coefficients=coefficients, name=name)\n\n\ndef WidebandTimingModel(\n dmefac=parameter.Uniform(pmin=0.1, pmax=10.0),\n log10_dmequad=parameter.Uniform(pmin=-7.0, pmax=0.0),\n dmjump=parameter.Uniform(pmin=-0.01, pmax=0.01),\n dmefac_selection=Selection(selections.no_selection),\n log10_dmequad_selection=Selection(selections.no_selection),\n dmjump_selection=Selection(selections.no_selection),\n dmjump_ref=None,\n name=\"wideband_timing_model\",\n):\n \"\"\"Class factory for marginalized linear timing model signals\n that take wideband TOAs and DMs. Currently assumes DMX for DM model.\"\"\"\n\n basis = utils.unnormed_tm_basis() # will need to normalize phi otherwise\n prior = utils.tm_prior() # standard\n BaseClass = BasisGP(prior, basis, coefficients=False, name=name)\n\n class WidebandTimingModel(BaseClass):\n signal_type = \"basis\"\n signal_name = \"wideband timing model\"\n signal_id = name\n\n basis_combine = False # should never need to be True\n\n def __init__(self, psr):\n super(WidebandTimingModel, self).__init__(psr)\n self.name = self.psrname + \"_\" + self.signal_id\n\n # make selection for DMEFACs\n dmefac_select = dmefac_selection(psr)\n self._dmefac_keys = list(sorted(dmefac_select.masks.keys()))\n self._dmefac_masks = [dmefac_select.masks[key] for key in self._dmefac_keys]\n\n # make selection for DMEQUADs\n log10_dmequad_select = log10_dmequad_selection(psr)\n self._log10_dmequad_keys = list(sorted(log10_dmequad_select.masks.keys()))\n self._log10_dmequad_masks = [log10_dmequad_select.masks[key] for key in self._log10_dmequad_keys]\n\n # make selection for DMJUMPs\n dmjump_select = dmjump_selection(psr)\n self._dmjump_keys = list(sorted(dmjump_select.masks.keys()))\n self._dmjump_masks = [dmjump_select.masks[key] for key in self._dmjump_keys]\n\n if self._dmjump_keys == [\"\"] and dmjump is not None:\n raise ValueError(\"WidebandTimingModel: can only do DMJUMP with more than one selection.\")\n\n # collect parameters\n\n self._params = {}\n\n self._dmefacs = []\n for key in self._dmefac_keys:\n pname = \"_\".join([n for n in [psr.name, key, \"dmefac\"] if n])\n param = dmefac(pname)\n\n self._dmefacs.append(param)\n self._params[param.name] = param\n\n self._log10_dmequads = []\n for key in self._log10_dmequad_keys:\n pname = \"_\".join([n for n in [psr.name, key, \"log10_dmequad\"] if n])\n param = log10_dmequad(pname)\n\n self._log10_dmequads.append(param)\n self._params[param.name] = param\n\n self._dmjumps = []\n if dmjump is not None:\n for key in 
self._dmjump_keys:\n pname = \"_\".join([n for n in [psr.name, key, \"dmjump\"] if n])\n if dmjump_ref is not None:\n if pname == psr.name + \"_\" + dmjump_ref + \"_dmjump\":\n fixed_dmjump = parameter.Constant(val=0.0)\n param = fixed_dmjump(pname)\n else:\n param = dmjump(pname)\n else:\n param = dmjump(pname)\n\n self._dmjumps.append(param)\n self._params[param.name] = param\n\n # copy psr quantities\n\n self._ntoas = len(psr.toas)\n self._npars = len(psr.fitpars)\n\n self._freqs = psr.freqs\n\n # collect DMX information (will be used to make phi and delay)\n\n self._dmpar = psr.dm\n self._dm = np.array(psr.flags[\"pp_dm\"], \"d\")\n self._dmerr = np.array(psr.flags[\"pp_dme\"], \"d\")\n\n check = np.zeros_like(psr.toas, \"i\")\n\n # assign TOAs to DMX bins\n\n self._dmx, self._dmindex, self._dmwhich = [], [], []\n for index, key in enumerate(sorted(psr.dmx)):\n dmx = psr.dmx[key]\n\n if not dmx[\"fit\"]:\n raise ValueError(\"WidebandTimingModel: all DMX parameters must be estimated.\")\n\n self._dmx.append(dmx[\"DMX\"])\n self._dmindex.append(psr.fitpars.index(key))\n self._dmwhich.append((dmx[\"DMXR1\"] <= psr.stoas / 86400) & (psr.stoas / 86400 < dmx[\"DMXR2\"]))\n\n check += self._dmwhich[-1]\n\n if np.sum(check) != self._ntoas:\n raise ValueError(\"WidebandTimingModel: cannot account for all TOAs in DMX intervals.\")\n\n if \"DM\" in psr.fitpars:\n raise ValueError(\"WidebandTimingModel: DM must not be estimated.\")\n\n self._ndmx = len(self._dmx)\n\n @property\n def delay_params(self):\n # cache parameters are all DMEFACS, DMEQUADS, and DMJUMPS\n return (\n [p.name for p in self._dmefacs]\n + [p.name for p in self._log10_dmequads]\n + [p.name for p in self._dmjumps]\n )\n\n @signal_base.cache_call([\"delay_params\"])\n def get_phi(self, params):\n \"\"\"Return wideband timing-model prior.\"\"\"\n\n # get DMEFAC- and DMEQUAD-adjusted DMX errors\n dme = self.get_dme(params)\n\n # initialize the timing-model \"infinite\" prior\n phi = KernelMatrix(1e40 * np.ones(self._npars, \"d\"))\n\n # fill the DMX slots with weighted errors\n for index, which in zip(self._dmindex, self._dmwhich):\n phi.set(1.0 / np.sum(1.0 / dme[which] ** 2), index)\n\n return phi\n\n def get_phiinv(self, params):\n \"\"\"Return inverse prior (using KernelMatrix inv).\"\"\"\n return self.get_phi(params).inv()\n\n @signal_base.cache_call([\"delay_params\"])\n def get_delay(self, params):\n \"\"\"Return the weighted-mean DM correction that applies for each residual.\n (Will be the same across each DM bin, before measurement-frequency weighting.)\"\"\"\n\n dm_delay = np.zeros(self._ntoas, \"d\")\n\n avg_dm = self.get_mean_dm(params)\n\n for dmx, which in zip(self._dmx, self._dmwhich):\n dm_delay[which] = avg_dm[which] - (self._dmpar + dmx)\n\n return dm_delay / (2.41e-4 * self._freqs ** 2)\n\n @signal_base.cache_call([\"delay_params\"])\n def get_dm(self, params):\n \"\"\"Return DMJUMP-adjusted DM measurements.\"\"\"\n\n return (\n sum(\n (params[jump.name] if jump.name in params else jump.value) * mask\n for jump, mask in zip(self._dmjumps, self._dmjump_masks)\n )\n + self._dm\n )\n\n @signal_base.cache_call([\"delay_params\"])\n def get_dme(self, params):\n \"\"\"Return EFAC- and EQUAD-weighted DM errors.\"\"\"\n\n return (\n sum(\n (params[efac.name] if efac.name in params else efac.value) * mask\n for efac, mask in zip(self._dmefacs, self._dmefac_masks)\n )\n ** 2\n * self._dmerr ** 2\n + (\n 10\n ** sum(\n (params[equad.name] if equad.name in params else equad.value) * mask\n for equad, mask in 
zip(self._log10_dmequads, self._log10_dmequad_masks)\n )\n )\n ** 2\n ) ** 0.5\n\n @signal_base.cache_call([\"delay_params\"])\n def get_mean_dm(self, params):\n \"\"\"Get weighted DMX estimates (distributed to TOAs).\"\"\"\n\n mean_dm = np.zeros(self._ntoas, \"d\")\n\n # DMEFAC- and DMJUMP-adjusted\n dm, dme = self.get_dm(params), self.get_dme(params)\n\n for which in self._dmwhich:\n mean_dm[which] = np.sum(dm[which] / dme[which] ** 2) / np.sum(1.0 / dme[which] ** 2)\n\n return mean_dm\n\n @signal_base.cache_call([\"delay_params\"])\n def get_mean_dme(self, params):\n \"\"\"Get weighted DMX uncertainties (distributed to TOAs).\n Note that get_phi computes these variances directly.\"\"\"\n\n mean_dme = np.zeros(self._ntoas, \"d\")\n\n # DMEFAC- and DMJUMP-adjusted\n dme = self.get_dme(params)\n\n for which in self._dmwhich:\n mean_dme[which] = np.sqrt(1.0 / np.sum(1.0 / dme[which] ** 2))\n\n return mean_dme\n\n @signal_base.cache_call([\"delay_params\"])\n def get_logsignalprior(self, params):\n \"\"\"Get an additional likelihood/prior term to cover terms that would not\n affect optimization, were they not dependent on DMEFAC, DMEQUAD, and DMJUMP.\"\"\"\n\n dm, dme = self.get_dm(params), self.get_dme(params)\n mean_dm, mean_dme = self.get_mean_dm(params), self.get_mean_dme(params)\n\n # now this is a bit wasteful, because it makes copies of the mean DMX and DMXERR\n # and only uses the first value, but it shouldn't cost us too much\n expterm = -0.5 * np.sum(dm ** 2 / dme ** 2)\n expterm += 0.5 * sum(mean_dm[which][0] ** 2 / mean_dme[which][0] ** 2 for which in self._dmwhich)\n\n # sum_i [-0.5 * log(dmerr**2)] = -sum_i log dmerr; same for mean_dmerr\n logterm = -np.sum(np.log(dme)) + sum(np.log(mean_dme[which][0]) for which in self._dmwhich)\n\n return expterm + logterm\n\n # these are for debugging, but should not enter the likelihood computation\n\n def get_delta_dm(self, params, use_mean_dm=False): # DM - DMX\n delta_dm = np.zeros(self._ntoas, \"d\")\n\n if use_mean_dm:\n dm = self.get_mean_dm(params)\n else:\n dm = self.get_dm(params) # DMJUMP-adjusted\n for dmx, which in zip(self._dmx, self._dmwhich):\n delta_dm[which] = dm[which] - (self._dmpar + dmx)\n\n return delta_dm\n\n def get_dm_chi2(self, params, use_mean_dm=False): # 'DM' chi-sqaured\n delta_dm = self.get_delta_dm(params, use_mean_dm=use_mean_dm)\n\n if use_mean_dm:\n dme = self.get_mean_dme(params)\n chi2 = 0.0\n for idmx, which in enumerate(self._dmwhich):\n chi2 += (delta_dm[which][0] / dme[which][0]) ** 2\n\n else:\n dme = self.get_dme(params) # DMEFAC- and DMEQUAD-adjusted\n chi2 = np.sum((delta_dm / dme) ** 2)\n\n return chi2\n\n return WidebandTimingModel\n\n\ndef MarginalizingTimingModel(name=\"marginalizing_linear_timing_model\"):\n basisFunction = utils.normed_tm_basis()\n\n class TimingModel(signal_base.Signal):\n signal_type = \"white noise\"\n signal_name = \"marginalizing linear timing model\"\n signal_id = name\n\n def __init__(self, psr):\n super(TimingModel, self).__init__(psr)\n self.name = self.psrname + \"_\" + self.signal_id\n\n pname = \"_\".join([psr.name, name])\n self.Mmat = basisFunction(pname, psr=psr)\n\n self._params = {}\n\n @property\n def ndiag_params(self):\n return []\n\n # there are none, but to be general...\n @signal_base.cache_call(\"ndiag_params\")\n def get_ndiag(self, params):\n return MarginalizingNmat(self.Mmat()[0])\n\n return TimingModel\n\n\nclass MarginalizingNmat(object):\n def __init__(self, Mmat, Nmat=0):\n self.Mmat, self.Nmat = Mmat, Nmat\n self.Mprior = Mmat.shape[1] * 
np.log(1e40)\n\n def __add__(self, other):\n if isinstance(other, MarginalizingNmat):\n raise ValueError(\"Cannot combine multiple MarginalizingNmat objects.\")\n elif isinstance(other, np.ndarray) or hasattr(other, \"solve\"):\n return MarginalizingNmat(self.Mmat, self.Nmat + other)\n elif other == 0:\n return self\n else:\n raise TypeError\n\n def __radd__(self, other):\n return self.__add__(other)\n\n # in Python 3.8: @functools.cached_property\n @property\n @functools.lru_cache()\n def cf(self):\n MNM = sps.csc_matrix(self.Nmat.solve(self.Mmat, left_array=self.Mmat))\n return cholesky(MNM)\n\n @signal_base.simplememobyid\n def MNr(self, res):\n return self.Nmat.solve(res, left_array=self.Mmat)\n\n @signal_base.simplememobyid\n def MNF(self, T):\n return self.Nmat.solve(T, left_array=self.Mmat)\n\n @signal_base.simplememobyid\n def MNMMNF(self, T):\n return self.cf(self.MNF(T))\n\n # we're ignoring logdet = True for two-dimensional cases, but OK\n def solve(self, right, left_array=None, logdet=False):\n if right.ndim == 1 and left_array is right:\n res = right\n\n rNr, logdet_N = self.Nmat.solve(res, left_array=res, logdet=logdet)\n\n MNr = self.MNr(res)\n ret = rNr - np.dot(MNr, self.cf(MNr))\n return (ret, logdet_N + self.cf.logdet() + self.Mprior) if logdet else ret\n elif right.ndim == 1 and left_array is not None and left_array.ndim == 2:\n res, T = right, left_array\n\n TNr = self.Nmat.solve(res, left_array=T)\n return TNr - np.tensordot(self.MNMMNF(T), self.MNr(res), (0, 0))\n elif right.ndim == 2 and left_array is right:\n T = right\n\n TNT = self.Nmat.solve(T, left_array=T)\n return TNT - np.tensordot(self.MNF(T), self.MNMMNF(T), (0, 0))\n else:\n raise ValueError(\"Incorrect arguments given to MarginalizingNmat.solve.\")\n"
] |
[
[
"numpy.dot",
"numpy.log",
"numpy.ones",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
lrq3000/vaex
|
[
"923911f47e7324335dfd84bc58a14d4cd6eb7ee6"
] |
[
"tests/dataset_test.py"
] |
[
"from io import BytesIO\nimport pickle\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\nimport pyarrow.parquet\n\nimport vaex\nimport vaex.dataset as dataset\n\nHERE = Path(__file__).parent\n\n\ndef rebuild(ds):\n # pick and unpickle\n f = BytesIO()\n picked = pickle.dump(ds, f)\n f.seek(0)\n return pickle.load(f)\n\ndef test_array_pickle():\n x = np.arange(10)\n y = x**2\n ds = dataset.DatasetArrays(x=x, y=y).hashed()\n assert ds == rebuild(ds)\n\n\ndef test_no_hash():\n x1 = np.arange(10)\n y1 = x1**2\n ds1 = dataset.DatasetArrays(x=x1, y=y1)\n\n x2 = np.arange(10)\n y2 = x2**2\n ds2 = dataset.DatasetArrays(x=x2, y=y2)\n\n with pytest.raises(ValueError, match='.*hash.*'):\n ds1 == ds2\n with pytest.raises(ValueError, match='.*hash.*'):\n ds1 == ds2.hashed()\n with pytest.raises(ValueError, match='.*hash.*'):\n ds1.hashed() == ds2\n ds1.hashed() == ds2.hashed()\n\n\ndef test_array_eq():\n x1 = np.arange(10)\n y1 = x1**2\n ds1 = dataset.DatasetArrays(x=x1, y=y1).hashed()\n assert ds1['x'] is x1\n assert ds1['y'] is y1\n\n x2 = np.arange(10)\n y2 = x2**2\n ds2 = dataset.DatasetArrays(x=x2, y=y2).hashed()\n assert ds2['x'] is x2\n assert ds2['y'] is y2\n\n # different data, but same ids/hashes\n assert ds1 == ds2\n assert ds1 == rebuild(ds2)\n\n\ndef test_array_rename():\n x = np.arange(10)\n y = x**2\n ds1 = dataset.DatasetArrays(x=x, y=y).hashed()\n ds2 = ds1.renamed({'x': 'z'})\n assert ds2['y'] is y\n assert ds2['z'] is x\n\n assert 'z' in list(ds2.chunk_iterator(['z']))[0][-1]\n\n assert ds1 != ds2\n assert rebuild(ds1) != rebuild(ds2)\n\n ds3 = ds2.renamed({'z': 'x'})\n assert ds3['y'] is y\n assert ds3['x'] is x\n\n # different data, but same ids/hashes\n assert ds1 == ds3\n assert rebuild(ds1) == rebuild(ds3)\n\n\ndef test_merge():\n x = np.arange(10)\n y = x**2\n ds1 = dataset.DatasetArrays(x=x, y=y).hashed()\n dsx = dataset.DatasetArrays(x=x)\n dsy = dataset.DatasetArrays(y=y)\n ds2 = dsx.merged(dsy).hashed()\n\n assert ds1 == ds2\n assert rebuild(ds1) == rebuild(ds2)\n\n with pytest.raises(NameError):\n ds2.merged(dsx)\n\n\ndef test_slice_column():\n # slicing a colunm type should keep it column type\n x = np.arange(10)\n y = x**2\n ds1 = dataset.DatasetArrays(x=x, y=y)\n indices = np.array([1, 2, 5, 7, 9])\n ds2 = ds1.take(indices)\n ds3 = ds2[1:3]\n assert isinstance(ds3['x'], vaex.column.ColumnIndexed)\n\n\ndef test_slice():\n x = np.arange(10)\n y = x**2\n ds1 = dataset.DatasetArrays(x=x, y=y)\n ds2 = ds1[1:8]\n ds2b = ds1[1:8]\n ds2c = ds1[1:9]\n assert ds1.hashed() != ds2.hashed()\n assert ds2.hashed() == ds2b.hashed()\n assert ds2.hashed() != ds2c.hashed()\n assert ds1.row_count == 10\n assert ds2.row_count == 7\n assert ds2b.row_count == 7\n assert ds2c.row_count == 8\n assert ds2['x'].tolist() == x[1:8].tolist()\n\n\n ds3 = dataset.DatasetArrays(x=x[1:8], y=y[1:8])\n\n assert ds2.hashed() != ds3.hashed()\n assert rebuild(ds1).hashed() != rebuild(ds2).hashed()\n\n\ndef test_take():\n x = np.arange(10)\n y = x**2\n ds1 = dataset.DatasetArrays(x=x, y=y)\n indices = np.array([1, 2, 5])\n indices_other = np.array([1, 2, 6])\n ds2 = ds1.take(indices)\n ds2b = ds1.take(indices)\n ds2c = ds1.take(indices_other)\n assert ds1.hashed() != ds2.hashed()\n assert ds2.hashed() == ds2b.hashed()\n assert ds2.hashed() != ds2c.hashed()\n assert ds1.row_count == 10\n assert ds2.row_count == len(indices)\n assert ds2b.row_count == len(indices)\n assert ds2c.row_count == len(indices_other)\n assert ds2['x'].tolist() == x[indices].tolist()\n\n ds3 = 
dataset.DatasetArrays(x=x[indices], y=y[indices])\n\n assert ds2.hashed() != ds3.hashed()\n assert rebuild(ds1).hashed() != rebuild(ds2).hashed()\n\n\ndef test_project():\n x = np.arange(10)\n y = x**2\n ds1 = dataset.DatasetArrays(x=x, y=y)\n ds2 = ds1.project('x')\n ds3 = dataset.DatasetArrays(x=x)\n assert ds1.hashed() != ds2.hashed()\n assert ds2.hashed() == ds3.hashed()\n assert rebuild(ds2).hashed() == rebuild(ds3).hashed()\n\n\ndef test_drop():\n x = np.arange(10)\n y = x**2\n ds1 = dataset.DatasetArrays(x=x, y=y)\n ds2 = ds1.dropped('x')\n assert 'x' not in ds2\n ds3 = ds1.dropped('y')\n assert 'y' not in ds3\n assert ds1.hashed() == ds2.merged(ds3).hashed()\n assert rebuild(ds1).hashed() == rebuild(ds2.merged(ds3)).hashed()\n\n\ndef test_concat():\n x = np.arange(10)\n y = x**2\n ds = dataset.DatasetArrays(x=x, y=y)\n mid = 4\n ds1 = dataset.DatasetArrays(x=x[:mid], y=y[:mid])\n ds2 = dataset.DatasetArrays(y=y[mid:], x=x[mid:]) # order should not matter\n dsc = ds1.concat(ds2)\n assert ds.row_count == dsc.row_count\n assert dsc.row_count == ds1.row_count + ds2.row_count\n\n # an empty list of columns follows a different codepath\n assert list(dsc.chunk_iterator([])) == [(0, 10, {})]\n assert list(dsc.chunk_iterator([], start=5, end=10)) == [(0, 5, {})]\n\n\ndef test_example():\n df = vaex.example().hashed()\n path_data = HERE / 'data' / 'test.hdf5'\n assert isinstance(df, vaex.dataframe.DataFrame)\n assert isinstance(df.dataset, vaex.hdf5.dataset.Hdf5MemoryMapped)\n assert rebuild(df.dataset) == df.dataset\n\n\ndef test_hashable():\n # tests if we can use datasets as keys of dicts\n x = np.arange(10)\n y = x**2\n ds1 = dataset.DatasetArrays(x=x, y=y).hashed()\n df = vaex.example()\n some_dict = {ds1: '1', df.dataset: '2'}\n assert some_dict[ds1] == '1'\n assert some_dict[df.dataset] == '2'\n\n assert some_dict[rebuild(ds1)] == '1'\n assert some_dict[rebuild(df.dataset)] == '2'\n\n\ndef test_cache_hash():\n # TODO: what if the directory is not writable?\n # ds1 = dataset.DatasetArrays(x=x, y=y)\n path_data = HERE / 'data' / 'test.hdf5'\n if path_data.exists():\n path_data.unlink()\n path_hashes = HERE / 'data' / 'test.hdf5.d' / 'hashes.yaml'\n if path_hashes.exists():\n path_hashes.unlink()\n\n df = vaex.example()[:10]\n df.export(str(path_data))\n df2 = vaex.open(str(path_data))\n assert df2.dataset._hash_calculations == 0\n assert not path_hashes.exists()\n df2 = df2.hashed()\n assert df2.dataset._hash_calculations > 0\n assert path_hashes.exists()\n\n # and pickling\n ds = df2.dataset\n ds2 = rebuild(ds)\n assert ds2._hash_calculations == 0\n assert ds == ds2\n\n df3 = vaex.open(str(path_data))\n ds3 = df3.dataset\n assert ds3._hash_calculations == 0\n assert ds3 == ds2\n\n\ndef test_chunk_iterator():\n x = np.arange(10)\n y = x**2\n ds = dataset.DatasetArrays(x=x, y=y)\n chunk_it = ds.chunk_iterator(['y'], chunk_size=4)\n i1, i2, chunk0 = next(chunk_it)\n assert chunk0['y'].tolist() == y[0:4].tolist()\n assert i1 == 0\n assert i2 == 4\n\n i1, i2, chunk1 = next(chunk_it)\n assert chunk1['y'].tolist() == y[4:8].tolist()\n assert i1 == 4\n assert i2 == 8\n\n i1, i2, chunk2 = next(chunk_it)\n assert chunk2['y'].tolist() == y[8:].tolist()\n assert i1 == 8\n assert i2 == 10\n\n\n@pytest.mark.parametrize(\"l1\", list(range(1, 6)))\n@pytest.mark.parametrize(\"l2\", list(range(1, 6)))\ndef test_concat_chunk_iterator(l1, l2):\n i1 = 0\n i2 = i1 + l1\n i3 = i2 + l2\n x = np.arange(10)\n y = x**2\n g = x // 3\n ds = vaex.dataset.DatasetArrays(x=x, y=y, g=g)\n df_original = df = 
vaex.from_dataset(ds)\n df1 = df[i1:i2]\n df2 = df[i2:i3]\n df3 = df[i3:]\n df = vaex.concat([df1, df2, df3])\n ds_full = ds = df.dataset\n\n # very similar to the arrow/datase_test.py parquet test\n iter = ds.chunk_iterator(['x', 'y'], chunk_size=2)\n for i in range(5):\n i1, i2, chunks = next(iter)\n assert i1 == i*2\n assert i2 == (i + 1) * 2\n chunks['x'].tolist() == x[i1:i2].tolist()\n chunks['y'].tolist() == y[i1:i2].tolist()\n\n\n # no columns\n iter = ds.chunk_iterator([], chunk_size=2)\n for i in range(5):\n i1, i2, chunks = next(iter)\n assert i1 == i*2\n assert i2 == (i + 1) * 2\n\n ds = ds[1:10]\n assert 'x' in ds\n assert ds.row_count == 9\n iter = ds.chunk_iterator(['x', 'y'], chunk_size=2)\n for i in range(5):\n i1, i2, chunks = next(iter)\n if i == 4:\n assert i1 == 8\n assert i2 == 9\n else:\n assert i1 == i*2\n assert i2 == (i + 1) * 2\n # chunks = chunks\n chunks['x'].tolist() == x[i1:i2].tolist()\n chunks['y'].tolist() == y[i1:i2].tolist()\n\n ds = ds[1:9]\n assert ds.row_count == 8\n iter = ds.chunk_iterator(['x', 'y'], chunk_size=2)\n for i in range(4):\n i1, i2, chunks = next(iter)\n assert i1 == i*2\n assert i2 == (i + 1) * 2\n chunks['x'].tolist() == x[i1:i2].tolist()\n chunks['y'].tolist() == y[i1:i2].tolist()\n\n # no columns\n iter = ds.chunk_iterator([], chunk_size=2)\n for i in range(4):\n i1, i2, chunks = next(iter)\n assert i1 == i*2\n assert i2 == (i + 1) * 2\n\n # again, but here we skip of total of a chunk_size at the end\n ds = ds_full[:8]\n # import pdb; pdb.set_trace()\n assert ds.row_count == 8\n iter = ds.chunk_iterator(['x', 'y'], chunk_size=2)\n for i in range(4):\n i1, i2, chunks = next(iter)\n assert i1 == i*2\n assert i2 == (i + 1) * 2\n chunks['x'].tolist() == x[i1:i2].tolist()\n chunks['y'].tolist() == y[i1:i2].tolist()\n\n\n for i in range(9):\n for j in range(i+1, 10):\n ds = ds_full.slice(i, j)\n values = []\n for i1, i2, chunks in ds.chunk_iterator(['x']):\n values.extend(chunks['x'].tolist())\n assert x[i:j].tolist() == values\n\n assert df.x.tolist() == x.tolist()\n assert df.g.tolist() == g.tolist()\n\n ds_dropped = ds.dropped('x')\n assert 'x' not in ds_dropped\n\n"
] |
[
[
"numpy.arange",
"numpy.array"
]
] |
beringresearch/label-studio
|
[
"ab8b9b5605ec9eab76c4f90967874898239ed94e"
] |
[
"label_studio/ml/examples/pytorch_transfer_learning.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport time\nimport os\nimport numpy as np\nimport requests\nimport io\nimport hashlib\n\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import models, transforms\n\nfrom label_studio.ml import LabelStudioMLBase\nfrom label_studio.ml.utils import get_single_tag_keys, get_choice, is_skipped\n\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\nimage_size = 224\nimage_transforms = transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\nimage_cache_dir = os.path.join(os.path.dirname(__file__), 'image-cache')\nos.makedirs(image_cache_dir, exist_ok=True)\n\n\ndef get_transformed_image(url):\n is_local_file = url.startswith('http://localhost:')\n if is_local_file:\n filename, dir_path = url.split('/static/')[1].split('?d=')\n with open(os.path.join(dir_path, filename)) as f:\n image = Image.open(f).convert('RGB')\n else:\n cached_file = os.path.join(image_cache_dir, hashlib.md5(url.encode()).hexdigest())\n if os.path.exists(cached_file):\n with open(cached_file, mode='rb') as f:\n image = Image.open(f).convert('RGB')\n else:\n r = requests.get(url, stream=True)\n r.raise_for_status()\n with io.BytesIO(r.content) as f:\n image = Image.open(f).convert('RGB')\n with io.open(cached_file, mode='wb') as fout:\n fout.write(r.content)\n return image_transforms(image)\n\n\nclass ImageClassifierDataset(Dataset):\n\n def __init__(self, image_urls, image_classes):\n self.classes = list(set(image_classes))\n self.class_to_label = {c: i for i, c in enumerate(self.classes)}\n\n self.images, self.labels = [], []\n for image_url, image_class in zip(image_urls, image_classes):\n try:\n image = get_transformed_image(image_url)\n except Exception as exc:\n print(exc)\n continue\n self.images.append(image)\n self.labels.append(self.class_to_label[image_class])\n\n def __getitem__(self, index):\n return self.images[index], self.labels[index]\n\n def __len__(self):\n return len(self.images)\n\n\nclass ImageClassifier(object):\n\n def __init__(self, num_classes, freeze_extractor=False):\n self.model = models.resnet18(pretrained=True)\n if freeze_extractor:\n print('Transfer learning with a fixed ConvNet feature extractor')\n for param in self.model.parameters():\n param.requires_grad = False\n else:\n print('Transfer learning with a full ConvNet finetuning')\n\n num_ftrs = self.model.fc.in_features\n self.model.fc = nn.Linear(num_ftrs, num_classes)\n\n self.model = self.model.to(device)\n\n self.criterion = nn.CrossEntropyLoss()\n if freeze_extractor:\n self.optimizer = optim.SGD(self.model.fc.parameters(), lr=0.001, momentum=0.9)\n else:\n self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9)\n\n self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=7, gamma=0.1)\n\n def save(self, path):\n torch.save(self.model.state_dict(), path)\n\n def load(self, path):\n self.model.load_state_dict(torch.load(path))\n self.model.eval()\n\n def predict(self, image_urls):\n images = torch.stack([get_transformed_image(url) for url in image_urls])\n with torch.no_grad():\n return self.model(images).data.numpy()\n\n def train(self, dataloader, num_epochs=5):\n since = time.time()\n\n self.model.train()\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n running_loss = 0.0\n 
running_corrects = 0\n # Iterate over data.\n for inputs, labels in dataloader:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n self.optimizer.zero_grad()\n outputs = self.model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = self.criterion(outputs, labels)\n loss.backward()\n self.optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n self.scheduler.step(epoch)\n\n epoch_loss = running_loss / len(dataloader.dataset)\n epoch_acc = running_corrects.double() / len(dataloader.dataset)\n\n print('Train Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n\n return self.model\n\n\nclass ImageClassifierAPI(LabelStudioMLBase):\n\n def __init__(self, freeze_extractor=False, **kwargs):\n super(ImageClassifierAPI, self).__init__(**kwargs)\n self.from_name, self.to_name, self.value, self.classes = get_single_tag_keys(\n self.parsed_label_config, 'Choices', 'Image')\n\n if self.train_output:\n self.classes = self.train_output['classes']\n self.model = ImageClassifier(len(self.classes), freeze_extractor)\n self.model.load(self.train_output['model_path'])\n else:\n self.model = ImageClassifier(len(self.classes), freeze_extractor)\n\n def predict(self, tasks, **kwargs):\n image_urls = [task['data'][self.value] for task in tasks]\n logits = self.model.predict(image_urls)\n predicted_label_indices = np.argmax(logits, axis=1)\n predicted_scores = logits[np.arange(len(predicted_label_indices)), predicted_label_indices]\n predictions = []\n for idx, score in zip(predicted_label_indices, predicted_scores):\n predicted_label = self.classes[idx]\n # prediction result for the single task\n result = [{\n 'from_name': self.from_name,\n 'to_name': self.to_name,\n 'type': 'choices',\n 'value': {'choices': [predicted_label]}\n }]\n\n # expand predictions with their scores for all tasks\n predictions.append({'result': result, 'score': float(score)})\n\n return predictions\n\n def fit(self, completions, workdir=None, batch_size=32, num_epochs=10, **kwargs):\n image_urls, image_classes = [], []\n print('Collecting completions...')\n for completion in completions:\n if is_skipped(completion):\n continue\n image_urls.append(completion['data'][self.value])\n image_classes.append(get_choice(completion))\n\n print('Creating dataset...')\n dataset = ImageClassifierDataset(image_urls, image_classes)\n dataloader = DataLoader(dataset, shuffle=True, batch_size=batch_size)\n\n print('Train model...')\n self.model.train(dataloader, num_epochs=num_epochs)\n\n print('Save model...')\n model_path = os.path.join(workdir, 'model.pt')\n self.model.save(model_path)\n\n return {'model_path': model_path, 'classes': dataset.classes}"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.load",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.nn.Linear",
"numpy.argmax",
"torch.no_grad",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.StepLR"
]
] |
JingChunzhen/Paddle
|
[
"1bce7caabc1c5e55b1fa13edb19719c397803c43"
] |
[
"python/paddle/fluid/tests/unittests/test_inplace.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport unittest\nimport numpy as np\n\nimport paddle\nimport paddle.fluid.core as core\n\n\nclass TestInplace(unittest.TestCase):\n def test_forward_version(self):\n with paddle.fluid.dygraph.guard():\n var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32))\n self.assertEqual(var.inplace_version, 0)\n\n var[0] = 1.1\n self.assertEqual(var.inplace_version, 1)\n\n paddle.assign(paddle.ones(shape=[3]), var)\n\n # NOTE(liym27): assign(input, output) is an inplace operation for output.\n # There is inplace-related processing for api assign, var.inplace_version should be 2 not 1.\n self.assertEqual(var.inplace_version, 2)\n\n var[2] = 3\n self.assertEqual(var.inplace_version, 3)\n\n def test_backward_error(self):\n # It raises an error because the inplace operator will result\n # in incorrect gradient computation.\n with paddle.fluid.dygraph.guard():\n var_a = paddle.ones(shape=[4, 2, 3], dtype=\"float32\")\n var_a.stop_gradient = False\n\n var_b = var_a**2\n\n # Here, the gradient computation will use the value of var_b\n var_c = var_b**2\n var_b[1:2] = 3.3 # var_b is modified inplace after using it\n\n var_d = var_b**2\n\n loss = paddle.nn.functional.relu(var_c + var_d)\n with self.assertRaisesRegexp(\n RuntimeError,\n \"received tensor_version:{} != wrapper_version_snapshot:{}\".\n format(1, 0)):\n loss.backward()\n\n def test_backward_success_1(self):\n # var_b is modified inplace before using it, the inplace operator doesn't result\n # in incorrect gradient computation.\n with paddle.fluid.dygraph.guard():\n var_a = paddle.ones(shape=[4, 2, 3], dtype=\"float32\")\n var_a.stop_gradient = False\n\n var_b = var_a**2\n var_b[1:2] = 3 # var_b is modified inplace before using it\n\n # Here, the gradient computation will use the value of var_b\n var_c = var_b**2\n loss = var_c.sum()\n loss.backward()\n\n def test_backward_success_2(self):\n # Although var_b is modified inplace after using it, it does not used in gradient computation.\n # The inplace operator doesn't result in incorrect gradient computation.\n with paddle.fluid.dygraph.guard():\n var_a = paddle.ones(shape=[4, 2, 3], dtype=\"float32\")\n var_a.stop_gradient = False\n\n var_b = var_a**2\n\n var_b[1:2] = 3 # var_b is modified inplace before using it\n\n var_c = var_b + var_b # Here, the grad op of sum doesn't use the value of var_b\n loss = var_c.sum()\n\n var_b[1:2] = 3 # var_b is modified inplace after using it\n\n loss.backward()\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.ones"
]
] |
cbi-bioinfo/AGRAP
|
[
"f2ff7817e095109a351c71cf3d92f503a76fbeeb"
] |
[
"workspace/pythonScripts/feature_selection_terminal.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_selection import RFE\nfrom sklearn.svm import SVR\n\n\n# In[ ]:\n\n\n\nfilepath = sys.argv[1]\nfilename = sys.argv[2]\nn = int(sys.argv[3])\n'''\nfilepath = \"C:/Users/JIHYEON_KIM/Documents/workspace/rda/files/\"\nfilename = \"input3.csv\"\nn=5\n'''\n\ndata = pd.read_csv(filepath + \"/\" + filename, encoding='UTF-8')\n\n\n# In[ ]:\n\n\n#모든 feature에 대해 결측치 갖는 샘플 제거\ndata_0 =data.dropna(axis=0,how='all')\n\n#label 값이 결측치인 샘플 제거 \ndata_l =data.loc[data[\"label\"].notnull(), :]\n\n#50%이상이 결측치인 feature 삭제\ndata_f =data_l.dropna(axis=1,thresh=data_l.shape[0]/2)\n\n#나머지는 각 label에 대해서 median imputation 수행\ndata_na_remove = data_f.fillna(data_f.mean())\n\n\n# In[ ]:\n\n\nX = data_na_remove.iloc[:,1:]\nY = data_na_remove.iloc[:,0]\n\n\n# In[ ]:\n\n\n#random forest\nforest = RandomForestClassifier(n_estimators=100, n_jobs=-1)\nforest.fit(X, Y)\nfeature_list = pd.concat([pd.Series(X.columns), pd.Series(forest.feature_importances_)], axis=1)\nfeature_list.columns = ['features_name', 'importance']\nfeature_list_rf =feature_list.sort_values(\"importance\", ascending =False)\nrf_select = feature_list_rf.index[:(n)]\n\n\n# In[ ]:\n\n\n#L1 based Linear SVC\nlsvc = LinearSVC(max_iter=1000000).fit(X, Y)\nmodel = SelectFromModel(lsvc,prefit=True)\nl1_select = model.get_support()\n\n\n# In[ ]:\n\n\n#L1 based logistic regression\nlr = LogisticRegression(max_iter=10000)\nselector = SelectFromModel(estimator=lr).fit(X, Y)\nlr_select = selector.get_support()\n\n\n# In[ ]:\n\n\n#RFE\nestimator = SVR(kernel=\"linear\")\nselector = RFE(estimator, n_features_to_select= n, step=1)\nselector = selector.fit(X,Y)\nrfe_select = selector.support_\n\n\n# In[ ]:\n\n\nrf_list = X.columns[rf_select]\nl1_list = X.columns[l1_select]\nlr_list = X.columns[lr_select]\nrfe_list = X.columns[rfe_select]\nfeature_list = pd.concat([pd.Series(rf_list),pd.Series(l1_list), pd.Series(lr_list),pd.Series(rfe_list)], axis=1)\nfeature_list.columns = ['Random Forest','L1 based LinearSVC', 'L1 based Log Regression','RFE']\n\n\n# In[ ]:\n\n\nfeature_list.to_csv('./public/files/feature_selection_'+filename+'_.csv',na_rep='',encoding='utf-8')\nfeature_list.to_csv('./public/files/feature_selection_result_'+filename+'_.csv',na_rep='',encoding='cp949')\n\n\n"
] |
[
[
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"pandas.Series",
"sklearn.svm.SVR",
"sklearn.feature_selection.RFE",
"sklearn.svm.LinearSVC",
"sklearn.feature_selection.SelectFromModel"
]
] |
khurrammaqbool/SVDB
|
[
"a62a9308c308d1410e68fa9c16d0a4044aacee8b"
] |
[
"tests/test_dbscan.py"
] |
[
"import unittest\nimport numpy\n\nfrom svdb.DBSCAN import main\n\n\nclass TestDBSCAN(unittest.TestCase):\n\n #test that distant points are not merged\n def test_distant_points(self):\n data = numpy.array([[1,1],[1,101]])\n epsilon=100\n m=2\n result=main(data,epsilon,m)\n assert (result[0] == -1 and result[1] == -1)\n\n #test that close points are merged\n def test_close_points(self):\n data = numpy.array([[1,1],[1,101]])\n epsilon=200\n m=2\n result=main(data,epsilon,m)\n assert (result[0] == 0 and result[1] == 0)\n\n #test that small clusters smaller than m are not merged\n def test_small_cluster(self):\n data = numpy.array([[1,1],[1,1],[1,101],[1,101]])\n epsilon=100\n m=3\n result=main(data,epsilon,m)\n assert (result[0] == -1 and result[1] == -1)\n"
] |
[
[
"numpy.array"
]
] |
marcio081010/TensorFlowProject
|
[
"ea0b6be8d63c3b8d86ef7135613170275cff017d"
] |
[
"opencv_group_detection.py"
] |
[
"import os\r\nimport cv2\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom picamera.array import PiRGBArray\r\nfrom picamera import PiCamera\r\nimport tensorflow as tf\r\nimport argparse\r\nimport sys\r\nimport time\r\nimport csv\r\n\r\n######## BOILERPLATE CODE #######\r\n# Set up camera constants\r\n#IM_WIDTH = 1280\r\n#IM_HEIGHT = 720\r\nIM_WIDTH = 640 # Use smaller resolution for\r\nIM_HEIGHT = 480 # slightly faster framerate\r\n\r\n# Select camera type (if user enters --usbcam when calling this script,\r\n# a USB webcam will be used)\r\ncamera_type = 'picamera'\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--usbcam', help='Use a USB webcam instead of picamera',\r\n action='store_true')\r\nargs = parser.parse_args()\r\nif args.usbcam:\r\n camera_type = 'usb'\r\n\r\n# This is needed since the working directory is the object_detection folder.\r\nsys.path.append('..')\r\n\r\n# Import utilites\r\nfrom object_detection.utils import label_map_util\r\nfrom object_detection.utils import visualization_utils as vis_util\r\n\r\n# Name of the directory containing the object detection module we're using\r\nMODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09'\r\n\r\n# Grab path to current working directory\r\nCWD_PATH = os.getcwd()\r\n\r\n# Path to frozen detection graph .pb file, which contains the model that is used\r\n# for object detection.\r\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\r\n\r\n# Path to label map file\r\nPATH_TO_LABELS = os.path.join(CWD_PATH,'data','mscoco_label_map.pbtxt')\r\n\r\n# Number of classes the object detector can identify\r\nNUM_CLASSES = 90\r\n\r\n## Load the label map.\r\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\r\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\r\ncategory_index = label_map_util.create_category_index(categories)\r\n\r\n# Load the Tensorflow model into memory.\r\ndetection_graph = tf.Graph()\r\nwith detection_graph.as_default():\r\n od_graph_def = tf.compat.v1.GraphDef()\r\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, name='')\r\n\r\n sess = tf.compat.v1.Session(graph=detection_graph)\r\n\r\n\r\n# Define input and output tensors (i.e. 
data) for the object detection classifier\r\n\r\n# Input tensor is the image\r\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\r\n\r\n# Output tensors are the detection boxes, scores, and classes\r\n# Each box represents a part of the image where a particular object was detected\r\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\r\n\r\n# Each score represents level of confidence for each of the objects.\r\n# The score is shown on the result image, together with the class label.\r\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\r\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\r\n\r\n# Number of objects detected\r\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\r\n\r\n#Initialises the list for output\r\noutput = []\r\n\r\n# creating a fucntion \r\ndef group_counting():\r\n \r\n # Initialize frame rate calculation\r\n frame_rate_calc = 1\r\n freq = cv2.getTickFrequency()\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n \r\n # Initialize Picamera and grab reference to the raw capture\r\n camera = PiCamera()\r\n camera.resolution = (IM_WIDTH,IM_HEIGHT)\r\n camera.framerate = 10\r\n rawCapture = PiRGBArray(camera, size=(IM_WIDTH,IM_HEIGHT))\r\n rawCapture.truncate(0)\r\n\r\n #Standard setup for the live object viewer\r\n for frame1 in camera.capture_continuous(rawCapture, format=\"bgr\",use_video_port=True):\r\n t1 = cv2.getTickCount()\r\n \r\n # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]\r\n # i.e. a single-column array, where each item in the column has the pixel RGB value\r\n frame = np.copy(frame1.array)\r\n frame.setflags(write=1)\r\n frame_expanded = np.expand_dims(frame, axis=0)\r\n\r\n # Perform the actual detection by running the model with the image as input\r\n (boxes, scores, classes, num) = sess.run(\r\n [detection_boxes, detection_scores, detection_classes, num_detections],\r\n feed_dict={image_tensor: frame_expanded})\r\n\r\n # Visualizing the results of the detection - \r\n # this is only for show and to visually verify results if needed\r\n vis_util.visualize_boxes_and_labels_on_image_array(\r\n frame,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n category_index,\r\n use_normalized_coordinates=True,\r\n line_thickness=6,\r\n min_score_thresh=0.50)\r\n \r\n ####### OBJECT SELECTION AND COUNTING CODE STARTS HERE #######\r\n # pulling raw output from object detection. 
Creates a list of dicts \r\n # with details of each of the objects meeting the threshold in a given frame.\r\n Validobj = [category_index.get(value) for index, value in enumerate (classes[0]) if scores [0,index]>0.5]\r\n \r\n # Choose your object\r\n to_detect = 'person' \r\n \r\n # Creates a log if the chosen object has been detected.\r\n if Validobj:\r\n data = [i[\"name\"] for i in Validobj]\r\n # If in the given frame the number of a given object detected meets the condition then a log is made \r\n if data.count(to_detect)>2:\r\n # Writes a line with how many of the object was detected along with a timestamp\r\n Summary = [\"There is a group of \" + str(data.count(to_detect)) + \" people\" ,time.ctime()]\r\n print(Summary)\r\n \r\n evidence_stamp = [data.count(to_detect),to_detect,time.ctime()]\r\n output.append(evidence_stamp)\r\n\r\n # Take a picture for authorities\r\n cv2.imwrite(\"evidence.bmp\", frame)\r\n time.sleep(5) #- alter depending on footfall or replace with object tracking to reduce overcounting\r\n \r\n # Used to dispay framerate in live viewer\r\n cv2.putText(frame,\"FPS: {0:.2f}\".format(frame_rate_calc),(30,50),font,1,(255,255,0),2,cv2.LINE_AA)\r\n\r\n # All the results have been drawn on the frame, so it's time to display it.\r\n cv2.imshow('Object detector', frame)\r\n\r\n t2 = cv2.getTickCount()\r\n time1 = (t2-t1)/freq\r\n frame_rate_calc = 1/time1\r\n \r\n # Press 'q' to quit\r\n if cv2.waitKey(1) == ord('q'):\r\n # This writes the data gathered in the output to a logfile\r\n with open('output.csv','w',newline = '\\n') as file:\r\n writer = csv.writer(file)\r\n writer.writerows(output)\r\n break\r\n\r\n rawCapture.truncate(0)\r\n\r\n camera.close()\r\n\r\ncv2.destroyAllWindows()\r\n\r\ntry:\r\n while True:\r\n group_counting()\r\nexcept KeyboardInterrupt:\r\n sys.exit()"
] |
[
[
"tensorflow.Graph",
"tensorflow.import_graph_def",
"numpy.expand_dims",
"tensorflow.gfile.GFile",
"numpy.squeeze",
"tensorflow.compat.v1.Session",
"numpy.copy",
"tensorflow.compat.v1.GraphDef"
]
] |
GabrielSCabrera/ComputationalPhysics2
|
[
"a840b97b651085090f99bf6a11abab57100c2e85"
] |
[
"doc/src/MCsummary/src/mpqdot.py"
] |
[
"# 2-electron VMC code for 2dim quantum dot with importance sampling\n# No Coulomb interaction\n# Using gaussian rng for new positions and Metropolis- Hastings \n# Energy minimization using standard gradient descent \n\n# Common imports\nimport os\n\n# Where to save the figures and data files\nPROJECT_ROOT_DIR = \"Results\"\nFIGURE_ID = \"Results/FigureFiles\"\n\nif not os.path.exists(PROJECT_ROOT_DIR):\n os.mkdir(PROJECT_ROOT_DIR)\n\nif not os.path.exists(FIGURE_ID):\n os.makedirs(FIGURE_ID)\n\ndef image_path(fig_id):\n return os.path.join(FIGURE_ID, fig_id)\n\n\ndef save_fig(fig_id):\n plt.savefig(image_path(fig_id) + \".png\", format='png')\n\n\nfrom math import exp, sqrt\nfrom random import random, seed, normalvariate\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport sys\nfrom numba import jit\nfrom scipy.optimize import minimize\nimport multiprocessing as mp\n\n# Trial wave function for the 2-electron quantum dot in two dims\ndef WaveFunction(r,alpha):\n r1 = r[0,0]**2 + r[0,1]**2\n r2 = r[1,0]**2 + r[1,1]**2\n return exp(-0.5*alpha*(r1+r2))\n\n# Local energy for the 2-electron quantum dot in two dims, using analytical local energy\ndef LocalEnergy(r,alpha):\n \n r1 = (r[0,0]**2 + r[0,1]**2)\n r2 = (r[1,0]**2 + r[1,1]**2)\n return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha\n\n# Derivate of wave function ansatz as function of variational parameters\ndef DerivativeWFansatz(r,alpha):\n \n r1 = (r[0,0]**2 + r[0,1]**2)\n r2 = (r[1,0]**2 + r[1,1]**2)\n WfDer = -0.5*(r1+r2)\n return WfDer\n\n# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector\ndef QuantumForce(r,alpha):\n\n qforce = np.zeros((NumberParticles,Dimension), np.double)\n qforce[0,:] = -2*r[0,:]*alpha\n qforce[1,:] = -2*r[1,:]*alpha\n return qforce\n \n# Computing the derivative of the energy and the energy \n# jit decorator tells Numba to compile this function.\n# The argument types will be inferred by Numba when function is called.\n@jit\ndef EnergyMinimization(alpha):\n\n NumberMCcycles= 1000\n # Parameters in the Fokker-Planck simulation of the quantum force\n D = 0.5\n TimeStep = 0.05\n # positions\n PositionOld = np.zeros((NumberParticles,Dimension), np.double)\n PositionNew = np.zeros((NumberParticles,Dimension), np.double)\n # Quantum force\n QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)\n QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)\n\n # seed for rng generator \n seed()\n energy = 0.0\n DeltaE = 0.0\n EnergyDer = 0.0\n DeltaPsi = 0.0\n DerivativePsiE = 0.0\n #Initial position\n for i in range(NumberParticles):\n for j in range(Dimension):\n PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)\n wfold = WaveFunction(PositionOld,alpha)\n QuantumForceOld = QuantumForce(PositionOld,alpha)\n\n #Loop over MC MCcycles\n for MCcycle in range(NumberMCcycles):\n #Trial position moving one particle at the time\n for i in range(NumberParticles):\n for j in range(Dimension):\n PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\\\n QuantumForceOld[i,j]*TimeStep*D\n wfnew = WaveFunction(PositionNew,alpha)\n QuantumForceNew = QuantumForce(PositionNew,alpha)\n GreensFunction = 0.0\n for j in range(Dimension):\n GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\\\n\t (D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\\\n PositionNew[i,j]+PositionOld[i,j])\n \n 
GreensFunction = 1.0#exp(GreensFunction)\n ProbabilityRatio = GreensFunction*wfnew**2/wfold**2\n #Metropolis-Hastings test to see whether we accept the move\n if random() <= ProbabilityRatio:\n for j in range(Dimension):\n PositionOld[i,j] = PositionNew[i,j]\n QuantumForceOld[i,j] = QuantumForceNew[i,j]\n wfold = wfnew\n DeltaE = LocalEnergy(PositionOld,alpha)\n DerPsi = DerivativeWFansatz(PositionOld,alpha)\n DeltaPsi +=DerPsi\n energy += DeltaE\n DerivativePsiE += DerPsi*DeltaE\n \n # We calculate mean values\n energy /= NumberMCcycles\n DerivativePsiE /= NumberMCcycles\n DeltaPsi /= NumberMCcycles\n EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)\n return energy, EnergyDer\n\n\n#Here starts the main program with variable declarations\nNumberParticles = 2\nDimension = 2\n# guess for variational parameters\nx0 = 0.5\n# Set up iteration using stochastic gradient method\nEnergy =0 ; EnergyDer = 0\npool = mp.Pool(processes=2)\nEnergy, EnergyDer = EnergyMinimization(x0)\n\n# No adaptive search for a minimum\neta = 0.5\nNiterations = 50\n\nEnergies = np.zeros(Niterations)\nEnergyDerivatives = np.zeros(Niterations)\nAlphaValues = np.zeros(Niterations)\nTotiterations = np.zeros(Niterations)\n\nfor iter in range(Niterations):\n gradients = EnergyDer\n x0 -= eta*gradients\n Energy, EnergyDer = EnergyMinimization(x0)\n Energies[iter] = Energy\n EnergyDerivatives[iter] = EnergyDer\n AlphaValues[iter] = x0\n Totiterations[iter] = iter\n\nplt.subplot(2, 1, 1)\nplt.plot(Totiterations, Energies, 'o-')\nplt.title('Energy and energy derivatives')\nplt.ylabel('Dimensionless energy')\nplt.subplot(2, 1, 2)\nplt.plot(Totiterations, EnergyDerivatives, '.-')\nplt.xlabel(r'$\\mathrm{Iterations}$', fontsize=15)\nplt.ylabel('Energy derivative')\nsave_fig(\"QdotNonint\")\nplt.show()\n#nice printout with Pandas\nimport pandas as pd\nfrom pandas import DataFrame\ndata ={'Alpha':AlphaValues, 'Energy':Energies,'Derivative':EnergyDerivatives}\n\nframe = pd.DataFrame(data)\nprint(frame)\n"
] |
[
[
"matplotlib.pyplot.title",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.ylabel"
]
] |
dulex123/dqn-atari
|
[
"f5c8b6c0fd32becd026b83addc7b4b4f3004c0fc"
] |
[
"env_loop.py"
] |
[
"import gym\nimport numpy as np\nfrom utils import preprocess_frame\nfrom DeepQAgent import DeepQAgent\n\nBATCH_SIZE = 32\nBUFFER_START_SIZE = 40\nBUFFER_SIZE = 100\nTARGET_UPDATE = 50\nNUM_EPISODES = int(2e5)\nMAX_STEPS = int(1e6)\nGAMMA = 0.99\nEPSILON_DECAY_STEPS = int(1e6)\nMIN_EPS = 0.1\n\n\nenv = gym.make(\"Breakout-v0\")\ntotal_reward = 0\neps = 1\n\nagent = DeepQAgent(BUFFER_SIZE, env, BATCH_SIZE, BUFFER_START_SIZE)\n\ntotal_steps = 0\nfor ep in range(NUM_EPISODES):\n\n step = 0\n done = False\n obs_list = [preprocess_frame(env.reset())] * 5\n\n ep_reward = 0\n\n # Loop until MAX_STEPS reached or env returns done (checked at the bottom)\n while step < MAX_STEPS:\n step += 1\n total_steps += 1\n\n obs_list.pop(0)\n\n if np.random.random() < eps:\n action = env.action_space.sample()\n else:\n obs_nd = np.stack(obs_list, -1)\n action = np.argmax(agent.predict(obs_nd[None,]))\n\n obs_p, r, done, _ = env.step(action)\n obs_list.append(preprocess_frame(obs_p))\n\n total_reward += r\n ep_reward += r\n\n if done and r == 1:\n r = 200\n elif done:\n r = -200\n\n transition = (np.stack(obs_list, -1), action, r, done)\n agent.replay_queue.append(transition)\n\n minibatch = agent.sample_minibatch()\n\n #observations, actions, rewards, dones = minibatch\n observations = minibatch[0]\n actions = minibatch[1]\n rewards = minibatch[2]\n dones = minibatch[3]\n\n\n\n targets = agent.gen_target(observations[:, :, :, 0:4])\n targets_p = agent.gen_target(observations[:, :, :, 1:])\n\n print(targets_p.shape)\n\n arranged = np.arange(len(targets))\n targets[arranged, actions] = rewards + (dones==False) * GAMMA * np.max(targets_p, axis=1)\n\n agent.train_main(observations[:, :, :, 0:4], targets)\n\n if total_steps % TARGET_UPDATE == 0:\n agent.update_target_network()\n\n if done:\n agent.report(total_steps, step, ep_reward, ep)\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"numpy.max",
"numpy.random.random",
"numpy.stack"
]
] |
wjsutton/life_expectancy_in_chess
|
[
"58327bd04c4602faf939fc062a559293170a137d"
] |
[
"chess_death_positions.py"
] |
[
"from itertools import islice, cycle\nimport chess.pgn\nimport pandas as pd\npd.options.mode.chained_assignment = None # default='warn'\nimport numpy as np\nimport glob\nimport datetime\n\nwith open('data/standard_matches/lichess_db_standard_rated_2013-01.pgn') as pgn:\n for file in range(10000):\n\n start_positions = pd.read_csv('data\\\\start_positions.csv')\n\n first_game = chess.pgn.read_game(pgn)\n\n site = first_game.headers['Site']\n black_elo = first_game.headers['BlackElo']\n white_elo = first_game.headers['WhiteElo']\n opening = first_game.headers['Opening']\n board = first_game.board()\n\n print(str(file) +', ' +site + ', started')\n\n san_list = []\n lan_list = []\n site_list = []\n black_elo_list = []\n white_elo_list = []\n opening_list = []\n\n for move in first_game.mainline_moves():\n san = board.san(move)\n lan = board.lan(move)\n san_list.append(san)\n lan_list.append(lan)\n site_list.append(site)\n black_elo_list.append(black_elo)\n white_elo_list.append(white_elo)\n opening_list.append(opening)\n board.push(move)\n\n df = pd.DataFrame()\n\n df['Site'] = site_list\n df['SAN'] = san_list\n df['LAN'] = lan_list\n df['BlackElo'] = black_elo_list\n df['WhiteElo'] = white_elo_list\n df['Opening'] = opening_list\n\n pat = ['white','black']\n df = df.assign(player=[*islice(cycle(pat), len(df))])\n \n df['LAN'] = df['LAN'].str.replace('O-O-O','O-OO',regex=False)\n df[['from_and_piece', 'to_and_result']] = df['LAN'].str.split('-|x',expand=True)\n\n df['from'] = df['from_and_piece'].str.strip().str[-2:] \n df['to'] = df['to_and_result'].str.strip().str[0:2]\n\n df['piece_moved'] = np.select(\n [\n df['from_and_piece'].str.len() == 2, \n df['from_and_piece'].str[0] == 'R',\n df['from_and_piece'].str[0] == 'B',\n df['from_and_piece'].str[0] == 'N',\n df['from_and_piece'].str[0] == 'Q',\n df['from_and_piece'].str[0] == 'K'\n ], \n [\n 'Pawn', \n 'Rook',\n 'Bishop',\n 'Knight',\n 'Queen',\n 'King'\n ], \n default='Unknown'\n )\n\n df['action'] = np.select(\n [\n df['LAN'].str.contains('#'),\n df['LAN'].str.contains('x'),\n df['LAN'].str.contains('=Q'),\n df['LAN'].str.contains(r'\\+'),\n df['LAN'].str.contains('-')\n \n ], \n [\n 'checkmate',\n 'kill',\n 'promoted to queen',\n 'check',\n 'move'\n ], \n default='Unknown'\n )\n df['index'] = df.index\n\n # queen side castling\n qs_castling = df.loc[df['LAN'] == 'O-O']\n\n if len(qs_castling)>0:\n print('Queen side Castling!')\n white_qs_castling = pd.DataFrame()\n black_qs_castling = pd.DataFrame()\n for i in range(len(qs_castling)):\n if qs_castling.iloc[i]['player'] == 'black':\n black_qs_castling['Site'] = [qs_castling.iloc[i]['Site'],qs_castling.iloc[i]['Site']]\n black_qs_castling['SAN'] = [qs_castling.iloc[i]['SAN'],qs_castling.iloc[i]['SAN']]\n black_qs_castling['LAN'] = [qs_castling.iloc[i]['LAN'],qs_castling.iloc[i]['LAN']]\n black_qs_castling['player'] = [qs_castling.iloc[i]['player'],qs_castling.iloc[i]['player']]\n black_qs_castling['from_and_piece'] = ['e8','h8']\n black_qs_castling['to_and_result'] = ['g8','f8']\n black_qs_castling['from'] = ['e8','h8']\n black_qs_castling['to'] = ['g8','f8']\n black_qs_castling['piece_moved'] = ['King','Rook']\n black_qs_castling['action'] = [qs_castling.iloc[i]['action'],qs_castling.iloc[i]['action']]\n black_qs_castling['index'] = [qs_castling.iloc[i]['index'],qs_castling.iloc[i]['index']]\n else:\n white_qs_castling['Site'] = [qs_castling.iloc[i]['Site'],qs_castling.iloc[i]['Site']]\n white_qs_castling['SAN'] = [qs_castling.iloc[i]['SAN'],qs_castling.iloc[i]['SAN']]\n 
white_qs_castling['LAN'] = [qs_castling.iloc[i]['LAN'],qs_castling.iloc[i]['LAN']]\n white_qs_castling['player'] = [qs_castling.iloc[i]['player'],qs_castling.iloc[i]['player']]\n white_qs_castling['from_and_piece'] = ['e1','h1']\n white_qs_castling['to_and_result'] = ['g1','f1']\n white_qs_castling['from'] = ['e1','h1']\n white_qs_castling['to'] = ['g1','f1']\n white_qs_castling['piece_moved'] = ['King','Rook']\n white_qs_castling['action'] = [qs_castling.iloc[i]['action'],qs_castling.iloc[i]['action']]\n white_qs_castling['index'] = [qs_castling.iloc[i]['index'],qs_castling.iloc[i]['index']]\n\n if (len(white_qs_castling) + len(black_qs_castling))>1:\n replacement_moves = white_qs_castling.append(black_qs_castling)\n elif len(black_qs_castling)>1:\n replacement_moves = black_qs_castling\n else: \n replacement_moves = white_qs_castling\n\n other_moves = df.loc[df['LAN'] != 'O-O']\n df = other_moves.append(replacement_moves)\n df = df.sort_values(by=['index'])\n df = df.reset_index()\n df['index'] = df.index\n\n # king side castling\n ks_castling = df.loc[df['LAN'] == 'O-O-O']\n if len(ks_castling)>0:\n print('King side Castling!')\n print(ks_castling)\n white_ks_castling = pd.DataFrame()\n black_ks_castling = pd.DataFrame()\n for i in range(len(ks_castling)):\n if ks_castling.iloc[i]['player'] == 'black':\n black_ks_castling['Site'] = [ks_castling.iloc[i]['Site'],ks_castling.iloc[i]['Site']]\n black_ks_castling['SAN'] = [ks_castling.iloc[i]['SAN'],ks_castling.iloc[i]['SAN']]\n black_ks_castling['LAN'] = [ks_castling.iloc[i]['LAN'],ks_castling.iloc[i]['LAN']]\n black_ks_castling['player'] = [ks_castling.iloc[i]['player'],ks_castling.iloc[i]['player']]\n black_qs_castling['from_and_piece'] = ['e8','a8']\n black_ks_castling['to_and_result'] = ['g8','f8']\n black_ks_castling['from'] = ['e8','a8']\n black_ks_castling['to'] = ['g8','f8']\n black_ks_castling['piece_moved'] = ['King','Rook']\n black_ks_castling['action'] = [ks_castling.iloc[i]['action'],ks_castling.iloc[i]['action']]\n black_ks_castling['index'] = [ks_castling.iloc[i]['index'],ks_castling.iloc[i]['index']]\n else:\n white_ks_castling['Site'] = [ks_castling.iloc[i]['Site'],ks_castling.iloc[i]['Site']]\n white_ks_castling['SAN'] = [ks_castling.iloc[i]['SAN'],ks_castling.iloc[i]['SAN']]\n white_ks_castling['LAN'] = [ks_castling.iloc[i]['LAN'],ks_castling.iloc[i]['LAN']]\n white_ks_castling['player'] = [ks_castling.iloc[i]['player'],ks_castling.iloc[i]['player']]\n white_ks_castling['from_and_piece'] = ['e1','a1']\n white_ks_castling['to_and_result'] = ['c1','d1']\n white_ks_castling['from'] = ['e1','a1']\n white_ks_castling['to'] = ['c1','d1']\n white_ks_castling['piece_moved'] = ['King','Rook']\n white_ks_castling['action'] = [ks_castling.iloc[i]['action'],ks_castling.iloc[i]['action']]\n white_ks_castling['index'] = [ks_castling.iloc[i]['index'],ks_castling.iloc[i]['index']]\n\n if (len(white_ks_castling) + len(black_ks_castling))>1:\n replacement_moves = white_ks_castling.append(black_ks_castling)\n elif len(black_ks_castling)>1:\n replacement_moves = black_ks_castling\n else: \n replacement_moves = white_ks_castling\n\n other_moves = df.loc[df['LAN'] != 'O-O-O']\n df = other_moves.append(replacement_moves)\n df = df.sort_values(by=['index'])\n\n for i in df['index']:\n current_move = df.loc[df['index'] == i]\n\n a = start_positions.loc[start_positions['start'] == current_move['from'][i]]\n b = start_positions.loc[start_positions['start'] != current_move['from'][i]]\n c = start_positions.loc[start_positions['start'] == 
current_move['to'][i]]\n\n a.loc[a['start'] == current_move['from'][i], 'start'] = current_move['to'][i]\n a = a.reset_index(drop=True)\n\n taken = ''\n\n if len(c['id']) >0:\n c = c.reset_index(drop=True)\n taken = c['id'][0]\n\n b.loc[b['id'] == taken, 'start'] = None\n\n entry = { \n 'index': i,\n 'piece_id': a['id'],\n 'killed': taken}\n\n entry_df = pd.DataFrame(entry)\n\n if len(entry_df)>1:\n print('Hey Will!')\n print(entry_df)\n\n if i == 0:\n kill_df = entry_df\n\n if i > 0:\n kill_df = kill_df.append(entry_df)\n\n start_positions = a.append(b)\n\n df['player_name'] = np.where(df['player']=='white', first_game.headers['White'], first_game.headers['Black'])\n result = np.where(first_game.headers['Result']=='1-0','white wins',np.where(first_game.headers['Result']=='0-1','black wins','draw'))\n df['result'] = np.where(df['player']=='white',result,result)\n start_positions['result'] = np.where(start_positions['player']=='white',result,result)\n start_positions['Site'] = np.where(start_positions['player']=='white',df['Site'][0],df['Site'][0])\n start_positions['survived'] = np.where(start_positions['start'].isnull(),False,True)\n start_positions['captured'] = np.where(start_positions['player'] == start_positions['result'].str[:5],False,True)\n start_positions['survived_or_captured'] = np.where(start_positions['piece'] == 'King',start_positions['captured'],start_positions['survived'])\n\n match_name_df = df[['player','player_name']].drop_duplicates()\n match_name = match_name_df['player_name'][0] + ' ('+match_name_df['player'][0]+') vs ' + match_name_df['player_name'][1] + ' ('+match_name_df['player'][1]+')'\n\n start_positions['match_name'] = match_name\n df['match_name'] = match_name\n\n df = pd.merge(df,kill_df,on='index', how='inner')\n start_positions = pd.merge(start_positions,kill_df, left_on='id',right_on='killed', how='left')\n\n del start_positions['index']\n del start_positions['killed']\n start_positions.rename(columns={'piece_id':'killed_by'}, inplace = True)\n\n if file == 0:\n match_df = df.reset_index(drop=True)\n survival_df = start_positions.reset_index(drop=True)\n else:\n match_df = match_df.append(df.reset_index(drop=True))\n survival_df = survival_df.append(start_positions.reset_index(drop=True))\n\n print(str(file) +', ' +site + ', complete! 
'+ str(datetime.datetime.utcnow()))\n\n\nrates = pd.read_csv('data\\\\chess_piece_survival_rates.csv')\ndel rates['piece']\ndel rates['player']\n\nsurvival_df = pd.merge(survival_df,rates, on ='id',how='inner')\n\nwhite_checkmate = match_df.loc[(match_df['piece_id'] == 'B-K') & (match_df['result'] == 'white wins') ]\nblack_checkmate = match_df.loc[(match_df['piece_id'] == 'W-K') & (match_df['result'] == 'black wins') ]\ncheckmate_df = pd.concat([white_checkmate,black_checkmate])\n\nwhite_lastmove = match_df.loc[(match_df['result'] == 'white wins')][['Site','index']]\nblack_lastmove = match_df.loc[(match_df['result'] == 'black wins')][['Site','index']]\n\nwhite_lastmove = white_lastmove.groupby(['Site'],as_index=False)['index'].agg({'index':'max'})\nwhite_lastmove = pd.merge(match_df,white_lastmove,on = ['index','Site'],how='inner')\nwhite_lastmove['killed'] = 'B-K'\nwhite_lastmove = white_lastmove[['Site','killed','piece_id']]\n\nblack_lastmove = black_lastmove.groupby(['Site'],as_index=False)['index'].agg({'index':'max'})\nblack_lastmove = pd.merge(match_df,black_lastmove,on = ['index','Site'],how='inner')\nblack_lastmove['killed'] = 'W-K'\nblack_lastmove = black_lastmove[['Site','killed','piece_id']]\n\nlastmove_df = pd.concat([white_lastmove,black_lastmove])\ncheckmate_df = pd.merge(checkmate_df[['Site','BlackElo','WhiteElo','Opening','to']],lastmove_df,on = 'Site',how='inner')\n\ndeath_df = match_df[['Site','BlackElo','WhiteElo','Opening','to','killed','piece_id']]\ndeath_df = death_df[death_df['killed'].notnull()]\ndeath_df = death_df.loc[death_df['killed'] != '']\nif len(checkmate_df) > 0:\n death_df = pd.concat([death_df,checkmate_df])\ndeath_df.columns = ['Site','BlackElo','WhiteElo','Opening','death_position','killed_piece_id','killed_by_piece_id']\n\n\ndeath_df['BlackElo'] = death_df['BlackElo'].str.replace('?','0',regex=False)\ndeath_df['WhiteElo'] = death_df['WhiteElo'].str.replace('?','0',regex=False)\n\ndeath_df['BlackElo'] = death_df['BlackElo'].str.replace('.*[-].*','0',regex=True)\ndeath_df['WhiteElo'] = death_df['WhiteElo'].str.replace('.*[-].*','0',regex=True)\nprint(death_df['BlackElo'])\nprint(death_df['WhiteElo'])\ndeath_df['BlackElo'] = death_df['BlackElo'].fillna(0)\ndeath_df['WhiteElo'] = death_df['WhiteElo'].fillna(0)\ndeath_df['BlackElo'] = death_df['BlackElo'].astype(int)\ndeath_df['WhiteElo'] = death_df['WhiteElo'].astype(int)\n\ndeath_df['BlackElo_broad'] = np.select(\n [\n death_df['BlackElo'] == 0,\n death_df['BlackElo'] < 1000,\n death_df['BlackElo'] < 1100,\n death_df['BlackElo'] < 1200,\n death_df['BlackElo'] < 1300,\n death_df['BlackElo'] < 1400,\n death_df['BlackElo'] < 1500,\n death_df['BlackElo'] < 1600,\n death_df['BlackElo'] < 1700,\n death_df['BlackElo'] < 1800,\n death_df['BlackElo'] < 1900,\n death_df['BlackElo'] < 2000,\n death_df['BlackElo'] >= 2000\n \n ], \n [\n 'Unknown',\n '0-1000',\n '1000-1099',\n '1100-1199',\n '1200-1299',\n '1300-1399',\n '1400-1499',\n '1500-1599',\n '1600-1699',\n '1700-1799',\n '1800-1899',\n '1900-1999',\n '2000+'\n ], \n default='Unknown'\n )\n\ndeath_df['WhiteElo_broad'] = np.select(\n [\n death_df['WhiteElo'] == 0,\n death_df['WhiteElo'] < 1000,\n death_df['WhiteElo'] < 1100,\n death_df['WhiteElo'] < 1200,\n death_df['WhiteElo'] < 1300,\n death_df['WhiteElo'] < 1400,\n death_df['WhiteElo'] < 1500,\n death_df['WhiteElo'] < 1600,\n death_df['WhiteElo'] < 1700,\n death_df['WhiteElo'] < 1800,\n death_df['WhiteElo'] < 1900,\n death_df['WhiteElo'] < 2000,\n death_df['WhiteElo'] >= 2000\n \n ], \n [\n 'Unknown',\n 
'0-1000',\n '1000-1099',\n '1100-1199',\n '1200-1299',\n '1300-1399',\n '1400-1499',\n '1500-1599',\n '1600-1699',\n '1700-1799',\n '1800-1899',\n '1900-1999',\n '2000+'\n ], \n default='Unknown'\n )\n\ndeath_df['Opening'] = death_df['Opening'].fillna('Unknown')\n\ndeath_df['Opening_broad'] = np.select(\n [\n death_df['Opening'].str.contains('Sicilian Defense'),\n death_df['Opening'].str.contains('French Defense'),\n death_df['Opening'].str.contains(\"Queen's Pawn Game\"),\n death_df['Opening'].str.contains(\"Scandinavian Defense\"),\n death_df['Opening'].str.contains(\"King's Pawn Game\"),\n death_df['Opening'].str.contains(\"Queen's Gambit\"),\n death_df['Opening'].str.contains(\"King's Gambit\")\n \n ], \n [\n 'Sicilian Defense',\n 'French Defense',\n \"Queen's Pawn Game\",\n \"Scandinavian Defense\",\n \"King's Pawn Game\",\n \"Queen's Gambit\",\n \"King's Gambit\"\n ], \n default='Other'\n )\n\noutput_df = death_df[['BlackElo_broad','WhiteElo_broad','Opening_broad','death_position','killed_piece_id','killed_by_piece_id']]\noutput_df = output_df.groupby(['BlackElo_broad','WhiteElo_broad','Opening_broad','death_position','killed_piece_id','killed_by_piece_id']).size().reset_index(name='counts')\n\ndeath_df.to_csv('data\\\\piece_death_positions.csv', index=False)\noutput_df.to_csv('data\\\\piece_death_positions_statistics.csv', index=False)"
] |
[
[
"pandas.merge",
"pandas.read_csv",
"pandas.concat",
"pandas.DataFrame",
"numpy.select",
"numpy.where"
]
] |
bocklab/CloudVolumeServer
|
[
"9fda49e72d338612ce336fb6fce719489974e170"
] |
[
"process.py"
] |
[
"import multiprocessing as mp\nimport numpy as np\nimport pandas as pd\n\n\ndef _get_ids(vol, bl, co):\n \"\"\"Fetch block and extract IDs.\n\n Parameters\n ----------\n vol : CloudVolume\n Volume to query.\n bl : list-like\n Coordinates defining the block:\n left, right, top, bottom, z1, z2\n co : numpy array\n x/y/z coordinates WITHIN block\n of segment IDs to fetch.\n\n \"\"\"\n # Unpack block indices\n l, r, t, b, z1, z2 = bl\n\n # Subset block to relevant parts (i.e. those containing\n # requested coordinates) to save memory\n mn = co.min(axis=0)\n mx = co.max(axis=0) + 1\n l, r = l + mn[0], l + mx[0]\n t, b = t + mn[1], t + mx[1]\n z1, z2 = z1 + mn[2], z1 + mx[2]\n\n # Offset coordinates too\n co -= mn\n\n # Get the block\n chunk = vol[l:r, t:b, z1:z2]\n\n # Get the IDs out of the block\n co_id = chunk[co[:, 0], co[:, 1], co[:, 2]]\n\n return co_id\n\n\ndef get_multiple_ids(x, vol, max_workers=mp.cpu_count() - 5):\n \"\"\"Return multiple segment IDs using cloudvolume.\n\n Parameters\n ----------\n x : numpy array\n Array with x/y/z coordinates to fetch\n segmentation IDs for.\n vol : cloudvolume.CloudVolume\n\n \"\"\"\n # Make sure x is array\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n\n if not max_workers:\n max_workers = 1\n\n # Hard coded block size\n blocksize = np.array([128, 128, 32])\n\n # Make bins to fit with blocksize\n xbins = np.arange(0, np.max(x) + blocksize[0], blocksize[0])\n ybins = np.arange(0, np.max(x) + blocksize[1], blocksize[1])\n zbins = np.arange(0, np.max(x) + blocksize[2], blocksize[2])\n\n # Sort data into bins\n cbin = pd.DataFrame(x)\n cbin['x_bin'] = pd.cut(cbin[0], xbins, include_lowest=True, right=False)\n cbin['y_bin'] = pd.cut(cbin[1], ybins, include_lowest=True, right=False)\n cbin['z_bin'] = pd.cut(cbin[2], zbins, include_lowest=True, right=False)\n\n # This is now a dictionary of bin -> indices of coordinates\n blocked = cbin.groupby(['x_bin', 'y_bin', 'z_bin']).indices\n\n # Start process pool (do not use max cpu count -> appears to be a bottle neck)\n with mp.Pool(processes=max_workers) as pool:\n futures = []\n seg_ix = []\n # Iterate over all blocks\n for bl, co_ix in blocked.items():\n # Get this block's (i.e. the bin's) indices\n l, r, t, b, z1, z2 = (int(bl[0].left), int(bl[0].right),\n int(bl[1].left), int(bl[1].right),\n int(bl[2].left), int(bl[2].right))\n\n # Get the coordinates in this bin\n co = x[co_ix]\n\n # Offset coordinates by the chunk's coordinates\n # to produce \"in block coordinates\"\n co = co - np.array([l, t, z1])\n\n # Keep track of the indices of the coordinates we are querying\n # in this iteration\n seg_ix.append(co_ix)\n\n # Run the query\n futures.append(pool.apply_async(_get_ids,\n args=[vol,\n [l, r, t, b, z1, z2],\n co]))\n\n # Make sure all processes are complete\n seg_ids = np.vstack([f.get() for f in futures])\n\n # Turn list of list of indices into a flat array\n seg_ix = np.hstack(seg_ix)\n\n # Generate placeholder\n ordered = np.zeros(x.shape[0])\n\n # Populate with segment IDs\n ordered[seg_ix] = seg_ids.flatten()\n\n return ordered.astype(int)\n"
] |
[
[
"numpy.hstack",
"pandas.DataFrame",
"numpy.max",
"pandas.cut",
"numpy.array",
"numpy.zeros"
]
] |
mwillsey/incubator-tvm
|
[
"e02dc69fef294eb73dd65d18949ed9e108f60cda"
] |
[
"tests/python/relay/test_backend_interpreter.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport numpy as np\nimport tvm\nfrom tvm import te\nimport tvm.testing\nfrom tvm import nd\nfrom tvm import relay\nfrom tvm.runtime import container\nfrom tvm.relay.backend.interpreter import RefValue, ConstructorValue\nfrom tvm.relay.scope_builder import ScopeBuilder\nfrom tvm.relay import testing, create_executor\n\n\ndef check_eval(expr, args, expected_result, mod=None, rtol=1e-07):\n # TODO(tqchen) add more types once the schedule register is fixed.\n for target in [\"llvm\"]:\n ctx = tvm.context(target, 0)\n if not tvm.testing.device_enabled(target):\n return\n intrp = create_executor(mod=mod, ctx=ctx, target=target)\n result = intrp.evaluate(expr)(*args)\n # use tvm.testing which also set atol\n tvm.testing.assert_allclose(result.asnumpy(), expected_result, rtol=rtol)\n\n\ndef test_tuple_value():\n tv = container.tuple_object([relay.const(1), relay.const(2), relay.const(3)])\n np.testing.assert_allclose(tv[0].data.asnumpy(), 1)\n np.testing.assert_allclose(tv[1].data.asnumpy(), 2)\n np.testing.assert_allclose(tv[2].data.asnumpy(), 3)\n\n\ndef test_tuple_getitem():\n two = relay.add(relay.const(1), relay.const(1))\n func = relay.Function([], relay.TupleGetItem(relay.Tuple([relay.const(1), relay.const(2)]), 0))\n check_eval(func, [], 1)\n\n\ndef test_id():\n x = relay.var(\"x\", \"float32\")\n ident = relay.Function([x], x)\n one = np.array(1.0, \"float32\")\n check_eval(ident, [one], one)\n\n\ndef test_add_const():\n two = relay.add(relay.const(1), relay.const(1))\n func = relay.Function([], two)\n check_eval(func, [], 2)\n\n\ndef test_mul_param():\n x = relay.var(\"x\", shape=(10, 10))\n y = relay.var(\"y\", shape=(1, 10))\n func = relay.Function([x, y], relay.multiply(x, y))\n x_data = np.random.rand(10, 10).astype(\"float32\")\n y_data = np.random.rand(1, 10).astype(\"float32\")\n check_eval(func, [x_data, y_data], x_data * y_data)\n\n\ndef test_equal():\n i = relay.var(\"i\", shape=[], dtype=\"int32\")\n j = relay.var(\"i\", shape=[], dtype=\"int32\")\n z = relay.equal(i, j)\n func = relay.Function([i, j], z, ret_type=relay.TensorType([], \"bool\"))\n i_data = relay.const(0, \"int32\")\n j_data = relay.const(0, \"int32\")\n check_eval(func, [i_data, j_data], True)\n\n\ndef test_subtract():\n i = relay.var(\"i\", shape=[], dtype=\"int32\")\n sub = relay.subtract(i, relay.const(1, dtype=\"int32\"))\n func = relay.Function([i], sub, ret_type=relay.TensorType([], \"int32\"))\n i_data = np.array(1, dtype=\"int32\")\n check_eval(func, [i_data], 0)\n\n\ndef test_simple_loop():\n mod = tvm.IRModule({})\n sum_up = relay.GlobalVar(\"sum_up\")\n i = relay.var(\"i\", shape=[], dtype=\"int32\")\n sb = ScopeBuilder()\n with sb.if_scope(relay.equal(i, relay.const(0, dtype=\"int32\"))):\n sb.ret(i)\n with 
sb.else_scope():\n one_less = relay.subtract(i, relay.const(1, dtype=\"int32\"))\n rec_call = relay.Call(sum_up, [one_less])\n sb.ret(relay.add(rec_call, i))\n func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], \"int32\"))\n mod[sum_up] = func\n i_data = np.array(10, dtype=\"int32\")\n check_eval(sum_up, [i_data], sum(range(1, 11)), mod=mod)\n\n\ndef test_loop():\n mod = tvm.IRModule({})\n sum_up = relay.GlobalVar(\"sum_up\")\n i = relay.var(\"i\", shape=[], dtype=\"int32\")\n accum = relay.var(\"accum\", shape=[], dtype=\"int32\")\n sb = ScopeBuilder()\n with sb.if_scope(relay.equal(i, relay.const(0, \"int32\"))):\n sb.ret(accum)\n with sb.else_scope():\n one_less = relay.subtract(i, relay.const(1, \"int32\"))\n new_accum = relay.add(accum, i)\n sb.ret(relay.Call(sum_up, [one_less, new_accum]))\n func = relay.Function([i, accum], sb.get())\n mod[sum_up] = func\n i_data = np.array(10, dtype=\"int32\")\n accum_data = np.array(0, dtype=\"int32\")\n check_eval(sum_up, [i_data, accum_data], sum(range(1, 11)), mod=mod)\n\n\ndef test_ref():\n mod = tvm.IRModule()\n three_with_ref = relay.GlobalVar(\"three_with_ref\")\n i = relay.Var(\"i\")\n iv = relay.Var(\"iv\")\n u = relay.Var(\"u\")\n uv = relay.Var(\"uv\")\n body = relay.add(iv, uv)\n body = relay.Let(uv, relay.RefRead(i), body)\n body = relay.Let(u, relay.RefWrite(i, relay.const(2)), body)\n body = relay.Let(iv, relay.RefRead(i), body)\n body = relay.Let(i, relay.RefCreate(relay.const(1)), body)\n mod[three_with_ref] = relay.Function([], body)\n check_eval(three_with_ref, [], 3, mod=mod)\n\n\ndef test_binds():\n x = relay.var(\"x\")\n y = relay.add(x, x)\n intrp = create_executor(\"debug\")\n xx = np.ones((10, 20))\n res = intrp.evaluate(y, binds={x: xx}).asnumpy()\n tvm.testing.assert_allclose(xx + xx, res)\n\n\ndef test_kwargs_params():\n x = relay.var(\"x\", shape=(1, 10))\n y = relay.var(\"y\", shape=(1, 10))\n z = relay.var(\"z\", shape=(1, 10))\n f = relay.Function([x, y, z], x + y + z)\n x_data = np.random.rand(1, 10).astype(\"float32\")\n y_data = np.random.rand(1, 10).astype(\"float32\")\n z_data = np.random.rand(1, 10).astype(\"float32\")\n params = {\"y\": y_data, \"z\": z_data}\n intrp = create_executor(\"debug\")\n res = intrp.evaluate(f)(x_data, **params)\n tvm.testing.assert_allclose(res.asnumpy(), x_data + y_data + z_data)\n\n\ndef test_function_taking_adt_ref_tuple():\n mod = tvm.IRModule()\n prelude = relay.prelude.Prelude(mod)\n intrp = create_executor(\"debug\", mod)\n\n nil_value = ConstructorValue(prelude.nil.tag, [], prelude.nil)\n cons_value = ConstructorValue(\n prelude.cons.tag,\n [nd.array(np.random.rand(1, 10).astype(\"float32\")), nil_value],\n prelude.cons,\n )\n\n ref_value = RefValue(nd.array(np.random.rand(1, 10).astype(\"float32\")))\n tuple_value = container.tuple_object(\n [nd.array(np.random.rand(1, 10).astype(\"float32\")) for _ in range(10)]\n )\n\n id_func = intrp.evaluate(prelude.id)\n\n res_nil = id_func(nil_value)\n assert res_nil.tag == nil_value.tag\n assert len(res_nil.fields) == 0\n\n res_cons = id_func(cons_value)\n assert res_cons.tag == cons_value.tag\n assert len(res_cons.fields) == len(cons_value.fields)\n tvm.testing.assert_allclose(res_cons.fields[0].asnumpy(), cons_value.fields[0].asnumpy())\n assert isinstance(res_cons.fields[1], ConstructorValue)\n assert res_cons.fields[1].tag == prelude.nil.tag\n assert len(res_cons.fields[1].fields) == 0\n\n res_ref = id_func(ref_value)\n tvm.testing.assert_allclose(res_ref.value.asnumpy(), ref_value.value.asnumpy())\n\n res_tuple 
= id_func(tuple_value)\n for i in range(10):\n tvm.testing.assert_allclose(res_tuple[i].asnumpy(), tuple_value[i].asnumpy())\n\n\ndef test_tuple_passing():\n x = relay.var(\n \"x\",\n type_annotation=relay.ty.TupleType(\n [relay.ty.TensorType((), \"int64\"), relay.ty.TensorType((), \"int64\")]\n ),\n )\n\n fn = relay.Function([x], relay.expr.TupleGetItem(x, 0))\n mod = tvm.IRModule({})\n gv = relay.GlobalVar(\"main\")\n mod[gv] = fn\n mod = relay.transform.InferType()(mod)\n\n ctx = tvm.cpu()\n target = tvm.target.Target(\"llvm\")\n exec = relay.create_executor(mod=mod, ctx=ctx, target=target)\n f = exec.evaluate(gv)\n # First use a Python tuple.\n out = f((10, 8))\n tvm.testing.assert_allclose(out.asnumpy(), np.array(10))\n # Second use a tuple value.\n value_tuple = container.tuple_object([nd.array(np.array(11)), nd.array(np.array(12))])\n out = f(value_tuple)\n tvm.testing.assert_allclose(out.asnumpy(), np.array(11))\n\n\nif __name__ == \"__main__\":\n test_id()\n test_add_const()\n test_equal()\n test_subtract()\n test_simple_loop()\n test_loop()\n test_binds()\n test_kwargs_params()\n test_ref()\n test_tuple_value()\n test_tuple_getitem()\n test_function_taking_adt_ref_tuple()\n test_tuple_passing()\n"
] |
[
[
"numpy.array",
"numpy.random.rand",
"numpy.ones"
]
] |
grainpowder/gru-forward-numpy-app
|
[
"efd24f9f397d51e7e18bdad5cba12451ad69d3de"
] |
[
"src/npgru/predictor/tensorflow_predictor.py"
] |
[
"from typing import List, Tuple\n\nimport sentencepiece as spm\nimport tensorflow as tf\nimport tensorflow.keras as keras\n\nfrom npgru.predictor.category_predictor import CategoryPredictor\nfrom npgru.preprocessor.model_file import get_model_dir\n\n\nclass TensorflowPredictor(CategoryPredictor):\n\n def __init__(self):\n model_dir = get_model_dir()\n self._tokenizer = spm.SentencePieceProcessor(model_file=str(model_dir.joinpath(\"tokenizer.model\")))\n self._model = keras.models.load_model(model_dir.joinpath(\"tensorflow\"))\n\n def predict(self, title: str, num_predictions) -> List[Tuple[int, float]]:\n tokenized_title = self._tokenizer.encode(title) if title else [1]\n probabilities = self._model(tf.constant([tokenized_title]))\n prediction = sorted(enumerate(probabilities.numpy()[0]), key=lambda x: x[1], reverse=True)[:num_predictions]\n return prediction\n"
] |
[
[
"tensorflow.constant"
]
] |
tanlin2013/TNpy
|
[
"bc450825f79b6a95ad724ed05c61fda8e0545975"
] |
[
"tnpy/finite_dmrg.py"
] |
[
"import time\nimport logging\nimport numpy as np\nfrom tensornetwork import Node\nfrom itertools import count\nfrom tnpy.finite_algorithm_base import FiniteAlgorithmBase\nfrom tnpy.linalg import svd, eigshmv\nfrom tnpy.operators import MPO\nfrom typing import Iterable, Union, Tuple\n\n\nclass FiniteDMRG(FiniteAlgorithmBase):\n\n def __init__(self, mpo: MPO, chi: Union[int, None], init_method='random'):\n \"\"\"\n\n Args:\n mpo:\n chi: Maximum bond dimension of MPS\n init_method: 'random' or a filepath\n \"\"\"\n super(FiniteDMRG, self).__init__(mpo, chi, init_method)\n\n def _unit_solver(self, site, tol=1e-7) -> Tuple[float, np.ndarray]:\n W = self.mpo[site]\n\n def matvec(x):\n M = Node(x.reshape(self.mps_shape(site)))\n if site == 0:\n R = self.right_envs[site]\n R[0] ^ M[2]\n R[1] ^ W[0]\n M[1] ^ W[1]\n result = M @ W @ R\n elif site == self.N-1:\n L = self.left_envs[site]\n L[0] ^ M[0]\n L[1] ^ W[0]\n M[1] ^ W[1]\n result = L @ M @ W\n else:\n L = self.left_envs[site]\n R = self.right_envs[site]\n L[0] ^ M[0]\n L[1] ^ W[0]\n R[0] ^ M[2]\n R[1] ^ W[1]\n M[1] ^ W[2]\n result = L @ M @ W @ R\n return result.tensor.reshape(x.shape)\n v0 = self._mps.get_tensor(site).reshape(-1, 1)\n return eigshmv(matvec, v0, tol=0.1*tol)\n\n def _modified_density_matrix(self, site, alpha=0):\n # TODO: return dm\n NotImplemented\n\n def sweep(self, iterator: Iterable, tol: float = 1e-7) -> float:\n \"\"\"\n\n Args:\n iterator:\n tol:\n\n Returns:\n\n \"\"\"\n direction = 1 if iterator[0] < iterator[-1] else -1\n for site in iterator:\n E, theta = self._unit_solver(site, tol)\n logging.info(f\"Sweeping to site [{site+1}/{self.N}], E/N = {E/self.N}\")\n if direction == 1:\n theta = theta.reshape(self.d * self.mps_shape(site)[0], -1)\n elif direction == -1:\n theta = theta.reshape(-1, self.d * self.mps_shape(site)[2])\n u, s, vt = svd(theta, chi=self.mps_shape(site)[1+direction])\n if direction == 1:\n self._mps.tensors[site] = u.reshape(self.mps_shape(site))\n residual = Node(np.dot(np.diagflat(s), vt))\n M = Node(self._mps.get_tensor(site+1))\n residual[1] ^ M[0]\n self._mps.tensors[site+1] = (residual @ M).tensor\n self._update_left_env(site+1)\n elif direction == -1:\n self._mps.tensors[site] = vt.reshape(self.mps_shape(site))\n residual = Node(np.dot(u, np.diagflat(s)))\n M = Node(self._mps.get_tensor(site-1))\n M[2] ^ residual[0]\n self._mps.tensors[site-1] = (M @ residual).tensor\n self._update_right_env(site-1)\n return E\n\n def update(self, tol: float = 1e-7, max_sweep: int = 100):\n \"\"\"\n\n Args:\n tol:\n max_sweep:\n\n Returns:\n\n \"\"\"\n logging.info(f\"Set up tol = {tol}, up to maximally {max_sweep} sweeps\")\n clock = [time.process_time()]\n for n_sweep in count(start=1):\n logging.info(f\"In sweep epoch [{n_sweep}/{max_sweep}]\")\n El = self.sweep(range(self.N-1))\n Er = self.sweep(range(self.N-1, 0, -1))\n clock.append(time.process_time()-clock[-1])\n dE = (El - Er)/self.N\n if abs(dE) < tol:\n break\n elif n_sweep == max_sweep:\n # @TODO: dump mps to file and raise error\n logging.warning(f\"Maximum number of sweeps {max_sweep} reached, \"\n f\"yet dE/N = {dE} > tol = {tol}\")\n break\n elif abs(dE) > tol and dE < 0:\n raise ValueError(f\"Fail on lowering energy, got dE/N = {dE}\")\n logging.info(f\"{n_sweep} loops, best of 3: {np.mean(np.sort(clock)[:3])} sec per loop\")\n\n\nclass Projector:\n\n def __init__(self):\n NotImplemented\n"
] |
[
[
"numpy.diagflat",
"numpy.sort"
]
] |
AlexImb/automl-streams
|
[
"f730918f2a006def405b4c8c96b7849adc23eb2a"
] |
[
"demos/tpot/results/batch_pipeline_covtype.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom tpot.builtins import StackingEstimator\n\n# NOTE: Make sure that the outcome column is labeled 'target' in the data file\ntpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)\nfeatures = tpot_data.drop('target', axis=1)\ntraining_features, testing_features, training_target, testing_target = \\\n train_test_split(features, tpot_data['target'], random_state=None)\n\n# Average CV score on the training set was: 0.6119910991155427\nexported_pipeline = make_pipeline(\n StackingEstimator(estimator=BernoulliNB(alpha=0.001, fit_prior=True)),\n ExtraTreesClassifier(bootstrap=False, criterion=\"entropy\", max_features=0.1, min_samples_leaf=3, min_samples_split=12, n_estimators=100)\n)\n\nexported_pipeline.fit(training_features, training_target)\nresults = exported_pipeline.predict(testing_features)\n"
] |
[
[
"sklearn.ensemble.ExtraTreesClassifier",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.naive_bayes.BernoulliNB"
]
] |
cypherics/ttAugment
|
[
"149759f9c1ef86b7d0f5e634dd4ed16f868f56e5"
] |
[
"tt_augment/tt_custom/tt_fwd_bkd.py"
] |
[
"import cv2\nimport numpy as np\nfrom imgaug import imresize_single_image\n\nfrom imgaug.augmenters import sm, meta\nfrom imgaug.augmenters.flip import fliplr\n\n\nclass MirrorFWD(meta.Augmenter):\n \"\"\"\n Mirror the pixel to get to network_dimension\n \"\"\"\n\n def __init__(self, network_dimension: tuple, transform_dimension: tuple):\n super().__init__()\n self.network_dimension = network_dimension\n self.transform_dimension = transform_dimension\n\n def get_parameters(self):\n return [self.network_dimension, self.transform_dimension]\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n\n images = batch.images\n nb_images = len(images)\n result = []\n for i in sm.xrange(nb_images):\n img = images[i]\n\n limit_w = (self.network_dimension[2] - self.transform_dimension[2]) // 2\n\n limit_h = (self.network_dimension[1] - self.transform_dimension[1]) // 2\n img = cv2.copyMakeBorder(\n img,\n limit_h,\n limit_h,\n limit_w,\n limit_w,\n borderType=cv2.BORDER_REFLECT_101,\n )\n result.append(img)\n batch.images = np.array(result, images.dtype)\n return batch\n\n\nclass MirrorBKD(meta.Augmenter):\n \"\"\"\n Remove the added pixel, Reverse of MirrorFWD\n \"\"\"\n\n def __init__(self, network_dimension: tuple, transform_dimension: tuple):\n super().__init__()\n self.network_dimension = network_dimension\n self.transform_dimension = transform_dimension\n\n def get_parameters(self):\n return [self.network_dimension, self.transform_dimension]\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n images = batch.images\n nb_images = len(images)\n result = []\n\n image_width, image_height = self.network_dimension[2], self.network_dimension[1]\n\n crop_width, crop_height = (\n self.transform_dimension[2],\n self.transform_dimension[1],\n )\n\n for i in sm.xrange(nb_images):\n img = images[i]\n\n dy = (image_height - crop_height) // 2\n dx = (image_width - crop_width) // 2\n\n y1 = dy\n y2 = y1 + crop_height\n x1 = dx\n x2 = x1 + crop_width\n\n result.append(img[y1:y2, x1:x2, :])\n batch.images = np.array(result, images.dtype)\n return batch\n\n\nclass FlipLR(meta.Augmenter):\n \"\"\"\n FLip an image\n \"\"\"\n\n def __init__(self, transform_dimension: tuple):\n super().__init__()\n self.transform_dimension = transform_dimension\n\n def get_parameters(self):\n return [self.network_dimension, self.transform_dimension]\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n for i, images in enumerate(batch.images):\n batch.images[i] = fliplr(batch.images[i])\n return batch\n\n\nclass FlipUD(meta.Augmenter):\n \"\"\"\n Flip an image\n \"\"\"\n\n def __init__(self, transform_dimension: tuple):\n super().__init__()\n self.transform_dimension = transform_dimension\n\n def get_parameters(self):\n return [self.network_dimension, self.transform_dimension]\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n for i, images in enumerate(batch.images):\n batch.images[i] = batch.images[i][::-1, ...]\n return batch\n\n\nclass RotateFWD(meta.Augmenter):\n \"\"\"\n Rotate an image with angle\n \"\"\"\n\n def __init__(self, angle_axis: int, transform_dimension: tuple):\n super().__init__()\n self.transform_dimension = transform_dimension\n self.angle_axis = angle_axis\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n result = list()\n for i, images in enumerate(batch.images):\n image_rs = np.rot90(images, self.angle_axis)\n result.append(image_rs)\n batch.images = np.array(result, batch.images.dtype)\n return batch\n\n def 
get_parameters(self):\n return [self.network_dimension, self.transform_dimension]\n\n\nclass RotateBKD(meta.Augmenter):\n \"\"\"\n Rotate an image with -angle, Reverse the Rotate transformation\n \"\"\"\n\n def __init__(self, angle_axis: int, transform_dimension: tuple):\n super().__init__()\n self.transform_dimension = transform_dimension\n self.angle_axis = angle_axis\n\n def get_parameters(self):\n return [self.network_dimension, self.transform_dimension]\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n result = list()\n for i, images in enumerate(batch.images):\n image_rs = np.rot90(images, -self.angle_axis)\n result.append(image_rs)\n batch.images = np.array(result, batch.images.dtype)\n return batch\n\n\nclass ScaleFWD(meta.Augmenter):\n \"\"\"\n Scale an image from transform_dimension to network_dimension\n \"\"\"\n\n def __init__(self, network_dimension: tuple, transform_dimension: tuple):\n super().__init__()\n self.network_dimension = network_dimension\n self.transform_dimension = transform_dimension\n\n def get_parameters(self):\n return [self.network_dimension, self.transform_dimension]\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n result = list()\n for i, images in enumerate(batch.images):\n image_rs = imresize_single_image(\n images,\n (self.network_dimension[1], self.network_dimension[2]),\n interpolation=\"nearest\",\n )\n result.append(image_rs)\n batch.images = np.array(result, batch.images.dtype)\n return batch\n\n\nclass ScaleBKD(meta.Augmenter):\n \"\"\"\n Scale an image from network_dimension to transform_dimension\n \"\"\"\n\n def __init__(self, network_dimension: tuple, transform_dimension: tuple):\n super().__init__()\n self.network_dimension = network_dimension\n self.transform_dimension = transform_dimension\n\n def get_parameters(self):\n return [self.network_dimension, self.transform_dimension]\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n result = list()\n for i, images in enumerate(batch.images):\n image_rs = imresize_single_image(\n images,\n (self.transform_dimension[1], self.transform_dimension[2]),\n interpolation=\"nearest\",\n )\n result.append(image_rs)\n batch.images = np.array(result, batch.images.dtype)\n return batch\n\n\nclass NoSegAug(meta.Augmenter):\n def __init__(self, network_dimension: tuple, transform_dimension: tuple):\n super().__init__()\n self.network_dimension = network_dimension\n self.transform_dimension = transform_dimension\n\n def get_parameters(self):\n return [self.network_dimension, self.transform_dimension]\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n return batch\n"
] |
[
[
"numpy.rot90",
"numpy.array"
]
] |
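Below is a standalone sketch (not part of the dataset row above) of the padding/cropping geometry that `MirrorFWD` and `MirrorBKD` implement; the dimension tuples are read as `[1]` = height and `[2]` = width, and the concrete sizes are illustrative assumptions:

```python
# Round-trip of the MirrorFWD/MirrorBKD geometry with illustrative sizes.
import cv2
import numpy as np

network_dim, transform_dim = (1, 128, 128), (1, 100, 100)  # assumed (C, H, W)-style tuples
img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)

# MirrorFWD: reflect-pad by half the size difference on each side
pad_h = (network_dim[1] - transform_dim[1]) // 2
pad_w = (network_dim[2] - transform_dim[2]) // 2
padded = cv2.copyMakeBorder(img, pad_h, pad_h, pad_w, pad_w,
                            borderType=cv2.BORDER_REFLECT_101)
assert padded.shape == (128, 128, 3)

# MirrorBKD: center-crop back to the transform size, undoing the padding
dy, dx = pad_h, pad_w
restored = padded[dy:dy + transform_dim[1], dx:dx + transform_dim[2], :]
assert np.array_equal(restored, img)
```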
Dazzid/Deep_Learning_Techniques_Applied_to_Estimate_Music_Gestural_Patterns
|
[
"4a61a3d85429a978cb520a9efacee537747f813d"
] |
[
"L3_Conv_LSTM_Model.py"
] |
[
"# convlstm model\nimport numpy as np\nimport csv\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n# load a single file as a numpy array\ndef load_file(filepath):\n data = []\n with open(filepath) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n data.append(row)\n return np.array(data)\n\n# load a list of files and return as a 3d numpy array\ndef load_group(filenames, prefix=''):\n loaded = list()\n for name in filenames:\n data = load_file(prefix + name)\n loaded.append(data)\n # stack group so that features are the 3rd dimension\n loaded = np.dstack(loaded)\n return loaded\n\n# load a dataset group, such as train or test\ndef load_dataset_group(group, prefix=''):\n filepath = prefix + group\n # load all 9 files as a single array\n\n # total acceleration\n filenames = ['01_acc_x.csv', '02_acc_y.csv', '03_acc_z.csv',\n '04_gyro_x.csv', '05_gyro_y.csv', '06_gyro_z.csv',\n '07_euler_x.csv', '08_euler_y.csv', '09_euler_z.csv']\n\n # load input data\n X = load_group(filenames, filepath).astype(np.float64)\n # load class output\n y = load_file(prefix + group + '10_label.csv').astype(np.int)\n return X, y\n\n# load the dataset, returns train and test X and y elements\ndef load_dataset(prefix=''):\n # load all train\n trainX, trainy = load_dataset_group('train/', prefix + 'data/Gestures/Groups/')\n # load all test\n testX, testy = load_dataset_group('test/', prefix + 'data/Gestures/Groups/')\n # zero-offset class values\n trainy = trainy - 1\n testy = testy - 1\n # one hot encode y\n trainy = tf.keras.utils.to_categorical(trainy)\n testy = tf.keras.utils.to_categorical(testy)\n return trainX, trainy, testX, testy\n\n# fit and evaluate a model\ndef evaluate_model(trainX, trainy, testX, testy, batches):\n # define model\n batch_size = batches\n verbose, epochs = 0, 50\n n_features, n_outputs = trainX.shape[2], trainy.shape[1]\n # reshape into subsequences (samples, time steps, rows, cols, channels)\n n_steps, n_length = 3, 50\n trainX = trainX.reshape((trainX.shape[0], n_steps, 1, n_length, n_features))\n testX = testX.reshape((testX.shape[0], n_steps, 1, n_length, n_features))\n # define model\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.ConvLSTM2D(64, (1,3), activation='relu', input_shape=(n_steps, 1, n_length, n_features)))\n model.add(tf.keras.layers.Dropout(0.5))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(100, activation='relu'))\n model.add(tf.keras.layers.Dense(n_outputs, activation='softmax'))\n tf.keras.utils.plot_model(model, show_shapes=False, show_layer_names=True, to_file='figues/Conv_LSTM_Model.png')\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n # fit network\n history = model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose, validation_data=(testX, testy))\n # evaluate model\n _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0)\n return accuracy, history\n\n# summarize scores\ndef summarize_results(scores, params):\n print(scores, params)\n # summarize mean and standard deviation\n for i in range(len(scores)):\n m, s = np.mean(scores[i]), np.std(scores[i])\n print('Param = %d: %.3f%% (+/-%.3f)' % (params[i], m, s))\n # boxplot of scores\n plt.boxplot(scores, labels=params)\n plt.savefig('figures/ConvLSTM2D.png')\n plt.show()\n\n# run an experiment\ndef run_experiment(repeats=10):\n # load data\n trainX, trainy, testX, testy = load_dataset()\n final_scores = list()\n batches = 
[8, 16, 32, 64, 128, 256]\n for i in range(len(batches)):\n scores = list()\n # repeat experiment\n for r in range(repeats):\n score, history = evaluate_model(trainX, trainy, testX, testy, batches[i])\n score = score * 100.0\n print('>#%d: %.3f' % (r+1, score))\n scores.append(score)\n # summarize results\n final_scores.append(scores)\n summarize_results(final_scores, batches)\n return score, history\n\ndef plot_learning_curves(loss, val_loss):\n plt.plot(np.arange(len(loss)) + 0.5, loss, \"b.-\", label=\"Training loss\")\n plt.plot(np.arange(len(val_loss)) + 1, val_loss, \"r.-\", label=\"Validation loss\")\n plt.gca().xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))\n plt.axis([1, 50, 0, 0.5])\n plt.legend(fontsize=14)\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Loss\")\n plt.grid(True)\n\n_, history = run_experiment(10)\nplot_learning_curves(history.history[\"loss\"], history.history[\"val_loss\"])\nplt.show()"
] |
[
[
"matplotlib.pyplot.legend",
"tensorflow.keras.Sequential",
"numpy.mean",
"tensorflow.keras.layers.ConvLSTM2D",
"matplotlib.pyplot.gca",
"numpy.std",
"matplotlib.pyplot.axis",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.utils.plot_model",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.boxplot",
"numpy.dstack",
"matplotlib.ticker.MaxNLocator",
"matplotlib.pyplot.grid",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.utils.to_categorical"
]
] |
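As a side note on the row above: `evaluate_model` assumes 150-timestep windows and reshapes them into the `(samples, time steps, rows, cols, channels)` layout that `ConvLSTM2D` expects. A small sketch of that reshape (the window length 150 = `n_steps * n_length` is inferred from the code, not stated in the dataset):

```python
import numpy as np

n_steps, n_length, n_features = 3, 50, 9
X = np.arange(150 * n_features).reshape(1, 150, n_features)  # one 150-step window

# (samples, time steps, rows, cols, channels) as expected by ConvLSTM2D
X5 = X.reshape(X.shape[0], n_steps, 1, n_length, n_features)
assert X5.shape == (1, 3, 1, 50, 9)
# subsequence 1 starts at timestep 50 of the original window
assert np.array_equal(X5[0, 1, 0, 0], X[0, 50])
```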
gasperpodobnik/nnUNet
|
[
"f11906b13344db9f54e303378748a0defdea8331"
] |
[
"nnunet/utilities/overlay_plots.py"
] |
[
"# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nfrom multiprocessing.pool import Pool\r\n\r\nimport numpy as np\r\nimport SimpleITK as sitk\r\nfrom nnunet.utilities.task_name_id_conversion import convert_task_name_to_id, convert_id_to_task_name\r\nfrom batchgenerators.utilities.file_and_folder_operations import *\r\nfrom nnunet.paths import *\r\n\r\ncolor_cycle = (\r\n \"000000\",\r\n \"4363d8\",\r\n \"f58231\",\r\n \"3cb44b\",\r\n \"e6194B\",\r\n \"911eb4\",\r\n \"ffe119\",\r\n \"bfef45\",\r\n \"42d4f4\",\r\n \"f032e6\",\r\n \"000075\",\r\n \"9A6324\",\r\n \"808000\",\r\n \"800000\",\r\n \"469990\",\r\n)\r\n\r\n\r\ndef hex_to_rgb(hex: str):\r\n assert len(hex) == 6\r\n return tuple(int(hex[i:i + 2], 16) for i in (0, 2, 4))\r\n\r\n\r\ndef generate_overlay(input_image: np.ndarray, segmentation: np.ndarray, mapping: dict = None, color_cycle=color_cycle,\r\n overlay_intensity=0.6):\r\n \"\"\"\r\n image must be a color image, so last dimension must be 3. if image is grayscale, tile it first!\r\n Segmentation must be label map of same shape as image (w/o color channels)\r\n mapping can be label_id -> idx_in_cycle or None\r\n\r\n returned image is scaled to [0, 255]!!!\r\n \"\"\"\r\n # assert len(image.shape) == len(segmentation.shape)\r\n # assert all([i == j for i, j in zip(image.shape, segmentation.shape)])\r\n\r\n # create a copy of image\r\n image = np.copy(input_image)\r\n\r\n if len(image.shape) == 2:\r\n image = np.tile(image[:, :, None], (1, 1, 3))\r\n elif len(image.shape) == 3:\r\n assert image.shape[2] == 3, 'if 3d image is given the last dimension must be the color channels ' \\\r\n '(3 channels). Only 2D images are supported'\r\n\r\n else:\r\n raise RuntimeError(\"unexpected image shape. 
only 2D images and 2D images with color channels (color in \"\r\n \"last dimension) are supported\")\r\n\r\n # rescale image to [0, 255]\r\n image = image - image.min()\r\n image = image / image.max() * 255\r\n\r\n # create output\r\n\r\n if mapping is None:\r\n uniques = np.unique(segmentation)\r\n mapping = {i: c for c, i in enumerate(uniques)}\r\n\r\n for l in mapping.keys():\r\n image[segmentation == l] += overlay_intensity * np.array(hex_to_rgb(color_cycle[mapping[l]]))\r\n\r\n # rescale result to [0, 255]\r\n image = image / image.max() * 255\r\n return image.astype(np.uint8)\r\n\r\n\r\ndef plot_overlay(image_file: str, segmentation_file: str, output_file: str, overlay_intensity: float = 0.6):\r\n import matplotlib.pyplot as plt\r\n\r\n image = sitk.GetArrayFromImage(sitk.ReadImage(image_file))\r\n seg = sitk.GetArrayFromImage(sitk.ReadImage(segmentation_file))\r\n assert all([i == j for i, j in zip(image.shape, seg.shape)]), \"image and seg do not have the same shape: %s, %s\" % (\r\n image_file, segmentation_file)\r\n\r\n assert len(image.shape) == 3, 'only 3D images/segs are supported'\r\n\r\n fg_mask = seg != 0\r\n fg_per_slice = fg_mask.sum((1, 2))\r\n selected_slice = np.argmax(fg_per_slice)\r\n\r\n overlay = generate_overlay(image[selected_slice], seg[selected_slice], overlay_intensity=overlay_intensity)\r\n\r\n plt.imsave(output_file, overlay)\r\n\r\n\r\ndef plot_overlay_preprocessed(case_file: str, output_file: str, overlay_intensity: float = 0.6, modality_index=0):\r\n import matplotlib.pyplot as plt\r\n data = np.load(case_file)['data']\r\n\r\n assert modality_index < (data.shape[0] - 1), 'This dataset only supports modality index up to %d' % (data.shape[0] - 2)\r\n\r\n image = data[modality_index]\r\n seg = data[-1]\r\n seg[seg < 0] = 0\r\n\r\n fg_mask = seg > 0\r\n fg_per_slice = fg_mask.sum((1, 2))\r\n selected_slice = np.argmax(fg_per_slice)\r\n\r\n overlay = generate_overlay(image[selected_slice], seg[selected_slice], overlay_intensity=overlay_intensity)\r\n\r\n plt.imsave(output_file, overlay)\r\n\r\n\r\ndef multiprocessing_plot_overlay(list_of_image_files, list_of_seg_files, list_of_output_files, overlay_intensity,\r\n num_processes=8):\r\n p = Pool(num_processes)\r\n r = p.starmap_async(plot_overlay, zip(\r\n list_of_image_files, list_of_seg_files, list_of_output_files, [overlay_intensity] * len(list_of_output_files)\r\n ))\r\n r.get()\r\n p.close()\r\n p.join()\r\n\r\n\r\ndef multiprocessing_plot_overlay_preprocessed(list_of_case_files, list_of_output_files, overlay_intensity,\r\n num_processes=8, modality_index=0):\r\n p = Pool(num_processes)\r\n r = p.starmap_async(plot_overlay_preprocessed, zip(\r\n list_of_case_files, list_of_output_files, [overlay_intensity] * len(list_of_output_files),\r\n [modality_index] * len(list_of_output_files)\r\n ))\r\n r.get()\r\n p.close()\r\n p.join()\r\n\r\n\r\ndef generate_overlays_for_task(task_name_or_id, output_folder, num_processes=8, modality_idx=0, use_preprocessed=True,\r\n data_identifier=default_data_identifier):\r\n if isinstance(task_name_or_id, str):\r\n if not task_name_or_id.startswith(\"Task\"):\r\n task_name_or_id = int(task_name_or_id)\r\n task_name = convert_id_to_task_name(task_name_or_id)\r\n else:\r\n task_name = task_name_or_id\r\n else:\r\n task_name = convert_id_to_task_name(int(task_name_or_id))\r\n\r\n if not use_preprocessed:\r\n folder = join(nnUNet_raw_data, task_name)\r\n\r\n identifiers = [i[:-7] for i in subfiles(join(folder, 'labelsTr'), suffix='.nii.gz', join=False)]\r\n\r\n image_files = 
[join(folder, 'imagesTr', i + \"_%04.0d.nii.gz\" % modality_idx) for i in identifiers]\r\n seg_files = [join(folder, 'labelsTr', i + \".nii.gz\") for i in identifiers]\r\n\r\n assert all([isfile(i) for i in image_files])\r\n assert all([isfile(i) for i in seg_files])\r\n\r\n maybe_mkdir_p(output_folder)\r\n output_files = [join(output_folder, i + '.png') for i in identifiers]\r\n multiprocessing_plot_overlay(image_files, seg_files, output_files, 0.6, num_processes)\r\n else:\r\n folder = join(preprocessing_output_dir, task_name)\r\n if not isdir(folder): raise RuntimeError(\"run preprocessing for that task first\")\r\n matching_folders = subdirs(folder, prefix=data_identifier + \"_stage\")\r\n if len(matching_folders) == 0:\r\n raise RuntimeError(\"run preprocessing for that task first (use the default experiment planner!)\")\r\n matching_folders.sort()\r\n folder = matching_folders[-1]\r\n identifiers = [i[:-4] for i in subfiles(folder, suffix='.npz', join=False)]\r\n maybe_mkdir_p(output_folder)\r\n output_files = [join(output_folder, i + '.png') for i in identifiers]\r\n image_files = [join(folder, i + \".npz\") for i in identifiers]\r\n multiprocessing_plot_overlay_preprocessed(image_files, output_files, overlay_intensity=0.6,\r\n num_processes=num_processes, modality_index=modality_idx)\r\n\r\n\r\ndef entry_point_generate_overlay():\r\n import argparse\r\n parser = argparse.ArgumentParser(\"Plots png overlays of the slice with the most foreground. Note that this \"\r\n \"disregards spacing information!\")\r\n parser.add_argument('-t', type=str, help=\"task name or task ID\", required=True)\r\n parser.add_argument('-o', type=str, help=\"output folder\", required=True)\r\n parser.add_argument('-num_processes', type=int, default=8, required=False, help=\"number of processes used. Default: 8\")\r\n parser.add_argument('-modality_idx', type=int, default=0, required=False,\r\n help=\"modality index used (0 = _0000.nii.gz). Default: 0\")\r\n parser.add_argument('--use_raw', action='store_true', required=False, help=\"if set then we use raw data. else \"\r\n \"we use preprocessed\")\r\n args = parser.parse_args()\r\n\r\n generate_overlays_for_task(args.t, args.o, args.num_processes, args.modality_idx, use_preprocessed=not args.use_raw)"
] |
[
[
"matplotlib.pyplot.imsave",
"numpy.unique",
"numpy.tile",
"numpy.copy",
"numpy.argmax",
"numpy.load"
]
] |
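A quick synthetic check (not from the repo) of the `generate_overlay` helper in the row above; a grayscale slice is tiled to RGB inside the function and the result is rescaled to `[0, 255]`:

```python
import numpy as np

image = np.random.rand(64, 64)             # grayscale 2D slice
seg = np.zeros((64, 64), dtype=np.uint8)   # label map of the same spatial shape
seg[16:32, 16:32] = 1                      # a single foreground label

overlay = generate_overlay(image, seg, overlay_intensity=0.6)
assert overlay.shape == (64, 64, 3) and overlay.dtype == np.uint8
```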
wannabeOG/NLP-Fall-2021-Course-Project
|
[
"4a4e46733915c09ecf1389e6aea50f93f8fd34f1"
] |
[
"fairseq/options.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\n\nimport torch\nimport sys\n\nfrom fairseq import utils\nfrom fairseq.data.indexed_dataset import get_available_dataset_impl\n\n\ndef get_preprocessing_parser(default_task='translation'):\n parser = get_parser('Preprocessing', default_task)\n add_preprocess_args(parser)\n return parser\n\n\ndef get_training_parser(default_task='translation'):\n parser = get_parser('Trainer', default_task)\n add_dataset_args(parser, train=True)\n add_distributed_training_args(parser)\n add_model_args(parser)\n add_optimization_args(parser)\n add_checkpoint_args(parser)\n return parser\n\n\ndef get_generation_parser(interactive=False, default_task='translation'):\n parser = get_parser('Generation', default_task)\n add_dataset_args(parser, gen=True)\n add_generation_args(parser)\n if interactive:\n add_interactive_args(parser)\n return parser\n\n\ndef get_interactive_generation_parser(default_task='translation'):\n return get_generation_parser(interactive=True, default_task=default_task)\n\n\ndef get_eval_lm_parser(default_task='language_modeling'):\n parser = get_parser('Evaluate Language Model', default_task)\n add_dataset_args(parser, gen=True)\n add_eval_lm_args(parser)\n return parser\n\n\ndef get_validation_parser(default_task=None):\n parser = get_parser('Validation', default_task)\n add_dataset_args(parser, train=True)\n group = parser.add_argument_group('Evaluation')\n add_common_eval_args(group)\n return parser\n\n\ndef eval_str_list(x, type=float):\n if x is None:\n return None\n if isinstance(x, str):\n x = eval(x)\n try:\n return list(map(type, x))\n except TypeError:\n return [type(x)]\n\n\ndef eval_bool(x, default=False):\n if x is None:\n return default\n try:\n return bool(eval(x))\n except TypeError:\n return default\n\n\ndef parse_args_and_arch(parser, input_args=None, parse_known=False, suppress_defaults=False):\n if suppress_defaults:\n # Parse args without any default values. This requires us to parse\n # twice, once to identify all the necessary task/model args, and a second\n # time with all defaults set to None.\n args = parse_args_and_arch(\n parser,\n input_args=input_args,\n parse_known=parse_known,\n suppress_defaults=False,\n )\n suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])\n suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})\n args = suppressed_parser.parse_args(input_args)\n return argparse.Namespace(**{\n k: v\n for k, v in vars(args).items()\n if v is not None\n })\n\n from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY\n\n # The parser doesn't know about model/criterion/optimizer-specific args, so\n # we parse twice. 
First we parse the model/criterion/optimizer, then we\n # parse a second time after adding the *-specific arguments.\n # If input_args is given, we will parse those args instead of sys.argv.\n args, _ = parser.parse_known_args(input_args)\n\n # Add model-specific args to parser.\n if hasattr(args, 'arch'):\n model_specific_group = parser.add_argument_group(\n 'Model-specific configuration',\n # Only include attributes which are explicitly given as command-line\n # arguments or which have default values.\n argument_default=argparse.SUPPRESS,\n )\n ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)\n\n # Add *-specific args to parser.\n from fairseq.registry import REGISTRIES\n for registry_name, REGISTRY in REGISTRIES.items():\n choice = getattr(args, registry_name, None)\n if choice is not None:\n cls = REGISTRY['registry'][choice]\n if hasattr(cls, 'add_args'):\n cls.add_args(parser)\n if hasattr(args, 'task'):\n from fairseq.tasks import TASK_REGISTRY\n TASK_REGISTRY[args.task].add_args(parser)\n if getattr(args, 'use_bmuf', False):\n # hack to support extra args for block distributed data parallelism\n from fairseq.optim.bmuf import FairseqBMUF\n FairseqBMUF.add_args(parser)\n\n # Parse a second time.\n if parse_known:\n args, extra = parser.parse_known_args(input_args)\n else:\n args = parser.parse_args(input_args)\n extra = None\n\n # Post-process args.\n if hasattr(args, 'max_sentences_valid') and args.max_sentences_valid is None:\n args.max_sentences_valid = args.max_sentences\n if hasattr(args, 'max_tokens_valid') and args.max_tokens_valid is None:\n args.max_tokens_valid = args.max_tokens\n if getattr(args, 'memory_efficient_fp16', False):\n args.fp16 = True\n\n # Apply architecture configuration.\n if hasattr(args, 'arch'):\n ARCH_CONFIG_REGISTRY[args.arch](args)\n\n if parse_known:\n return args, extra\n else:\n return args\n\n\ndef get_parser(desc, default_task='translation'):\n # Before creating the true parser, we need to import optional user module\n # in order to eagerly import custom tasks, optimizers, architectures, etc.\n usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)\n usr_parser.add_argument('--user-dir', default=None)\n usr_args, _ = usr_parser.parse_known_args()\n utils.import_user_module(usr_args)\n\n parser = argparse.ArgumentParser(allow_abbrev=False)\n # fmt: off\n parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')\n parser.add_argument('--log-interval', type=int, default=1000, metavar='N',\n help='log progress every N batches (when progress bar is disabled)')\n parser.add_argument('--log-format', default=None, help='log format to use',\n choices=['json', 'none', 'simple', 'tqdm'])\n parser.add_argument('--tensorboard-logdir', metavar='DIR', default='',\n help='path to save logs for tensorboard, should match --logdir '\n 'of running tensorboard (default: no tensorboard logging)')\n parser.add_argument(\"--tbmf-wrapper\", action=\"store_true\",\n help=\"[FB only] \")\n parser.add_argument('--seed', default=1, type=int, metavar='N',\n help='pseudo random number generator seed')\n parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA')\n parser.add_argument('--fp16', action='store_true', help='use FP16')\n parser.add_argument('--memory-efficient-fp16', action='store_true',\n help='use a memory-efficient version of FP16 training; implies --fp16')\n parser.add_argument('--fp16-init-scale', default=2 ** 7, type=int,\n help='default FP16 loss scale')\n 
parser.add_argument('--fp16-scale-window', type=int,\n help='number of updates before increasing loss scale')\n parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float,\n help='pct of updates that can overflow before decreasing the loss scale')\n parser.add_argument('--min-loss-scale', default=1e-4, type=float, metavar='D',\n help='minimum FP16 loss scale, after which training is stopped')\n parser.add_argument('--threshold-loss-scale', type=float,\n help='threshold FP16 loss scale from below')\n parser.add_argument('--user-dir', default=None,\n help='path to a python module containing custom extensions (tasks and/or architectures)')\n\n from fairseq.registry import REGISTRIES\n for registry_name, REGISTRY in REGISTRIES.items():\n parser.add_argument(\n '--' + registry_name.replace('_', '-'),\n default=REGISTRY['default'],\n choices=REGISTRY['registry'].keys(),\n )\n\n # Task definitions can be found under fairseq/tasks/\n from fairseq.tasks import TASK_REGISTRY\n parser.add_argument('--task', metavar='TASK', default=default_task,\n choices=TASK_REGISTRY.keys(),\n help='task')\n # fmt: on\n return parser\n\n\ndef add_preprocess_args(parser):\n group = parser.add_argument_group('Preprocessing')\n # fmt: off\n group.add_argument(\"-s\", \"--source-lang\", default=None, metavar=\"SRC\",\n help=\"source language\")\n group.add_argument(\"-t\", \"--target-lang\", default=None, metavar=\"TARGET\",\n help=\"target language\")\n group.add_argument(\"--trainpref\", metavar=\"FP\", default=None,\n help=\"train file prefix\")\n group.add_argument(\"--validpref\", metavar=\"FP\", default=None,\n help=\"comma separated, valid file prefixes\")\n group.add_argument(\"--testpref\", metavar=\"FP\", default=None,\n help=\"comma separated, test file prefixes\")\n group.add_argument(\"--destdir\", metavar=\"DIR\", default=\"data-bin\",\n help=\"destination dir\")\n group.add_argument(\"--thresholdtgt\", metavar=\"N\", default=0, type=int,\n help=\"map words appearing less than threshold times to unknown\")\n group.add_argument(\"--thresholdsrc\", metavar=\"N\", default=0, type=int,\n help=\"map words appearing less than threshold times to unknown\")\n group.add_argument(\"--tgtdict\", metavar=\"FP\",\n help=\"reuse given target dictionary\")\n group.add_argument(\"--srcdict\", metavar=\"FP\",\n help=\"reuse given source dictionary\")\n group.add_argument(\"--nwordstgt\", metavar=\"N\", default=-1, type=int,\n help=\"number of target words to retain\")\n group.add_argument(\"--nwordssrc\", metavar=\"N\", default=-1, type=int,\n help=\"number of source words to retain\")\n group.add_argument(\"--alignfile\", metavar=\"ALIGN\", default=None,\n help=\"an alignment file (optional)\")\n parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',\n choices=get_available_dataset_impl(),\n help='output dataset implementation')\n group.add_argument(\"--joined-dictionary\", action=\"store_true\",\n help=\"Generate joined dictionary\")\n group.add_argument(\"--only-source\", action=\"store_true\",\n help=\"Only process the source language\")\n group.add_argument(\"--padding-factor\", metavar=\"N\", default=8, type=int,\n help=\"Pad dictionary size to be multiple of N\")\n group.add_argument(\"--workers\", metavar=\"N\", default=1, type=int,\n help=\"number of parallel workers\")\n # fmt: on\n return parser\n\n\ndef add_dataset_args(parser, train=False, gen=False):\n group = parser.add_argument_group('Dataset and data loading')\n # fmt: off\n group.add_argument('--num-workers', default=1, 
type=int, metavar='N',\n help='how many subprocesses to use for data loading')\n group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true',\n help='ignore too long or too short lines in valid and test set')\n group.add_argument('--max-tokens', type=int, metavar='N',\n help='maximum number of tokens in a batch')\n group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',\n help='maximum number of sentences in a batch')\n group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N',\n help='batch size will be a multiplier of this value')\n parser.add_argument('--dataset-impl', metavar='FORMAT',\n choices=get_available_dataset_impl(),\n help='output dataset implementation')\n if train:\n group.add_argument('--train-subset', default='train', metavar='SPLIT',\n choices=['train', 'valid', 'test'],\n help='data subset to use for training (train, valid, test)')\n group.add_argument('--valid-subset', default='valid', metavar='SPLIT',\n help='comma separated list of data subsets to use for validation'\n ' (train, valid, valid1, test, test1)')\n group.add_argument('--validate-interval', type=int, default=1, metavar='N',\n help='validate every N epochs')\n group.add_argument('--disable-validation', action='store_true',\n help='disable validation')\n group.add_argument('--max-tokens-valid', type=int, metavar='N',\n help='maximum number of tokens in a validation batch'\n ' (defaults to --max-tokens)')\n group.add_argument('--max-sentences-valid', type=int, metavar='N',\n help='maximum number of sentences in a validation batch'\n ' (defaults to --max-sentences)')\n group.add_argument('--curriculum', default=0, type=int, metavar='N',\n help='don\\'t shuffle batches for first N epochs')\n if gen:\n group.add_argument('--gen-subset', default='test', metavar='SPLIT',\n help='data subset to generate (train, valid, test)')\n group.add_argument('--num-shards', default=1, type=int, metavar='N',\n help='shard generation over N shards')\n group.add_argument('--shard-id', default=0, type=int, metavar='ID',\n help='id of the shard to generate (id < num_shards)')\n # fmt: on\n return group\n\n\ndef add_distributed_training_args(parser):\n group = parser.add_argument_group('Distributed training')\n # fmt: off\n group.add_argument('--distributed-world-size', type=int, metavar='N',\n default=max(1, torch.cuda.device_count()),\n help='total number of GPUs across all nodes (default: all visible GPUs)')\n group.add_argument('--distributed-rank', default=0, type=int,\n help='rank of the current worker')\n group.add_argument('--distributed-backend', default='nccl', type=str,\n help='distributed backend')\n group.add_argument('--distributed-init-method', default=None, type=str,\n help='typically tcp://hostname:port that will be used to '\n 'establish initial connetion')\n group.add_argument('--distributed-port', default=-1, type=int,\n help='port number (not required if using --distributed-init-method)')\n group.add_argument('--device-id', '--local_rank', default=0, type=int,\n help='which GPU to use (usually configured automatically)')\n group.add_argument('--distributed-no-spawn', action='store_true',\n help='do not spawn multiple processes even if multiple GPUs are visible')\n group.add_argument('--ddp-backend', default='c10d', type=str,\n choices=['c10d', 'no_c10d'],\n help='DistributedDataParallel backend')\n group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB',\n help='bucket size for reduction')\n 
group.add_argument('--fix-batches-to-gpus', action='store_true',\n help='don\\'t shuffle batches between GPUs; this reduces overall '\n 'randomness and may affect precision but avoids the cost of '\n 're-reading the data')\n group.add_argument('--find-unused-parameters', default=False, action='store_true',\n help='disable unused parameter detection (not applicable to '\n 'no_c10d ddp-backend')\n # fmt: on\n return group\n\n\ndef add_optimization_args(parser):\n group = parser.add_argument_group('Optimization')\n # fmt: off\n group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',\n help='force stop training at specified epoch')\n group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',\n help='force stop training at specified update')\n group.add_argument('--clip-norm', default=25, type=float, metavar='NORM',\n help='clip threshold of gradients')\n group.add_argument('--sentence-avg', action='store_true',\n help='normalize gradients by the number of sentences in a batch'\n ' (default is to normalize by number of tokens)')\n group.add_argument('--update-freq', default='1', metavar='N1,N2,...,N_K',\n type=lambda uf: eval_str_list(uf, type=int),\n help='update parameters every N_i batches, when in epoch i')\n group.add_argument('--lr', '--learning-rate', default='0.25', type=eval_str_list,\n metavar='LR_1,LR_2,...,LR_N',\n help='learning rate for the first N epochs; all epochs >N using LR_N'\n ' (note: this may be interpreted differently depending on --lr-scheduler)')\n group.add_argument('--min-lr', default=-1, type=float, metavar='LR',\n help='stop training when the learning rate reaches this minimum')\n group.add_argument('--use-bmuf', default=False, action='store_true',\n help='specify global optimizer for syncing models on different GPUs/shards')\n # fmt: on\n return group\n\n\ndef add_checkpoint_args(parser):\n group = parser.add_argument_group('Checkpointing')\n # fmt: off\n group.add_argument('--save-dir', metavar='DIR', default='checkpoints',\n help='path to save checkpoints')\n group.add_argument('--restore-file', default='checkpoint_last.pt',\n help='filename from which to load checkpoint '\n '(default: <save-dir>/checkpoint_last.pt')\n group.add_argument('--reset-dataloader', action='store_true',\n help='if set, does not reload dataloader state from the checkpoint')\n group.add_argument('--reset-lr-scheduler', action='store_true',\n help='if set, does not load lr scheduler state from the checkpoint')\n group.add_argument('--reset-meters', action='store_true',\n help='if set, does not load meters from the checkpoint')\n group.add_argument('--reset-optimizer', action='store_true',\n help='if set, does not load optimizer state from the checkpoint')\n group.add_argument('--optimizer-overrides', default=\"{}\", type=str, metavar='DICT',\n help='a dictionary used to override optimizer args when loading a checkpoint')\n group.add_argument('--save-interval', type=int, default=1, metavar='N',\n help='save a checkpoint every N epochs')\n group.add_argument('--save-interval-updates', type=int, default=0, metavar='N',\n help='save a checkpoint (and validate) every N updates')\n group.add_argument('--keep-interval-updates', type=int, default=-1, metavar='N',\n help='keep the last N checkpoints saved with --save-interval-updates')\n group.add_argument('--keep-last-epochs', type=int, default=-1, metavar='N',\n help='keep last N epoch checkpoints')\n group.add_argument('--no-save', action='store_true',\n help='don\\'t save models or checkpoints')\n 
group.add_argument('--no-epoch-checkpoints', action='store_true',\n help='only store last and best checkpoints')\n group.add_argument('--no-last-checkpoints', action='store_true',\n help='don\\'t store last checkpoints')\n group.add_argument('--no-save-optimizer-state', action='store_true',\n help='don\\'t save optimizer-state as part of checkpoint')\n group.add_argument('--best-checkpoint-metric', type=str, default='loss',\n help='metric to use for saving \"best\" checkpoints')\n group.add_argument('--maximize-best-checkpoint-metric', action='store_true',\n help='select the largest metric value for saving \"best\" checkpoints')\n group.add_argument('--flag-vocab-loading', type=int, default=1, metavar='N', \n help='set flag to load the checkpoint')\n group.add_argument('--old-checkpoint-save-dir', metavar='DIR', default='checkpoints',\n help='path to load the old checkpoints from')\n # fmt: on\n return group\n\n\ndef add_common_eval_args(group):\n # fmt: off\n group.add_argument('--path', metavar='FILE',\n help='path(s) to model file(s), colon separated')\n group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None,\n help='remove BPE tokens before scoring (can be set to sentencepiece)')\n group.add_argument('--quiet', action='store_true',\n help='only print final scores')\n group.add_argument('--model-overrides', default=\"{}\", type=str, metavar='DICT',\n help='a dictionary used to override model args at generation '\n 'that were used during model training')\n group.add_argument('--results-path', metavar='RESDIR', type=str, default=None,\n help='path to save eval results (optional)\"')\n # fmt: on\n\n\ndef add_eval_lm_args(parser):\n group = parser.add_argument_group('LM Evaluation')\n add_common_eval_args(group)\n # fmt: off\n group.add_argument('--output-word-probs', action='store_true',\n help='if set, outputs words and their predicted log probabilities to standard output')\n group.add_argument('--output-word-stats', action='store_true',\n help='if set, outputs word statistics such as word count, average probability, etc')\n group.add_argument('--context-window', default=0, type=int, metavar='N',\n help='ensures that every evaluated token has access to a context of at least this size,'\n ' if possible')\n group.add_argument('--softmax-batch', default=sys.maxsize, type=int, metavar='N',\n help='if BxT is more than this, will batch the softmax over vocab to this amount of tokens'\n ' in order to fit into GPU memory')\n # fmt: on\n\n\ndef add_generation_args(parser):\n group = parser.add_argument_group('Generation')\n add_common_eval_args(group)\n # fmt: off\n group.add_argument('--beam', default=5, type=int, metavar='N',\n help='beam size')\n group.add_argument('--nbest', default=1, type=int, metavar='N',\n help='number of hypotheses to output')\n group.add_argument('--max-len-a', default=0, type=float, metavar='N',\n help=('generate sequences of maximum length ax + b, '\n 'where x is the source length'))\n group.add_argument('--max-len-b', default=200, type=int, metavar='N',\n help=('generate sequences of maximum length ax + b, '\n 'where x is the source length'))\n group.add_argument('--min-len', default=1, type=float, metavar='N',\n help=('minimum generation length'))\n group.add_argument('--match-source-len', default=False, action='store_true',\n help=('generations should match the source length'))\n group.add_argument('--no-early-stop', action='store_true',\n help='deprecated')\n group.add_argument('--unnormalized', action='store_true',\n help='compare unnormalized 
hypothesis scores')\n group.add_argument('--no-beamable-mm', action='store_true',\n help='don\\'t use BeamableMM in attention layers')\n group.add_argument('--lenpen', default=1, type=float,\n help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')\n group.add_argument('--unkpen', default=0, type=float,\n help='unknown word penalty: <0 produces more unks, >0 produces fewer')\n group.add_argument('--replace-unk', nargs='?', const=True, default=None,\n help='perform unknown replacement (optionally with alignment dictionary)')\n group.add_argument('--sacrebleu', action='store_true',\n help='score with sacrebleu')\n group.add_argument('--score-reference', action='store_true',\n help='just score the reference translation')\n group.add_argument('--prefix-size', default=0, type=int, metavar='PS',\n help='initialize generation by target prefix of given length')\n group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N',\n help='ngram blocking such that this size ngram cannot be repeated in the generation')\n group.add_argument('--sampling', action='store_true',\n help='sample hypotheses instead of using beam search')\n group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',\n help='sample from top K likely next words instead of all words')\n group.add_argument('--sampling-topp', default=-1.0, type=float, metavar='PS',\n help='sample from the smallest set whose cumulative probability mass exceeds p for next words')\n group.add_argument('--temperature', default=1., type=float, metavar='N',\n help='temperature for generation')\n group.add_argument('--diverse-beam-groups', default=-1, type=int, metavar='N',\n help='number of groups for Diverse Beam Search')\n group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',\n help='strength of diversity penalty for Diverse Beam Search')\n group.add_argument('--print-alignment', action='store_true',\n help='if set, uses attention feedback to compute and print alignment to source tokens')\n # fmt: on\n return group\n\n\ndef add_interactive_args(parser):\n group = parser.add_argument_group('Interactive')\n # fmt: off\n group.add_argument('--buffer-size', default=0, type=int, metavar='N',\n help='read this many sentences into a buffer before processing them')\n group.add_argument('--input', default='-', type=str, metavar='FILE',\n help='file to read from; use - for stdin')\n # fmt: on\n\n\ndef add_model_args(parser):\n group = parser.add_argument_group('Model configuration')\n # fmt: off\n\n # Model definitions can be found under fairseq/models/\n #\n # The model architecture can be specified in several ways.\n # In increasing order of priority:\n # 1) model defaults (lowest priority)\n # 2) --arch argument\n # 3) --encoder/decoder-* arguments (highest priority)\n from fairseq.models import ARCH_MODEL_REGISTRY\n group.add_argument('--arch', '-a', default='fconv', metavar='ARCH', required=True,\n choices=ARCH_MODEL_REGISTRY.keys(),\n help='Model Architecture')\n # fmt: on\n return group\n"
] |
[
[
"torch.cuda.device_count"
]
] |
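The `parse_args_and_arch` function in the row above relies on argparse's two-pass pattern: parse the known args first, register options that depend on an early choice, then parse again. A self-contained sketch of that pattern with made-up `--arch` choices (these are illustrative, not fairseq's registries):

```python
import argparse

parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument('--arch', default='small', choices=['small', 'big'])

argv = ['--arch', 'big', '--layers', '24']

# first pass: discover which architecture was requested, ignore the rest
args, _ = parser.parse_known_args(argv)

# second pass: register arch-specific options, then parse everything
group = parser.add_argument_group('Model-specific configuration',
                                  argument_default=argparse.SUPPRESS)
default_layers = 24 if args.arch == 'big' else 6
group.add_argument('--layers', type=int, default=default_layers)

args = parser.parse_args(argv)
print(args.arch, args.layers)  # big 24
```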
L30bigdick/Nashpy
|
[
"2033b4ffe6804efb50a324116e7c3776932546f3"
] |
[
"src/nashpy/utils/is_best_response.py"
] |
[
"\"\"\"Functions for testing of best responses\"\"\"\nimport numpy as np\n\n\ndef is_best_response(A: np.ndarray, sigma_c: np.ndarray, sigma_r: np.ndarray) -> bool:\n \"\"\"\n Checks if sigma_r is a best response to sigma_c when A is the payoff matrix\n for the player playing sigma_r.\n\n Parameters\n ----------\n A : array\n The row player payoff matrix\n sigma_c : array\n The column player strategy\n sigma_r : array\n The row player strategy\n\n Returns\n -------\n bool\n If True it indicates that sigma_r is a best response to sigma_c\n \"\"\"\n row_utilities = A @ sigma_c\n max_utility = np.max(row_utilities)\n return all(row_utilities[sigma_r > 0] == max_utility)\n"
] |
[
[
"numpy.max"
]
] |
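The `is_best_response` check above compares the expected utility of every row that `sigma_r` plays with positive probability against the maximum achievable row utility. A short worked example with Matching Pennies:

```python
import numpy as np

A = np.array([[1, -1], [-1, 1]])  # Matching Pennies, row player payoffs

# Against a uniform column strategy both rows earn 0, so each pure row is a best response.
uniform = np.array([0.5, 0.5])
assert is_best_response(A, sigma_c=uniform, sigma_r=np.array([1, 0]))
assert is_best_response(A, sigma_c=uniform, sigma_r=np.array([0, 1]))

# Against a column player who always plays column 0, only the first row is.
first_col = np.array([1, 0])
assert is_best_response(A, sigma_c=first_col, sigma_r=np.array([1, 0]))
assert not is_best_response(A, sigma_c=first_col, sigma_r=np.array([0, 1]))
```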
ParikhKadam/zenml
|
[
"867e4d4c982a50447bd182b30af37f2141dac5a4",
"867e4d4c982a50447bd182b30af37f2141dac5a4"
] |
[
"legacy/steps/split/utils_test.py",
"legacy/steps/split/split_step_test.py"
] |
[
"# Copyright (c) ZenML GmbH 2020. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Split step utils tests.\"\"\"\n\nimport pytest\nimport tensorflow as tf\n\nfrom zenml.steps.split.utils import get_categorical_value, \\\n partition_cat_list\n\n\ndef test_get_categorical_value():\n\n feature = {\"int64cat_col\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=[1])),\n \"floatcat_col\":\n tf.train.Feature(float_list=tf.train.FloatList(value=[42.0])),\n \"bytescat_col\":\n tf.train.Feature(bytes_list=tf.train.BytesList(\n value=[b\"test_string\"]))\n }\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=feature))\n\n int_value = get_categorical_value(tf_example, cat_col=\"int64cat_col\")\n float_value = get_categorical_value(tf_example, cat_col=\"floatcat_col\")\n str_value = get_categorical_value(tf_example, cat_col=\"bytescat_col\")\n\n # ensure type consistency\n assert isinstance(int_value, int)\n assert isinstance(float_value, float)\n assert isinstance(str_value, str)\n\n # tests for a categorical feature that is not in the data\n with pytest.raises(AssertionError):\n _ = get_categorical_value(tf_example, cat_col=\"testest\")\n\n\ndef test_partition_cat_list():\n cat_list = [f\"cat{i}\" for i in range(10)]\n\n c_ratio = {\"train\": 0.5,\n \"eval\": 0.3,\n \"test\": 0.2}\n\n cat_dict = partition_cat_list(cat_list=cat_list, c_ratio=c_ratio)\n\n assert [len(li) for li in cat_dict.values()] == [5, 3, 2]\n\n # test: two categories, but three folds\n cat_list = [f\"cat{i}\" for i in range(2)]\n\n c_ratio = {\"train\": 0.33,\n \"eval\": 0.33,\n \"test\": 0.34}\n\n cat_dict = partition_cat_list(cat_list=cat_list, c_ratio=c_ratio)\n\n # expected: train and eval each get one, test empty\n assert [len(li) for li in cat_dict.values()] == [1, 0, 1]\n\n # test: three categories and three folds\n cat_list = [f\"cat{i}\" for i in range(3)]\n\n c_ratio = {\"train\": 0.33,\n \"eval\": 0.33,\n \"test\": 0.34}\n\n cat_dict = partition_cat_list(cat_list=cat_list, c_ratio=c_ratio)\n\n # expected: train, eval, test each get one\n assert [len(li) for li in cat_dict.values()] == [1, 1, 1]\n",
"# Copyright (c) ZenML GmbH 2020. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Tests for different split steps.\"\"\"\n\nimport random\n\nimport pytest\nimport tensorflow as tf\n\nfrom zenml.steps.split import CategoricalDomainSplit\nfrom zenml.steps.split import CategoricalRatioSplit\nfrom zenml.steps.split import NoSplit\nfrom zenml.steps.split import RandomSplit\n\n\n@pytest.fixture\ndef create_random_dummy_data():\n def create_data():\n cat_col = \"my_cat_col\"\n possible_values = [\"value{}\".format(i + 1) for i in range(3)]\n dummy_data = []\n\n for i in range(10):\n value = random.choice(possible_values).encode()\n feature = {cat_col: tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[value]))}\n\n dummy_data.append(tf.train.Example(\n features=tf.train.Features(feature=feature)))\n\n return dummy_data\n\n return create_data\n\n\n@pytest.fixture\ndef create_structured_dummy_data():\n def create_data(counts):\n\n cat_col = \"my_cat_col\"\n possible_values = [\"value{}\".format(i + 1) for i in range(len(counts))]\n dummy_data = []\n\n for i, nums in enumerate(counts):\n value = possible_values[i].encode()\n feature = {cat_col: tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[value]))}\n for n in range(nums):\n dummy_data.append(tf.train.Example(\n features=tf.train.Features(feature=feature)))\n\n return dummy_data\n\n return create_data\n\n\ndef test_no_split(create_random_dummy_data):\n nosplit = NoSplit()\n\n # test defaults\n assert not nosplit.schema\n assert not nosplit.statistics\n\n dummy_data = create_random_dummy_data()\n\n split_folds = [nosplit.partition_fn(ex, nosplit.get_num_splits())\n for ex in dummy_data]\n\n # assert nosplit returns only one index\n assert all(fold == 0 for fold in split_folds)\n\n\ndef test_random_split(create_random_dummy_data):\n one_fold = {\"train\": 1.0}\n\n # only one argument present in split map\n with pytest.raises(AssertionError):\n _ = RandomSplit(split_map=one_fold)\n\n bogus_entries = {\"train\": 0.5,\n \"eval\": \"testest\"}\n\n # not all entries in split map are floats\n with pytest.raises(AssertionError):\n _ = RandomSplit(split_map=bogus_entries)\n\n split_map = {\"train\": 1.0,\n \"eval\": 0.0}\n\n random_split = RandomSplit(split_map=split_map)\n\n # test defaults\n assert not random_split.schema\n assert not random_split.statistics\n\n dummy_data = create_random_dummy_data()\n\n split_folds = [random_split.partition_fn(ex, random_split.get_num_splits())\n for ex in dummy_data]\n\n # artificial no split result tests, everything else is random\n assert all(fold == 0 for fold in split_folds)\n\n\ndef test_categorical_domain_split(create_structured_dummy_data):\n cat_col = \"my_cat_col\"\n\n one_fold = {\"train\": []}\n\n # only one argument present in split map\n with pytest.raises(AssertionError):\n _ = CategoricalDomainSplit(categorical_column=cat_col,\n split_map=one_fold)\n\n # real logic begins here\n split_map = {\"train\": [\"value1\"],\n \"eval\": [\"value2\"],\n \"test\": 
[\"value3\"]}\n\n domain_split = CategoricalDomainSplit(categorical_column=cat_col,\n split_map=split_map)\n\n # test defaults\n assert not domain_split.schema\n assert not domain_split.statistics\n\n # each categorical value in the split map above gets one example\n counts = [1, 1, 1]\n\n dummy_data = create_structured_dummy_data(counts)\n\n split_folds = [domain_split.partition_fn(ex, domain_split.get_num_splits())\n for ex in dummy_data]\n\n # fold indices, zero-based, should correspond to their dict counterparts\n assert split_folds == [0, 1, 2]\n\n # each categorical value in the split map above gets one example,\n # plus one out-of-split-map (unseen example)\n counts = [1, 1, 1, 1]\n\n dummy_data = create_structured_dummy_data(counts)\n\n split_folds = [domain_split.partition_fn(ex, domain_split.get_num_splits())\n for ex in dummy_data]\n\n assert domain_split.get_num_splits() == 4\n\n # default behavior is skipping (index n-1 = 3)\n assert split_folds == [0, 1, 2, 3]\n\n # test whether eval assignment works\n domain_split.unknown_category_policy = \"eval\"\n\n # each categorical value in the split map above gets one example,\n # plus one out-of-split-map (unseen example)\n counts = [1, 1, 1, 1]\n\n dummy_data = create_structured_dummy_data(counts)\n\n split_folds = [domain_split.partition_fn(ex, domain_split.get_num_splits())\n for ex in dummy_data]\n\n assert domain_split.get_num_splits() == 3\n\n # default behavior is eval (index 1)\n assert split_folds == [0, 1, 2, 1]\n\n\ndef test_categorical_ratio_split(create_structured_dummy_data):\n cat_col = \"my_cat_col\"\n\n categories = []\n\n one_fold = {\"train\": 1.0}\n\n # only one fold present in split map\n with pytest.raises(AssertionError):\n _ = CategoricalRatioSplit(categorical_column=cat_col,\n categories=categories,\n split_ratio=one_fold)\n\n # real logic begins here\n categories = [\"value{}\".format(i + 1) for i in range(3)]\n\n split_ratio = {\"train\": 0.33,\n \"eval\": 0.33,\n \"test\": 0.34}\n\n ratio_split = CategoricalRatioSplit(categorical_column=cat_col,\n categories=categories,\n split_ratio=split_ratio)\n\n # test defaults\n assert not ratio_split.schema\n assert not ratio_split.statistics\n\n # each categorical value in the split map above gets one example\n counts = [1, 1, 1]\n\n dummy_data = create_structured_dummy_data(counts)\n\n split_folds = [ratio_split.partition_fn(ex, ratio_split.get_num_splits())\n for ex in dummy_data]\n\n # fold indices, zero-based, should correspond to their dict counterparts\n assert split_folds == [0, 1, 2]\n\n # each categorical value in the split map above gets one example,\n # plus one out-of-split-map (unseen example)\n counts = [1, 1, 1, 1]\n\n dummy_data = create_structured_dummy_data(counts)\n\n split_folds = [ratio_split.partition_fn(ex, ratio_split.get_num_splits())\n for ex in dummy_data]\n\n assert ratio_split.get_num_splits() == 4\n\n # default behavior is assigning everything into eval (index 1)\n assert split_folds == [0, 1, 2, 3]\n\n # test whether eval assignment works\n ratio_split.unknown_category_policy = \"eval\"\n\n # each categorical value in the split map above gets one example,\n # plus one out-of-split-map (unseen example)\n counts = [1, 1, 1, 1]\n\n dummy_data = create_structured_dummy_data(counts)\n\n split_folds = [ratio_split.partition_fn(ex, ratio_split.get_num_splits())\n for ex in dummy_data]\n\n assert ratio_split.get_num_splits() == 3\n\n # default behavior is eval (index 1)\n assert split_folds == [0, 1, 2, 1]\n\n\ndef 
test_categorical_split_ordering(create_structured_dummy_data):\n cat_col = \"my_cat_col\"\n\n # real logic begins here\n split_map = {\"train\": [\"value1\"],\n \"eval\": [\"value2\"]}\n\n domain_split = CategoricalDomainSplit(categorical_column=cat_col,\n split_map=split_map)\n\n # each categorical value in the split map above gets one example\n counts = [3, 1]\n\n dummy_data = create_structured_dummy_data(counts)\n\n split_folds = [domain_split.partition_fn(ex, domain_split.get_num_splits())\n for ex in dummy_data]\n\n # expected: first 3 are 0 (value1, going into train),\n # last is 1 (value2, going into eval)\n assert split_folds == [0, 0, 0, 1]\n assert domain_split.get_split_names() == [\"train\", \"eval\", \"skip\"]\n\n ################################################\n # NOW: Order reversed, eval comes before train #\n ################################################\n\n # same split map, fold orders reversed\n split_map = {\"eval\": [\"value2\"],\n \"train\": [\"value1\"]}\n\n domain_split = CategoricalDomainSplit(categorical_column=cat_col,\n split_map=split_map)\n\n # value_1 gets 3 examples, value_2 gets 1 example\n counts = [3, 1]\n\n dummy_data = create_structured_dummy_data(counts)\n\n split_folds = [domain_split.partition_fn(ex, domain_split.get_num_splits())\n for ex in dummy_data]\n\n # expected: first 3 are 1 (value1, going into train), last is 0 (eval)\n assert split_folds == [1, 1, 1, 0]\n assert domain_split.get_split_names() == [\"eval\", \"train\", \"skip\"]\n"
] |
[
[
"tensorflow.train.BytesList",
"tensorflow.train.FloatList",
"tensorflow.train.Features",
"tensorflow.train.Int64List"
],
[
"tensorflow.train.Features",
"tensorflow.train.BytesList"
]
] |
yhl48/transformers
|
[
"b320d87eceb369ea22d5cd73866499851cb2cca3"
] |
[
"src/transformers/models/wav2vec2/modeling_wav2vec2.py"
] |
[
"# coding=utf-8\n# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch Wav2Vec2 model.\"\"\"\n\nimport math\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...deepspeed import is_deepspeed_zero3_enabled\nfrom ...modeling_outputs import (\n BaseModelOutput,\n CausalLMOutput,\n MaskedLMOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import torch_int_div\nfrom ...utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n logging,\n replace_return_docstrings,\n)\nfrom .configuration_wav2vec2 import Wav2Vec2Config\n\n\nlogger = logging.get_logger(__name__)\n\n\n_HIDDEN_STATES_START_POSITION = 2\n\n# General docstring\n_CONFIG_FOR_DOC = \"Wav2Vec2Config\"\n_PROCESSOR_FOR_DOC = \"Wav2Vec2Processor\"\n\n# Base docstring\n_CHECKPOINT_FOR_DOC = \"facebook/wav2vec2-base-960h\"\n_EXPECTED_OUTPUT_SHAPE = [1, 292, 768]\n\n# CTC docstring\n_CTC_EXPECTED_OUTPUT = \"'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'\"\n_CTC_EXPECTED_LOSS = 53.48\n\n# Audio class docstring\n_FEAT_EXTRACTOR_FOR_DOC = \"Wav2Vec2FeatureExtractor\"\n_SEQ_CLASS_CHECKPOINT = \"superb/wav2vec2-base-superb-ks\"\n_SEQ_CLASS_EXPECTED_OUTPUT = \"'_unknown_'\"\n_SEQ_CLASS_EXPECTED_LOSS = 6.54\n\n# Frame class docstring\n_FRAME_CLASS_CHECKPOINT = \"anton-l/wav2vec2-base-superb-sd\"\n_FRAME_EXPECTED_OUTPUT = [0, 0]\n\n# Speaker Verification docstring\n_XVECTOR_CHECKPOINT = \"anton-l/wav2vec2-base-superb-sv\"\n_XVECTOR_EXPECTED_OUTPUT = 0.98\n\n\nWAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"facebook/wav2vec2-base-960h\",\n \"facebook/wav2vec2-large-960h\",\n \"facebook/wav2vec2-large-960h-lv60\",\n \"facebook/wav2vec2-large-960h-lv60-self\",\n # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2\n]\n\n\n@dataclass\nclass Wav2Vec2BaseModelOutput(ModelOutput):\n \"\"\"\n Output type of [`Wav2Vec2BaseModelOutput`], with potential hidden states and attentions.\n\n Args:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n extract_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, conv_dim[-1])`):\n Sequence of extracted feature vectors of the last convolutional layer of the model.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, 
hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor = None\n extract_features: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass Wav2Vec2ForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of [`Wav2Vec2ForPreTraining`], with potential hidden states and attentions.\n\n Args:\n loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):\n Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official\n paper](https://arxiv.org/pdf/2006.11477.pdf) . (classification) loss.\n projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):\n Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked\n projected quantized states.\n projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):\n Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive\n target vectors for contrastive loss.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n contrastive_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):\n The contrastive loss (L_m) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) .\n diversity_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):\n The diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf) .\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n projected_states: torch.FloatTensor = None\n projected_quantized_states: torch.FloatTensor = None\n codevector_perplexity: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n contrastive_loss: Optional[torch.FloatTensor] = None\n diversity_loss: Optional[torch.FloatTensor] = None\n\n\n@dataclass\nclass XVectorOutput(ModelOutput):\n \"\"\"\n Output type of [`Wav2Vec2ForXVector`].\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when 
`labels` is provided):\n Classification loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`):\n Classification hidden states before AMSoftmax.\n embeddings (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`):\n Utterance embeddings used for vector similarity-based retrieval.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n embeddings: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\ndef _compute_mask_indices(\n shape: Tuple[int, int],\n mask_prob: float,\n mask_length: int,\n attention_mask: Optional[torch.LongTensor] = None,\n min_masks: int = 0,\n) -> np.ndarray:\n \"\"\"\n Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for\n ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on\n CPU as part of the preprocessing during training.\n\n Args:\n shape: The shape for which to compute masks. This should be of a tuple of size 2 where\n the first element is the batch size and the second element is the length of the axis to span.\n mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of\n independently generated mask spans of length `mask_length` is computed by\n `mask_prob*shape[1]/mask_length`. 
Note that due to overlaps, `mask_prob` is an upper bound and the\n actual percentage will be smaller.\n mask_length: size of the mask\n min_masks: minimum number of masked spans\n attention_mask: A (right-padded) attention mask which independently shortens the feature axis of\n each batch dimension.\n \"\"\"\n batch_size, sequence_length = shape\n\n if mask_length < 1:\n raise ValueError(\"`mask_length` has to be bigger than 0.\")\n\n if mask_length > sequence_length:\n raise ValueError(\n f\"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}\"\n f\" and `sequence_length`: {sequence_length}\"\n )\n\n # epsilon is used for probabilistic rounding\n epsilon = np.random.rand(1).item()\n\n def compute_num_masked_span(input_length):\n \"\"\"Given input length, compute how many spans should be masked\"\"\"\n num_masked_span = int(mask_prob * input_length / mask_length + epsilon)\n num_masked_span = max(num_masked_span, min_masks)\n\n # make sure num masked span <= sequence_length\n if num_masked_span * mask_length > sequence_length:\n num_masked_span = sequence_length // mask_length\n\n # make sure num_masked span is also <= input_length - (mask_length - 1)\n if input_length - (mask_length - 1) < num_masked_span:\n num_masked_span = max(input_length - (mask_length - 1), 0)\n\n return num_masked_span\n\n # compute number of masked spans in batch\n input_lengths = (\n attention_mask.sum(-1).detach().tolist()\n if attention_mask is not None\n else [sequence_length for _ in range(batch_size)]\n )\n\n # SpecAugment mask to fill\n spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)\n spec_aug_mask_idxs = []\n\n max_num_masked_span = compute_num_masked_span(sequence_length)\n\n if max_num_masked_span == 0:\n return spec_aug_mask\n\n for input_length in input_lengths:\n # compute num of masked spans for this input\n num_masked_span = compute_num_masked_span(input_length)\n\n # get random indices to mask\n spec_aug_mask_idx = np.random.choice(\n np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False\n )\n\n # pick first sampled index that will serve as a dummy index to pad vector\n # to ensure same dimension for all batches due to probabilistic rounding\n # Picking first sample just pads those vectors twice.\n if len(spec_aug_mask_idx) == 0:\n # this case can only happen if `input_length` is strictly smaller than\n # `sequence_length` in which case the last token has to be a padding\n # token which we can use as a dummy mask id\n dummy_mask_idx = sequence_length - 1\n else:\n dummy_mask_idx = spec_aug_mask_idx[0]\n\n spec_aug_mask_idx = np.concatenate(\n [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]\n )\n spec_aug_mask_idxs.append(spec_aug_mask_idx)\n\n spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)\n\n # expand masked indices to masked spans\n spec_aug_mask_idxs = np.broadcast_to(\n spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)\n )\n spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)\n\n # add offset to the starting indexes so that the indexes now create a span\n offsets = np.arange(mask_length)[None, None, :]\n offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(\n batch_size, max_num_masked_span * mask_length\n )\n spec_aug_mask_idxs = spec_aug_mask_idxs + offsets\n\n # ensure that we cannot have indices larger than sequence_length\n if spec_aug_mask_idxs.max() > sequence_length - 1:\n spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1\n\n # scatter indices to mask\n np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)\n\n return spec_aug_mask\n\n
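\n# A minimal usage sketch (illustrative only, not part of the library API): pre-training\n# preprocessing typically computes the SpecAugment time mask first and then samples\n# negatives from it with `_sample_negative_indices` (defined just below). The concrete\n# numbers here are assumptions: `mask_prob=0.2` and `mask_length=2` mirror the doc\n# example in `Wav2Vec2ForPreTraining.forward`, and `num_negatives=100` is the K = 100\n# distractors used in the wav2vec 2.0 paper.\n#\n#     batch_size, sequence_length = 2, 50\n#     mask_time_indices = _compute_mask_indices(\n#         shape=(batch_size, sequence_length), mask_prob=0.2, mask_length=2\n#     )  # -> bool array of shape (batch_size, sequence_length); True marks masked steps\n#     sampled_negative_indices = _sample_negative_indices(\n#         features_shape=(batch_size, sequence_length),\n#         num_negatives=100,\n#         mask_time_indices=mask_time_indices,\n#     )  # -> int32 array of shape (batch_size, sequence_length, 100)\n\n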
def _sample_negative_indices(\n features_shape: Tuple, num_negatives: int, mask_time_indices: Optional[np.ndarray] = None\n):\n \"\"\"\n Sample `num_negatives` vectors from feature vectors.\n \"\"\"\n batch_size, sequence_length = features_shape\n\n # generate indices of the positive vectors themselves, repeat them `num_negatives` times\n sequence_length_range = np.arange(sequence_length)\n\n # get `num_negatives` random vector indices from the same utterance\n sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)\n\n mask_time_indices = (\n mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool)\n )\n\n for batch_idx in range(batch_size):\n high = mask_time_indices[batch_idx].sum() - 1\n mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]]\n\n feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives))\n sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives))\n # avoid sampling the same positive vector, but keep the distribution uniform\n sampled_indices[sampled_indices >= feature_indices] += 1\n\n # remap to actual indices\n sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices]\n\n # correct for batch size\n sampled_negative_indices[batch_idx] += batch_idx * sequence_length\n\n return sampled_negative_indices\n\n\nclass Wav2Vec2NoLayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\nclass Wav2Vec2LayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n\n hidden_states = hidden_states.transpose(-2, -1)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = hidden_states.transpose(-2, -1)\n\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\nclass Wav2Vec2GroupNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n 
stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\nclass Wav2Vec2PositionalConvEmbedding(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.conv = nn.Conv1d(\n config.hidden_size,\n config.hidden_size,\n kernel_size=config.num_conv_pos_embeddings,\n padding=config.num_conv_pos_embeddings // 2,\n groups=config.num_conv_pos_embedding_groups,\n )\n\n if is_deepspeed_zero3_enabled():\n import deepspeed\n\n with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):\n self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n deepspeed.zero.register_external_parameter(self, self.conv.weight_v)\n deepspeed.zero.register_external_parameter(self, self.conv.weight_g)\n else:\n self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n\n self.padding = Wav2Vec2SamePadLayer(config.num_conv_pos_embeddings)\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = hidden_states.transpose(1, 2)\n\n hidden_states = self.conv(hidden_states)\n hidden_states = self.padding(hidden_states)\n hidden_states = self.activation(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n return hidden_states\n\n\nclass Wav2Vec2SamePadLayer(nn.Module):\n def __init__(self, num_conv_pos_embeddings):\n super().__init__()\n self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0\n\n def forward(self, hidden_states):\n if self.num_pad_remove > 0:\n hidden_states = hidden_states[:, :, : -self.num_pad_remove]\n return hidden_states\n\n\nclass Wav2Vec2FeatureEncoder(nn.Module):\n \"\"\"Construct the features from raw audio waveform\"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n if config.feat_extract_norm == \"group\":\n conv_layers = [Wav2Vec2GroupNormConvLayer(config, layer_id=0)] + [\n Wav2Vec2NoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)\n ]\n elif config.feat_extract_norm == \"layer\":\n conv_layers = [\n Wav2Vec2LayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)\n ]\n else:\n raise ValueError(\n f\"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']\"\n )\n self.conv_layers = nn.ModuleList(conv_layers)\n self.gradient_checkpointing = False\n self._requires_grad = True\n\n def _freeze_parameters(self):\n for param in self.parameters():\n param.requires_grad = False\n self._requires_grad = False\n\n def forward(self, input_values):\n hidden_states = input_values[:, None]\n\n # make sure hidden_states require grad for gradient_checkpointing\n if self._requires_grad and self.training:\n hidden_states.requires_grad = True\n\n for conv_layer in self.conv_layers:\n if self._requires_grad and self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(conv_layer),\n hidden_states,\n )\n else:\n hidden_states = conv_layer(hidden_states)\n\n return hidden_states\n\n\nclass 
Wav2Vec2FeatureExtractor(Wav2Vec2FeatureEncoder):\n def __init__(self, config):\n super().__init__(config)\n warnings.warn(\n f\"The class `{self.__class__.__name__}` has been deprecated \"\n \"and will be removed in Transformers v5. \"\n f\"Use `{self.__class__.__bases__[0].__name__}` instead.\",\n FutureWarning,\n )\n\n\nclass Wav2Vec2FeatureProjection(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)\n self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)\n self.dropout = nn.Dropout(config.feat_proj_dropout)\n\n def forward(self, hidden_states):\n # non-projected hidden states are needed for quantization\n norm_hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.projection(norm_hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states, norm_hidden_states\n\n\n# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Wav2Vec2\nclass Wav2Vec2Attention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n is_decoder: bool = False,\n bias: bool = True,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n\n if (self.head_dim * num_heads) != self.embed_dim:\n raise ValueError(\n f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}\"\n f\" and `num_heads`: {num_heads}).\"\n )\n self.scaling = self.head_dim**-0.5\n self.is_decoder = is_decoder\n\n self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n key_value_states: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.Tensor] = None,\n layer_head_mask: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n\n bsz, tgt_len, _ = hidden_states.size()\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scaling\n # get key, value proj\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = torch.cat([past_key_value[0], key_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n else:\n # self_attention\n key_states = 
self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_states, value_states)\n\n proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)\n key_states = key_states.view(*proj_shape)\n value_states = value_states.view(*proj_shape)\n\n src_len = key_states.size(1)\n attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))\n\n if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}\"\n )\n\n if attention_mask is not None:\n if attention_mask.size() != (bsz, 1, tgt_len, src_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"\n )\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n if layer_head_mask is not None:\n if layer_head_mask.size() != (self.num_heads,):\n raise ValueError(\n f\"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}\"\n )\n attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if output_attentions:\n # this operation is a bit awkward, but it's required to\n # make sure that attn_weights keeps its gradient.\n # In order to do so, attn_weights have to be reshaped\n # twice and have to be reused in the following\n attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)\n else:\n attn_weights_reshaped = None\n\n attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)\n\n attn_output = torch.bmm(attn_probs, value_states)\n\n if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}\"\n )\n\n attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)\n attn_output = attn_output.transpose(1, 2)\n\n # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be\n # partitioned aross GPUs when using tensor-parallelism.\n attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)\n\n attn_output = self.out_proj(attn_output)\n\n return attn_output, attn_weights_reshaped, past_key_value\n\n\nclass Wav2Vec2FeedForward(nn.Module):\n def __init__(self, config):\n super().__init__()\n 
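# standard Transformer position-wise feed-forward block: expand hidden_size ->\n # intermediate_size, apply the configured activation, then project back down,\n # with dropout after the activation and after the output projection\n 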
self.intermediate_dropout = nn.Dropout(config.activation_dropout)\n\n self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.output_dropout = nn.Dropout(config.hidden_dropout)\n\n def forward(self, hidden_states):\n hidden_states = self.intermediate_dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n hidden_states = self.intermediate_dropout(hidden_states)\n\n hidden_states = self.output_dense(hidden_states)\n hidden_states = self.output_dropout(hidden_states)\n return hidden_states\n\n\nclass Wav2Vec2EncoderLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = Wav2Vec2Attention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=False,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = Wav2Vec2FeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, output_attentions=False):\n attn_residual = hidden_states\n hidden_states, attn_weights, _ = self.attention(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = hidden_states + self.feed_forward(hidden_states)\n hidden_states = self.final_layer_norm(hidden_states)\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\nclass Wav2Vec2EncoderLayerStableLayerNorm(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = Wav2Vec2Attention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=False,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = Wav2Vec2FeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, output_attentions=False):\n attn_residual = hidden_states\n hidden_states = self.layer_norm(hidden_states)\n hidden_states, attn_weights, _ = self.attention(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\nclass Wav2Vec2Encoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList([Wav2Vec2EncoderLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n 
self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens output 0\n hidden_states[~attention_mask] = 0.0\n\n # extend attention_mask\n attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0\n attention_mask = attention_mask.expand(\n attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]\n )\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = hidden_states + position_embeddings\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n\n for layer in self.layers:\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n )\n else:\n layer_outputs = layer(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = layer_outputs[0]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\nclass Wav2Vec2EncoderStableLayerNorm(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList(\n [Wav2Vec2EncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]\n )\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens are not attended to\n hidden_states[~attention_mask] = 0\n\n # extend attention_mask\n attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0\n attention_mask = attention_mask.expand(\n attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]\n )\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = 
hidden_states + position_embeddings\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n\n for layer in self.layers:\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n )\n else:\n layer_outputs = layer(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = layer_outputs[0]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n hidden_states = self.layer_norm(hidden_states)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\nclass Wav2Vec2GumbelVectorQuantizer(nn.Module):\n \"\"\"\n Vector quantization using gumbel softmax. 
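Each of `config.num_codevector_groups` codebook groups selects one of\n `config.num_codevectors_per_group` codewords per time step; the selected codewords (each of dimension\n `config.codevector_dim // config.num_codevector_groups`) are concatenated into a single quantized vector. 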
See [CATEGORICAL REPARAMETERIZATION WITH\n GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.num_groups = config.num_codevector_groups\n self.num_vars = config.num_codevectors_per_group\n\n if config.codevector_dim % self.num_groups != 0:\n raise ValueError(\n f\"`config.codevector_dim` {config.codevector_dim} must be divisible \"\n f\"by `config.num_codevector_groups` {self.num_groups} for concatenation\"\n )\n\n # storage for codebook variables (codewords)\n self.codevectors = nn.Parameter(\n torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)\n )\n self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)\n\n # can be decayed for training\n self.temperature = 2\n\n @staticmethod\n def _compute_perplexity(probs, mask=None):\n if mask is not None:\n mask_extended = mask.flatten()[:, None, None].expand(probs.shape)\n probs = torch.where(mask_extended, probs, torch.zeros_like(probs))\n marginal_probs = probs.sum(dim=0) / mask.sum()\n else:\n marginal_probs = probs.mean(dim=0)\n\n perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()\n return perplexity\n\n def forward(self, hidden_states, mask_time_indices=None):\n batch_size, sequence_length, hidden_size = hidden_states.shape\n\n # project to codevector dim\n hidden_states = self.weight_proj(hidden_states)\n hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)\n\n if self.training:\n # sample code vector probs via gumbel in differentiable way\n codevector_probs = nn.functional.gumbel_softmax(\n hidden_states.float(), tau=self.temperature, hard=True\n ).type_as(hidden_states)\n\n # compute perplexity\n codevector_soft_dist = torch.softmax(\n hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1\n )\n perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices)\n else:\n # take argmax in non-differentiable way\n # compute hard codevector distribution (one hot)\n codevector_idx = hidden_states.argmax(dim=-1)\n codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(\n -1, codevector_idx.view(-1, 1), 1.0\n )\n codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)\n\n perplexity = self._compute_perplexity(codevector_probs, mask_time_indices)\n\n codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)\n # use probs to retrieve codevectors\n codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors\n codevectors = (\n codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)\n .sum(-2)\n .view(batch_size, sequence_length, -1)\n )\n\n return codevectors, perplexity\n\n\nclass Wav2Vec2Adapter(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n # feature dim might need to be down-projected\n if config.output_hidden_size != config.hidden_size:\n self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)\n self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)\n else:\n self.proj = self.proj_layer_norm = None\n\n self.layers = nn.ModuleList(Wav2Vec2AdapterLayer(config) for _ in range(config.num_adapter_layers))\n self.layerdrop = config.layerdrop\n\n def forward(self, hidden_states):\n # down project hidden_states if necessary\n if self.proj is not None and self.proj_layer_norm is not None:\n 
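# project from config.hidden_size down to config.output_hidden_size and re-normalize\n # before the adapter's convolutional layers\n 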
hidden_states = self.proj(hidden_states)\n hidden_states = self.proj_layer_norm(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n\n for layer in self.layers:\n layerdrop_prob = np.random.random()\n if not self.training or (layerdrop_prob > self.layerdrop):\n hidden_states = layer(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n return hidden_states\n\n\nclass Wav2Vec2AdapterLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.conv = nn.Conv1d(\n config.output_hidden_size,\n 2 * config.output_hidden_size,\n config.adapter_kernel_size,\n stride=config.adapter_stride,\n padding=1,\n )\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = nn.functional.glu(hidden_states, dim=1)\n\n return hidden_states\n\n\nclass Wav2Vec2PreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = Wav2Vec2Config\n base_model_prefix = \"wav2vec2\"\n main_input_name = \"input_values\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n supports_gradient_checkpointing = True\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n # gumbel softmax requires special init\n if isinstance(module, Wav2Vec2GumbelVectorQuantizer):\n module.weight_proj.weight.data.normal_(mean=0.0, std=1)\n module.weight_proj.bias.data.zero_()\n nn.init.uniform_(module.codevectors)\n elif isinstance(module, Wav2Vec2PositionalConvEmbedding):\n nn.init.normal_(\n module.conv.weight,\n mean=0,\n std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),\n )\n nn.init.constant_(module.conv.bias, 0)\n elif isinstance(module, Wav2Vec2FeatureProjection):\n k = math.sqrt(1 / module.projection.in_features)\n nn.init.uniform_(module.projection.weight, a=-k, b=k)\n nn.init.uniform_(module.projection.bias, a=-k, b=k)\n elif isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n elif isinstance(module, nn.Conv1d):\n nn.init.kaiming_normal_(module.weight)\n\n if module.bias is not None:\n k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))\n nn.init.uniform_(module.bias, a=-k, b=k)\n\n def _get_feat_extract_output_lengths(\n self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None\n ):\n \"\"\"\n Computes the output length of the convolutional layers\n \"\"\"\n\n add_adapter = self.config.add_adapter if add_adapter is None else add_adapter\n\n def _conv_out_length(input_length, kernel_size, stride):\n # 1D convolutional layer output length formula taken\n # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html\n return torch_int_div(input_length - kernel_size, stride) + 1\n\n for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):\n input_lengths = _conv_out_length(input_lengths, kernel_size, stride)\n\n if add_adapter:\n for _ in range(self.config.num_adapter_layers):\n input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)\n\n return input_lengths\n\n def _get_feature_vector_attention_mask(\n self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None\n ):\n # Effectively attention_mask.sum(-1), but not 
inplace to be able to run\n # on inference mode.\n non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]\n\n output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)\n output_lengths = output_lengths.to(torch.long)\n\n batch_size = attention_mask.shape[0]\n\n attention_mask = torch.zeros(\n (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device\n )\n # these two operations makes sure that all values before the output lengths idxs are attended to\n attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1\n attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()\n return attention_mask\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (Wav2Vec2Encoder, Wav2Vec2EncoderStableLayerNorm, Wav2Vec2FeatureEncoder)):\n module.gradient_checkpointing = value\n\n\nWAV_2_VEC_2_START_DOCSTRING = r\"\"\"\n Wav2Vec2 was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech\n Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael\n Auli.\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving etc.).\n\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`Wav2Vec2Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\n\nWAV_2_VEC_2_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file\n into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install\n soundfile*). To prepare the array into *input_values*, the [`Wav2Vec2Processor`] should be used for padding\n and conversion into a tensor of type *torch.FloatTensor*. See [`Wav2Vec2Processor.__call__`] for details.\n attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,\n 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip warning={true}>\n\n `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==\n True`. For all models whose processor has `config.return_attention_mask == False`, such as\n [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), `attention_mask` should **not** be\n passed to avoid degraded performance when doing batched inference. For such models `input_values` should\n simply be padded with 0 and passed without `attention_mask`. 
Be aware that these models also yield slightly\n different results depending on whether `input_values` is padded or not.\n\n </Tip>\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.\",\n WAV_2_VEC_2_START_DOCSTRING,\n)\nclass Wav2Vec2Model(Wav2Vec2PreTrainedModel):\n def __init__(self, config: Wav2Vec2Config):\n super().__init__(config)\n self.config = config\n self.feature_extractor = Wav2Vec2FeatureEncoder(config)\n self.feature_projection = Wav2Vec2FeatureProjection(config)\n\n # model only needs masking vector if mask prob is > 0.0\n if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:\n self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())\n\n if config.do_stable_layer_norm:\n self.encoder = Wav2Vec2EncoderStableLayerNorm(config)\n else:\n self.encoder = Wav2Vec2Encoder(config)\n\n self.adapter = Wav2Vec2Adapter(config) if config.add_adapter else None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameters will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.feature_extractor._freeze_parameters()\n\n def _mask_hidden_states(\n self,\n hidden_states: torch.FloatTensor,\n mask_time_indices: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n ):\n \"\"\"\n Masks extracted features along time axis and/or along feature axis according to\n [SpecAugment](https://arxiv.org/abs/1904.08779).\n \"\"\"\n\n # `config.apply_spec_augment` can set masking to False\n if not getattr(self.config, \"apply_spec_augment\", True):\n return hidden_states\n\n # generate indices & apply SpecAugment along time axis\n batch_size, sequence_length, hidden_size = hidden_states.size()\n\n if mask_time_indices is not None:\n # apply SpecAugment along time axis with given mask_time_indices\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n elif self.config.mask_time_prob > 0 and self.training:\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length),\n mask_prob=self.config.mask_time_prob,\n mask_length=self.config.mask_time_length,\n attention_mask=attention_mask,\n min_masks=self.config.mask_time_min_masks,\n )\n mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n\n if self.config.mask_feature_prob > 0 
and self.training:\n # generate indices & apply SpecAugment along feature axis\n mask_feature_indices = _compute_mask_indices(\n (batch_size, hidden_size),\n mask_prob=self.config.mask_feature_prob,\n mask_length=self.config.mask_feature_length,\n min_masks=self.config.mask_feature_min_masks,\n )\n mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)\n mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)\n hidden_states[mask_feature_indices] = 0\n\n return hidden_states\n\n @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Wav2Vec2BaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_EXPECTED_OUTPUT_SHAPE,\n )\n def forward(\n self,\n input_values,\n attention_mask=None,\n mask_time_indices=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n extract_features = self.feature_extractor(input_values)\n extract_features = extract_features.transpose(1, 2)\n\n if attention_mask is not None:\n # compute reduced attention_mask corresponding to feature vectors\n attention_mask = self._get_feature_vector_attention_mask(\n extract_features.shape[1], attention_mask, add_adapter=False\n )\n\n hidden_states, extract_features = self.feature_projection(extract_features)\n hidden_states = self._mask_hidden_states(\n hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask\n )\n\n encoder_outputs = self.encoder(\n hidden_states,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = encoder_outputs[0]\n\n if self.adapter is not None:\n hidden_states = self.adapter(hidden_states)\n\n if not return_dict:\n return (hidden_states, extract_features) + encoder_outputs[1:]\n\n return Wav2Vec2BaseModelOutput(\n last_hidden_state=hidden_states,\n extract_features=extract_features,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"Wav2Vec2 Model with a quantizer and `VQ` head on top.\"\"\", WAV_2_VEC_2_START_DOCSTRING)\nclass Wav2Vec2ForPreTraining(Wav2Vec2PreTrainedModel):\n def __init__(self, config: Wav2Vec2Config):\n super().__init__(config)\n self.wav2vec2 = Wav2Vec2Model(config)\n self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)\n\n self.quantizer = Wav2Vec2GumbelVectorQuantizer(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n # make sure that project_hid & project_q are initialized like normal linear layers\n self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim)\n self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)\n\n def set_gumbel_temperature(self, temperature: int):\n \"\"\"\n Set the Gumbel softmax temperature to a given value. 
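In the wav2vec 2.0 pre-training recipe the temperature is annealed from 2.0 down to 0.5 over the\n course of training. 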
Only necessary for training\n \"\"\"\n self.quantizer.temperature = temperature\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameters will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wav2vec2.feature_extractor._freeze_parameters()\n\n @staticmethod\n def compute_contrastive_logits(\n target_features: torch.FloatTensor,\n negative_features: torch.FloatTensor,\n predicted_features: torch.FloatTensor,\n temperature: int = 0.1,\n ):\n \"\"\"\n Compute logits for contrastive loss based using cosine similarity as the distance measure between\n `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.\n \"\"\"\n target_features = torch.cat([target_features, negative_features], dim=0)\n\n logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1).type_as(\n target_features\n )\n\n # apply temperature\n logits = logits / temperature\n return logits\n\n @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Wav2Vec2ForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_values,\n attention_mask=None,\n mask_time_indices=None,\n sampled_negative_indices=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict\n masked extracted features in *config.proj_codevector_dim* space.\n sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*):\n Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss.\n Required input for pre-training.\n\n Returns:\n\n Example:\n\n ```python\n >>> import torch\n >>> from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining\n >>> from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices\n >>> from datasets import load_dataset\n >>> import soundfile as sf\n\n >>> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\"patrickvonplaten/wav2vec2-base\")\n >>> model = Wav2Vec2ForPreTraining.from_pretrained(\"patrickvonplaten/wav2vec2-base\")\n\n >>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n >>> input_values = feature_extractor(ds[0][\"audio\"][\"array\"], return_tensors=\"pt\").input_values # Batch size 1\n\n >>> # compute masked indices\n >>> batch_size, raw_sequence_length = input_values.shape\n >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length)\n >>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2)\n >>> mask_time_indices = torch.tensor(mask_time_indices, device=input_values.device, dtype=torch.long)\n\n >>> with torch.no_grad():\n ... 
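# run the forward pass without gradient tracking (inference only)\n ... 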
outputs = model(input_values, mask_time_indices=mask_time_indices)\n\n >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)\n >>> cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1)\n\n >>> # show that cosine similarity is much higher than random\n >>> cosine_sim[mask_time_indices.to(torch.bool)].mean() > 0.5\n tensor(True)\n\n >>> # for contrastive loss training model should be put into train mode\n >>> model = model.train()\n >>> loss = model(input_values, mask_time_indices=mask_time_indices).loss\n ```\"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if mask_time_indices is not None:\n mask_time_indices = mask_time_indices.to(torch.bool)\n\n outputs = self.wav2vec2(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n mask_time_indices=mask_time_indices,\n return_dict=return_dict,\n )\n\n # 1. project all transformed features (including masked) to final vq dim\n transformer_features = self.project_hid(outputs[0])\n\n # 2. quantize all (unmasked) extracted features and project to final vq dim\n extract_features = self.dropout_features(outputs[1])\n\n if attention_mask is not None:\n # compute reduced attention_mask correponding to feature vectors\n attention_mask = self._get_feature_vector_attention_mask(\n extract_features.shape[1], attention_mask, add_adapter=False\n )\n\n quantized_features, codevector_perplexity = self.quantizer(\n extract_features, mask_time_indices=mask_time_indices\n )\n quantized_features = self.project_q(quantized_features)\n\n loss = contrastive_loss = diversity_loss = None\n if sampled_negative_indices is not None:\n batch_size, sequence_length, hidden_size = quantized_features.shape\n\n # for training, we sample negatives\n # 3. sample K negatives (distractors) quantized states for contrastive loss\n # if attention_mask is passed, make sure that padded feature vectors cannot be sampled\n # sample negative quantized vectors BTC => (BxT)C\n negative_quantized_features = quantized_features.view(-1, hidden_size)[\n sampled_negative_indices.long().view(-1)\n ]\n negative_quantized_features = negative_quantized_features.view(\n batch_size, sequence_length, -1, hidden_size\n ).permute(2, 0, 1, 3)\n\n # 4. compute logits, corresponding to `logs = sim(c_t, [q_t, \\sim{q}_t]) / \\kappa`\n # of equation (3) in https://arxiv.org/pdf/2006.11477.pdf\n logits = self.compute_contrastive_logits(\n quantized_features[None, :],\n negative_quantized_features,\n transformer_features,\n self.config.contrastive_logits_temperature,\n )\n\n # 5. if a negative vector is identical to the positive (i.e. when codebook utilization is low),\n # its cosine similarity will be masked\n neg_is_pos = (quantized_features == negative_quantized_features).all(-1)\n\n if neg_is_pos.any():\n logits[1:][neg_is_pos] = float(\"-inf\")\n\n # 6. compute contrastive loss \\mathbf{L}_m = cross_entropy(logs) =\n # -log(exp(sim(c_t, q_t)/\\kappa) / \\sum_{\\sim{q}} exp(sim(c_t, \\sim{q})/\\kappa))\n logits = logits.transpose(0, 2).reshape(-1, logits.size(0))\n target = ((1 - mask_time_indices.long()) * -100).transpose(0, 1).flatten()\n\n contrastive_loss = nn.functional.cross_entropy(logits.float(), target, reduction=\"sum\")\n # 7. 
compute diversity loss: \\mathbf{L}_d\n num_codevectors = self.config.num_codevectors_per_group * self.config.num_codevector_groups\n diversity_loss = ((num_codevectors - codevector_perplexity) / num_codevectors) * mask_time_indices.sum()\n\n # 8. \\mathbf{L} = \\mathbf{L}_m + \\alpha * \\mathbf{L}_d\n loss = contrastive_loss + self.config.diversity_loss_weight * diversity_loss\n\n if not return_dict:\n if loss is not None:\n return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]\n return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]\n\n return Wav2Vec2ForPreTrainingOutput(\n loss=loss,\n projected_states=transformer_features,\n projected_quantized_states=quantized_features,\n codevector_perplexity=codevector_perplexity,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n contrastive_loss=contrastive_loss,\n diversity_loss=diversity_loss,\n )\n\n\n@add_start_docstrings(\"\"\"Wav2Vec2 Model with a `language modeling` head on top.\"\"\", WAV_2_VEC_2_START_DOCSTRING)\nclass Wav2Vec2ForMaskedLM(Wav2Vec2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n warnings.warn(\n \"The class `Wav2Vec2ForMaskedLM` is deprecated. Please use `Wav2Vec2ForCTC` instead.\", FutureWarning\n )\n\n self.wav2vec2 = Wav2Vec2Model(config)\n self.dropout = nn.Dropout(config.final_dropout)\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)\n def forward(\n self,\n input_values,\n attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n ):\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.wav2vec2(\n input_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n hidden_states = self.dropout(hidden_states)\n logits = self.lm_head(hidden_states)\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return output\n\n return MaskedLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)\n\n\n@add_start_docstrings(\n \"\"\"Wav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\"\"\",\n WAV_2_VEC_2_START_DOCSTRING,\n)\nclass Wav2Vec2ForCTC(Wav2Vec2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wav2vec2 = Wav2Vec2Model(config)\n self.dropout = nn.Dropout(config.final_dropout)\n\n if config.vocab_size is None:\n raise ValueError(\n f\"You are trying to instantiate {self.__class__} with a configuration that \"\n \"does not define the vocabulary size of the language model head. Please \"\n \"instantiate the model as follows: `Wav2Vec2ForCTC.from_pretrained(..., vocab_size=vocab_size)`. 
\"\n \"or define `vocab_size` of your model's configuration.\"\n )\n output_hidden_size = (\n config.output_hidden_size if hasattr(config, \"add_adapter\") and config.add_adapter else config.hidden_size\n )\n self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wav2vec2.feature_extractor._freeze_parameters()\n\n @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=CausalLMOutput,\n config_class=_CONFIG_FOR_DOC,\n expected_output=_CTC_EXPECTED_OUTPUT,\n expected_loss=_CTC_EXPECTED_LOSS,\n )\n def forward(\n self,\n input_values,\n attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):\n Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to\n the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.\n All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,\n config.vocab_size - 1]`.\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.wav2vec2(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n hidden_states = self.dropout(hidden_states)\n\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n\n if labels.max() >= self.config.vocab_size:\n raise ValueError(f\"Label values must be <= vocab_size: {self.config.vocab_size}\")\n\n # retrieve loss input_lengths from attention_mask\n attention_mask = (\n attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)\n )\n input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)\n\n # assuming that padded tokens are filled with -100\n # when not being attended to\n labels_mask = labels >= 0\n target_lengths = labels_mask.sum(-1)\n flattened_targets = labels.masked_select(labels_mask)\n\n # ctc_loss doesn't support fp16\n log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)\n\n with torch.backends.cudnn.flags(enabled=False):\n loss = nn.functional.ctc_loss(\n log_probs,\n flattened_targets,\n input_lengths,\n target_lengths,\n blank=self.config.pad_token_id,\n reduction=self.config.ctc_loss_reduction,\n zero_infinity=self.config.ctc_zero_infinity,\n )\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) 
if loss is not None else output\n\n return CausalLMOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Wav2Vec2 Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n \"\"\",\n WAV_2_VEC_2_START_DOCSTRING,\n)\nclass Wav2Vec2ForSequenceClassification(Wav2Vec2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if hasattr(config, \"add_adapter\") and config.add_adapter:\n raise ValueError(\n \"Sequence classification does not support the use of Wav2Vec2 adapters (config.add_adapter=True)\"\n )\n self.wav2vec2 = Wav2Vec2Model(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)\n self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameters will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wav2vec2.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.wav2vec2.parameters():\n param.requires_grad = False\n\n @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_SEQ_CLASS_CHECKPOINT,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,\n expected_loss=_SEQ_CLASS_EXPECTED_LOSS,\n )\n def forward(\n self,\n input_values,\n attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wav2vec2(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n hidden_states = self.projector(hidden_states)\n if attention_mask is None:\n pooled_output = hidden_states.mean(dim=1)\n else:\n padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)\n hidden_states[~padding_mask] = 0.0\n pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)\n\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Wav2Vec2 Model with a frame classification head on top for tasks like Speaker Diarization.\n \"\"\",\n WAV_2_VEC_2_START_DOCSTRING,\n)\nclass Wav2Vec2ForAudioFrameClassification(Wav2Vec2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if hasattr(config, \"add_adapter\") and config.add_adapter:\n raise ValueError(\n \"Audio frame classification does not support the use of Wav2Vec2 adapters (config.add_adapter=True)\"\n )\n self.wav2vec2 = Wav2Vec2Model(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wav2vec2.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. 
Only the classification head will be updated.\n \"\"\"\n for param in self.wav2vec2.parameters():\n param.requires_grad = False\n\n @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_FRAME_CLASS_CHECKPOINT,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_FRAME_EXPECTED_OUTPUT,\n )\n def forward(\n self,\n input_values,\n attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wav2vec2(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n logits = self.classifier(hidden_states)\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return output\n\n return TokenClassifierOutput(\n loss=None,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass AMSoftmaxLoss(nn.Module):\n def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):\n super(AMSoftmaxLoss, self).__init__()\n self.scale = scale\n self.margin = margin\n self.num_labels = num_labels\n self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)\n self.loss = nn.CrossEntropyLoss()\n\n def forward(self, hidden_states, labels):\n labels = labels.flatten()\n weight = nn.functional.normalize(self.weight, dim=0)\n hidden_states = nn.functional.normalize(hidden_states, dim=1)\n cos_theta = torch.mm(hidden_states, weight)\n psi = cos_theta - self.margin\n\n onehot = nn.functional.one_hot(labels, self.num_labels)\n logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)\n loss = self.loss(logits, labels)\n\n return loss\n\n\nclass TDNNLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]\n self.out_conv_dim = config.tdnn_dim[layer_id]\n self.kernel_size = config.tdnn_kernel[layer_id]\n self.dilation = config.tdnn_dilation[layer_id]\n\n self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)\n self.activation = nn.ReLU()\n\n def forward(self, hidden_states):\n hidden_states = hidden_states.unsqueeze(1)\n hidden_states = nn.functional.unfold(\n hidden_states,\n (self.kernel_size, self.in_conv_dim),\n stride=(1, self.in_conv_dim),\n dilation=(self.dilation, 1),\n )\n hidden_states = hidden_states.transpose(1, 2)\n hidden_states = self.kernel(hidden_states)\n\n 
hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"\n Wav2Vec2 Model with an XVector feature extraction head on top for tasks like Speaker Verification.\n \"\"\",\n WAV_2_VEC_2_START_DOCSTRING,\n)\nclass Wav2Vec2ForXVector(Wav2Vec2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wav2vec2 = Wav2Vec2Model(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])\n\n tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]\n self.tdnn = nn.ModuleList(tdnn_layers)\n\n self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)\n self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)\n\n self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)\n\n self.init_weights()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.wav2vec2.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.wav2vec2.parameters():\n param.requires_grad = False\n\n def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):\n \"\"\"\n Computes the output length of the TDNN layers\n \"\"\"\n\n def _conv_out_length(input_length, kernel_size, stride):\n # 1D convolutional layer output length formula taken\n # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html\n return (input_length - kernel_size) // stride + 1\n\n for kernel_size in self.config.tdnn_kernel:\n input_lengths = _conv_out_length(input_lengths, kernel_size, 1)\n\n return input_lengths\n\n @add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_XVECTOR_CHECKPOINT,\n output_type=XVectorOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_XVECTOR_EXPECTED_OUTPUT,\n )\n def forward(\n self,\n input_values,\n attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wav2vec2(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n hidden_states = self.projector(hidden_states)\n\n for tdnn_layer in self.tdnn:\n hidden_states = tdnn_layer(hidden_states)\n\n # Statistic Pooling\n if attention_mask is None:\n mean_features = hidden_states.mean(dim=1)\n std_features = hidden_states.std(dim=1)\n else:\n feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))\n tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)\n mean_features = []\n std_features = []\n for i, length in enumerate(tdnn_output_lengths):\n mean_features.append(hidden_states[i, :length].mean(dim=0))\n std_features.append(hidden_states[i, :length].std(dim=0))\n mean_features = torch.stack(mean_features)\n std_features = torch.stack(std_features)\n statistic_pooling = torch.cat([mean_features, std_features], dim=-1)\n\n output_embeddings = self.feature_extractor(statistic_pooling)\n logits = self.classifier(output_embeddings)\n\n loss = None\n if labels is not None:\n loss = self.objective(logits, labels)\n\n if not return_dict:\n output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return XVectorOutput(\n loss=loss,\n logits=logits,\n embeddings=output_embeddings,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
] |
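A note on the CTC branch in the `Wav2Vec2ForCTC` code dumped above: the loss computation follows a fixed pattern (padded labels marked with `-100`, an explicit fp32 log-softmax because `ctc_loss` does not support fp16, and cuDNN disabled around the loss call). Below is a minimal, self-contained sketch of that pattern; the tensor shapes, blank id, and reduction are illustrative assumptions, not values from any checkpoint.

```python
import torch
import torch.nn as nn

batch, time, vocab, pad_token_id = 2, 50, 32, 0  # illustrative sizes only

logits = torch.randn(batch, time, vocab)          # (B, T, V) stand-in for lm_head output
labels = torch.randint(1, vocab, (batch, 12))
labels[1, 8:] = -100                              # padded targets are marked with -100

input_lengths = torch.full((batch,), time, dtype=torch.long)
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)
flattened_targets = labels.masked_select(labels_mask)

# ctc_loss does not support fp16, hence the explicit float32 log-softmax;
# nn.functional.ctc_loss also expects (T, B, V) layout, hence the transpose.
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)

with torch.backends.cudnn.flags(enabled=False):
    loss = nn.functional.ctc_loss(
        log_probs, flattened_targets, input_lengths, target_lengths,
        blank=pad_token_id, reduction="mean", zero_infinity=True,
    )
print(loss.item())
```

The `cudnn.flags(enabled=False)` guard mirrors the dumped forward pass, presumably to avoid cuDNN's more restrictive CTC kernel in favor of the generic one.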
[
[
"torch.nn.functional.softmax",
"torch.nn.init.uniform_",
"torch.nn.functional.glu",
"torch.nn.functional.dropout",
"torch.zeros",
"torch.cat",
"torch.FloatTensor",
"numpy.random.randint",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.mm",
"torch.ones",
"numpy.arange",
"torch.randn",
"torch.backends.cudnn.flags",
"torch.tensor",
"torch.bmm",
"torch.arange",
"torch.nn.GroupNorm",
"numpy.zeros",
"torch.ones_like",
"torch.nn.init.constant_",
"numpy.put_along_axis",
"torch.nn.ModuleList",
"torch.zeros_like",
"torch.nn.Linear",
"torch.log",
"numpy.random.rand",
"torch.nn.Conv1d",
"torch.stack",
"numpy.array",
"torch.nn.functional.ctc_loss",
"torch.nn.functional.normalize",
"numpy.random.random",
"torch.nn.functional.log_softmax",
"torch.nn.utils.weight_norm",
"numpy.ones",
"torch.nn.LayerNorm",
"numpy.random.uniform",
"numpy.broadcast_to",
"torch.nn.functional.one_hot",
"torch.nn.functional.unfold",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] |
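Several entries in the API list above (`torch.mm`, `torch.nn.functional.normalize`, `torch.nn.functional.one_hot`) come from the `AMSoftmaxLoss` module that `Wav2Vec2ForXVector` uses as its training objective. A minimal sketch of that additive-margin softmax pattern follows; the batch size, feature dimension, and label count are chosen arbitrarily for illustration.

```python
import torch
import torch.nn as nn

scale, margin = 30.0, 0.4          # defaults from the AMSoftmaxLoss definition above
num_labels, dim, batch = 4, 16, 8  # illustrative sizes

weight = nn.functional.normalize(torch.randn(dim, num_labels), dim=0)
hidden = nn.functional.normalize(torch.randn(batch, dim), dim=1)
labels = torch.randint(0, num_labels, (batch,))

cos_theta = torch.mm(hidden, weight)                # cosine-similarity logits
psi = cos_theta - margin                            # margin-penalized logits
onehot = nn.functional.one_hot(labels, num_labels)
logits = scale * torch.where(onehot.bool(), psi, cos_theta)
loss = nn.CrossEntropyLoss()(logits, labels)
print(loss.item())
```

The margin is subtracted only from the target-class cosine, so the embedding must separate classes by at least `margin` in cosine space before the loss flattens out.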
AI4medicine-Berlin/explainable-predictive-models
|
[
"994134700eae6291cfe0cbd547e941018b00548b"
] |
[
"code/utils/models.py"
] |
[
"\"\"\"\nFile name: models.py\nAuthor: Esra Zihni\nDate created: 21.05.2018\n\nThis file contains the Model metaclass object that is used for implementing \nthe given models. It contains a class object for each individual model type.\n\"\"\"\n\nimport os\nimport pickle\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Dict, List, Union\n\nimport catboost as cat\nimport keras\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom keras.callbacks import EarlyStopping\nfrom keras.layers import Dense, Dropout\nfrom keras.models import Sequential, load_model\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.model_selection import GridSearchCV, ParameterGrid\nfrom sklearn import svm\nfrom sklearn.naive_bayes import GaussianNB\nfrom utils.helper_functions import calc_perf_score\n\nos.environ[\"KERAS_BACKEND\"] = \"tensorflow\"\n\n\nclass Model(metaclass=ABCMeta):\n \"\"\"\n\tA metaclass used to represent any model.\n\n\t:param name: Name of the model \n\t:param dataset: The dataset for the model to train on\n\t:param fixed_params: Hyperparameters that won't be used in model tuning\n\t:param tuning_params: Hyperparameters that can be used in model tuning\n\n\t.. py:meth:: Model.load_model(path)\n\t\t:param path: Path to model file.\n\t.. py:meth:: Model.run_gridsearch()\t\n\t.. py_meth:: Model.train()\n\t.. py.meth:: Model.save_model(path)\n\t\t:param path: Path to model file.\n\t.. py.meth:: Model.evaluate_performance(score_name)\n\t\t:param score_name: Name of the performance measure.\n\t\t:return: Training and test performance scores \n\n\t\"\"\"\n\n def __init__(\n self,\n name: str,\n dataset: Dict,\n fixed_params: Dict[str, Union[str, float]],\n tuning_params: Dict[str, Union[str, float]] = None,\n ):\n self.name = name\n self.X_tr = dataset[\"train_data\"]\n self.y_tr = dataset[\"train_labels\"]\n self.X_te = dataset[\"test_data\"]\n self.y_te = dataset[\"test_labels\"]\n\n self.fixed_params = fixed_params\n self.tuning_params = tuning_params\n\n if self.fixed_params.get(\"out_activation\") is \"softmax\":\n self.y_tr = pd.get_dummies(self.y_tr)\n self.y_te = pd.get_dummies(self.y_te)\n\n def load_model(self, path: str) -> None:\n \"\"\"\n\t\tLoads trained model from given path.\n\t\t:param path: Path to model file.\n\t\t\"\"\"\n if self.name == \"MLP\":\n self.best_model = load_model(path)\n else:\n self.best_model = pickle.load(open(path, \"rb\"))\n\n @abstractmethod\n def run_gridsearch(self) -> None:\n pass\n\n @abstractmethod\n def train(self) -> None:\n pass\n\n def save_model(self, path: str) -> None:\n \"\"\"\n\t\tSaves trained model to given path.\n\t\t:param path: Path to model file.\n\t\t\"\"\"\n if self.name == \"MLP\":\n self.best_model.save(path)\n else:\n pickle.dump(self.best_model, open(path, \"wb\"))\n\n def evaluate_performance(self, score_name: str) -> List[float]:\n \"\"\"\n\t\tEvaluates final model performance for the given performanc measure.\n\t\t:param score_name: Name of the performance measure.\n\t\t:return: Training and test performance scores \n\t\t\"\"\"\n training_performance = calc_perf_score(\n data=self.X_tr,\n labels=self.y_tr,\n model=self.best_model,\n model_name=self.name,\n score_name=score_name,\n )\n test_performance = calc_perf_score(\n data=self.X_te,\n labels=self.y_te,\n model=self.best_model,\n model_name=self.name,\n score_name=score_name,\n )\n return training_performance, test_performance\n\n\nclass SVMC(Model):\n \"\"\"\n\tA subclass of the Model metaclass used to represent a SVMC 
model.\n\n\t.. py:meth:: SVMC.run_gridsearch()\n\t.. py:meth:: SVMC.train()\n\n\t\"\"\"\n\n\n def run_gridsearch(self, cv, cv_score: str) -> None:\n \"\"\"\n\t\tPerforms a gridsearch over the tuning hyperparameters. Determines the \n\t\tbest hyperparameters based on the average validation performance \n\t\tcalculated over cross-validation folds.\n\t\t:param cv: A cross-validarion generator that determines the \n\t\tcross-validation strategy.\n\t\t:param cv_score: Measure to evaluate predictions on the validation set. \n\t\t\"\"\"\n \n model = svm.SVC(**self.fixed_params)\n \n gsearch = GridSearchCV(\n estimator=model,\n cv=cv,\n param_grid=self.tuning_params,\n scoring=cv_score.replace(\"AUC\", \"roc_auc\"),\n iid=True,\n n_jobs=-1,\n )\n gsearch.fit(\n self.X_tr.values.astype(\"float\"), np.ravel(self.y_tr.values.astype(\"float\"))\n )\n\n self.best_tuning_params = gsearch.best_params_\n\n def train(self, use_gridsearch_results: bool = True) -> None:\n \"\"\"\n\t\tTrains a SVMC model. \n\t\t:param use_gridsearch_results: Determines whether to use \n\t\thyperparameters selected through gridsearch\n\t\t\"\"\"\n params = self.fixed_params.copy()\n if use_gridsearch_results == True:\n self.tuning_params = self.best_tuning_params\n\n params.update(self.tuning_params)\n\n self.best_model = svm.SVC(**params)\n \n self.best_model.fit(self.X_tr, np.ravel(self.y_tr))\n\nclass NB(Model):\n \"\"\"\n\tA subclass of the Model metaclass used to represent a Gaussian Naive Bayes model.\n\n\t.. py:meth:: NB.train()\n\n\t\"\"\"\n def run_gridsearch(self) -> None:\n pass\n\n def train(self, **args):\n \"\"\"\n\t\tTrains a Gaussian Naive Bayes model. \n\t\t\"\"\"\n self.best_model = GaussianNB(**self.fixed_params)\n self.best_model.fit(self.X_tr, np.ravel(self.y_tr))\n\nclass GLM(Model):\n \"\"\"\n\tA subclass of the Model metaclass used to represent a Generalized\n\tLinear Model (GLM).\n\n\t.. py:meth:: GLM.train()\n\t\"\"\"\n def run_gridsearch(self) -> None:\n pass\n \n def train(self, **args):\n \"\"\"\n\t\tTrains a Generalized Linear Model (GLM).\n\t\t\"\"\"\n self.best_model = LogisticRegression(**self.fixed_params)\n self.best_model.fit(self.X_tr, np.ravel(self.y_tr))\n\n\n\nclass Lasso(Model):\n \"\"\"\n\tA subclass of the Model metaclass used to represent a Lasso model.\n\n\t.. py:meth:: Lasso.run_gridsearch()\n\t.. py:meth:: Lasso.train()\n\n\t\"\"\"\n\n def run_gridsearch(self, cv, cv_score: str) -> None:\n \"\"\"\n\t\tPerforms a gridsearch over the tuning hyperparameters. Determines the \n\t\tbest hyperparameters based on the average validation performance \n\t\tcalculated over cross-validation folds.\n\t\t:param cv: A cross-validarion generator that determines the \n\t\tcross-validation strategy.\n\t\t:param cv_score: Measure to evaluate predictions on the validation set. \n\t\t\"\"\"\n model = LogisticRegression(**self.fixed_params)\n\n gsearch = GridSearchCV(\n estimator=model,\n cv=cv,\n param_grid=self.tuning_params,\n scoring=cv_score.replace(\"AUC\", \"roc_auc\"),\n iid=True,\n n_jobs=-1,\n )\n gsearch.fit(\n self.X_tr.values.astype(\"float\"), np.ravel(self.y_tr.values.astype(\"float\"))\n )\n\n self.best_tuning_params = gsearch.best_params_\n\n def train(self, use_gridsearch_results: bool = True) -> None:\n \"\"\"\n\t\tTrains a Lasso model. 
\n\t\t:param use_gridsearch_results: Determines whether to use \n\t\thyperparameters selected through gridsearch\n\t\t\"\"\"\n params = self.fixed_params.copy()\n if use_gridsearch_results == True:\n self.tuning_params = self.best_tuning_params\n\n params.update(self.tuning_params)\n\n self.best_model = LogisticRegression(**params)\n self.best_model.fit(self.X_tr, np.ravel(self.y_tr))\n\n\nclass ElasticNet(Model):\n def run_gridsearch(self, cv, cv_score: str) -> None:\n \"\"\"\n\t\tPerforms a gridsearch over the tuning hyperparameters. Determines the \n\t\tbest hyperparameters based on the average validation performance \n\t\tcalculated over cross-validation folds.\n\t\t:param cv: A cross-validarion generator that determines the \n\t\tcross-validation strategy.\n\t\t:param cv_score: Measure to evaluate predictions on the validation set. \n\t\t\"\"\"\n model = SGDClassifier(**self.fixed_params)\n\n gsearch = GridSearchCV(\n estimator=model,\n cv=cv,\n param_grid=self.tuning_params,\n scoring=cv_score.replace(\"AUC\", \"roc_auc\"),\n iid=True,\n n_jobs=-1,\n )\n gsearch.fit(\n self.X_tr.values.astype(\"float\"), np.ravel(self.y_tr.values.astype(\"float\"))\n )\n\n self.best_tuning_params = gsearch.best_params_\n\n def train(self, use_gridsearch_results: bool = True) -> None:\n \"\"\"\n\t\tTrains an Elastic Net model. \n\t\t:param use_gridsearch_results: Determines whether to use \n\t\thyperparameters selected through gridsearch\n\t\t\"\"\"\n params = self.fixed_params.copy()\n if use_gridsearch_results == True:\n self.tuning_params = self.best_tuning_params\n\n params.update(self.tuning_params)\n\n self.best_model = SGDClassifier(**params)\n self.best_model.fit(self.X_tr, np.ravel(self.y_tr))\n\n\nclass Catboost(Model):\n def run_gridsearch(self, cv, cv_score: str) -> None:\n \"\"\"\n\t\tPerforms a gridsearch over the tuning hyperparameters. Determines the \n\t\tbest hyperparameters based on the average validation performance \n\t\tcalculated over cross-validation folds.\n\t\t:param cv: A cross-validarion generator that determines the \n\t\tcross-validation strategy.\n\t\t:param cv_score: Measure to evaluate predictions on the validation set. \n\t\t\"\"\"\n cats = self.X_tr.columns[self.X_tr.dtypes == \"category\"]\n cat_features = [list(self.X_tr).index(cats[i]) for i in range(len(cats))]\n params = self.fixed_params.copy()\n\n best_AUC = 0.5\n for tune in ParameterGrid(self.tuning_params):\n params.update(tune)\n\n AUC_val = []\n for train, val in cv.split(self.X_tr, self.y_tr):\n X_train, y_train = self.X_tr.iloc[train], self.y_tr.iloc[train]\n X_val, y_val = self.X_tr.iloc[val], self.y_tr.iloc[val]\n train_pool = cat.Pool(X_train, y_train, cat_features=cat_features)\n validate_pool = cat.Pool(X_val, y_val, cat_features=cat_features)\n\n model = cat.CatBoostClassifier(**params)\n model.fit(train_pool, eval_set=validate_pool, logging_level=\"Silent\")\n\n validation_AUC = calc_perf_score(\n data=X_val,\n labels=np.array(y_val.astype(\"float\")),\n model=model,\n model_name=self.name,\n score_name=cv_score,\n )\n AUC_val.append(validation_AUC)\n\n AUC_val = np.mean(AUC_val)\n\n if AUC_val > best_AUC:\n best_AUC = AUC_val\n self.best_tuning_params = tune\n\n def train(self, use_gridsearch_results: bool = True) -> None:\n \"\"\"\n\t\tTrains a Tree Boosting model. 
\n\t\t:param use_gridsearch_results: Determines whether to use \n\t\thyperparameters selected through gridsearch\n\t\t\"\"\"\n params = self.fixed_params.copy()\n cats = self.X_tr.columns[self.X_tr.dtypes == \"category\"]\n cat_features = [list(self.X_tr).index(cats[i]) for i in range(len(cats))]\n\n if use_gridsearch_results == True:\n self.tuning_params = self.best_tuning_params\n\n params.update(self.tuning_params)\n\n train_pool = cat.Pool(self.X_tr, self.y_tr, cat_features=cat_features)\n test_pool = cat.Pool(self.X_te, self.y_te, cat_features=cat_features)\n\n self.best_model = cat.CatBoostClassifier(**params)\n self.best_model.fit(train_pool, eval_set=test_pool, logging_level=\"Silent\")\n\n\nclass MLP(Model):\n def run_gridsearch(self, cv, cv_score: str) -> None:\n \"\"\"\n\t\tPerforms a gridsearch over the tuning hyperparameters. Determines the \n\t\tbest hyperparameters based on the average validation performance \n\t\tcalculated over cross-validation folds.\n\t\t:param cv: A cross-validarion generator that determines the \n\t\tcross-validation strategy.\n\t\t:param cv_score: Measure to evaluate predictions on the validation set. \n\t\t\"\"\"\n # Setting fixed parameters\n params = self.fixed_params.copy()\n\n # Fix seed\n np.random.seed(1)\n tf.set_random_seed(2)\n\n # Start Gridsearch\n best_AUC = 0.5\n for tune in ParameterGrid(self.tuning_params):\n params.update(tune)\n\n AUC_val = []\n for train, val in cv.split(self.X_tr, self.y_tr):\n X_train, y_train = self.X_tr.iloc[train], self.y_tr.iloc[train]\n X_val, y_val = self.X_tr.iloc[val], self.y_tr.iloc[val]\n\n e_stop = EarlyStopping(\n monitor=params[\"monitor\"],\n min_delta=params[\"min_delta\"],\n patience=params[\"iter_patience\"],\n mode=params[\"mode\"],\n )\n callbacks = [e_stop]\n optimizer = eval(\"keras.optimizers.\" + params[\"optimizer\"])(\n lr=params[\"learning_rate\"]\n )\n\n model = Sequential()\n model.add(\n Dense(\n params[\"num_neurons\"],\n input_dim=len(list(self.X_tr)),\n kernel_initializer=params[\"weight_init\"],\n activation=params[\"hidden_activation\"],\n kernel_regularizer=keras.regularizers.l1(params[\"l1_ratio\"]),\n )\n )\n model.add(Dropout(params[\"dropout_rate\"]))\n model.add(\n Dense(\n 1,\n kernel_initializer=params[\"weight_init\"],\n activation=params[\"out_activation\"],\n kernel_regularizer=keras.regularizers.l1(params[\"l1_ratio\"]),\n )\n )\n\n model.compile(loss=params[\"loss_func\"], optimizer=optimizer)\n\n history = model.fit(\n X_train,\n y_train,\n callbacks=callbacks,\n validation_data=(X_val, y_val),\n epochs=params[\"epochs\"],\n batch_size=params[\"batch_size\"],\n verbose=0,\n )\n\n validation_AUC = calc_perf_score(\n data=X_val,\n labels=np.array(y_val.astype(\"float\")),\n model=model,\n model_name=self.name,\n score_name=cv_score,\n )\n AUC_val.append(validation_AUC)\n\n AUC_val = np.mean(AUC_val)\n\n if AUC_val > best_AUC:\n best_AUC = AUC_val\n self.best_tuning_params = tune\n\n keras.backend.clear_session()\n\n def train(self, use_gridsearch_results: bool = True) -> None:\n \"\"\"\n\t\tTrains a Multilayer Perceptron (MLP) model. 
\n\t\t:param use_gridsearch_results: Determines whether to use \n\t\thyperparameters selected through gridsearch\n\t\t\"\"\"\n params = self.fixed_params.copy()\n if use_gridsearch_results == True:\n self.tuning_params = self.best_tuning_params\n\n params.update(self.tuning_params)\n\n e_stop = EarlyStopping(\n monitor=params[\"monitor\"],\n min_delta=params[\"min_delta\"],\n patience=params[\"iter_patience\"],\n mode=params[\"mode\"],\n )\n callbacks = [e_stop]\n optimizer = eval(\"keras.optimizers.\" + params[\"optimizer\"])(\n lr=params[\"learning_rate\"]\n )\n\n model = Sequential()\n model.add(\n Dense(\n params[\"num_neurons\"],\n input_dim=len(list(self.X_tr)),\n kernel_initializer=params[\"weight_init\"],\n activation=params[\"hidden_activation\"],\n kernel_regularizer=keras.regularizers.l2(params[\"l1_ratio\"]),\n )\n )\n model.add(Dropout(params[\"dropout_rate\"]))\n model.add(\n Dense(\n 1,\n kernel_initializer=params[\"weight_init\"],\n activation=params[\"out_activation\"],\n kernel_regularizer=keras.regularizers.l2(params[\"l1_ratio\"]),\n )\n )\n model.compile(loss=params[\"loss_func\"], optimizer=optimizer)\n\n history = model.fit(\n self.X_tr,\n self.y_tr,\n callbacks=callbacks,\n validation_data=(self.X_te, self.y_te),\n epochs=params[\"epochs\"],\n batch_size=params[\"batch_size\"],\n verbose=0,\n )\n\n self.best_model = model\n"
] |
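The `run_gridsearch` methods in the `models.py` dump above all share one pattern: wrap the estimator in `GridSearchCV`, translate the repo's "AUC" score name into scikit-learn's "roc_auc", and keep `best_params_`. Below is a minimal sketch of that pattern on synthetic data; note the dumped code also passes `iid=True`, an argument that has since been removed from scikit-learn, so it is omitted here.

```python
# Minimal sketch of the GridSearchCV pattern used by SVMC/Lasso/ElasticNet above.
# The data and parameter grid are synthetic, purely for illustration.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, StratifiedKFold

rng = np.random.default_rng(0)
X = rng.random((100, 5))
y = rng.integers(0, 2, size=100)

gsearch = GridSearchCV(
    estimator=LogisticRegression(max_iter=1000),
    cv=StratifiedKFold(n_splits=5),
    param_grid={"C": [0.01, 0.1, 1.0, 10.0]},
    scoring="AUC".replace("AUC", "roc_auc"),  # the repo's score-name translation
    n_jobs=-1,
)
gsearch.fit(X.astype("float"), np.ravel(y.astype("float")))
print(gsearch.best_params_)
```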
[
[
"sklearn.naive_bayes.GaussianNB",
"sklearn.linear_model.LogisticRegression",
"numpy.random.seed",
"sklearn.model_selection.ParameterGrid",
"numpy.mean",
"sklearn.svm.SVC",
"tensorflow.set_random_seed",
"numpy.ravel",
"sklearn.linear_model.SGDClassifier",
"pandas.get_dummies"
]
] |
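By contrast, the `Catboost` and `MLP` classes in the same file tune hyperparameters with a hand-rolled loop (`ParameterGrid` plus `cv.split`), presumably because their fit calls need eval sets and callbacks that `GridSearchCV` would not pass through. A minimal sketch of that loop, using a plain scikit-learn classifier and `roc_auc_score` as a stand-in for the repo's `calc_perf_score` helper:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterGrid, StratifiedKFold

rng = np.random.default_rng(1)
X = rng.random((120, 4))
y = rng.integers(0, 2, size=120)
cv = StratifiedKFold(n_splits=5)

best_auc, best_params = 0.5, None      # the dumped code also starts from 0.5
for tune in ParameterGrid({"C": [0.1, 1.0, 10.0]}):
    fold_aucs = []
    for train, val in cv.split(X, y):
        model = LogisticRegression(max_iter=1000, **tune).fit(X[train], y[train])
        fold_aucs.append(roc_auc_score(y[val], model.predict_proba(X[val])[:, 1]))
    mean_auc = np.mean(fold_aucs)
    if mean_auc > best_auc:            # keep the best fold-averaged score
        best_auc, best_params = mean_auc, tune
print(best_params, best_auc)
```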
l-bat/nncf
|
[
"6258916cd5fa7fc010ad09da63113354358bffd8",
"6258916cd5fa7fc010ad09da63113354358bffd8"
] |
[
"tests/test_nncf_network.py",
"tests/quantization/test_hw_config.py"
] |
[
"\"\"\"\n Copyright (c) 2019-2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport itertools\nfrom collections import Counter\nfrom copy import deepcopy\nfrom pathlib import Path\nfrom typing import List\nfrom typing import Tuple\n\nimport networkx as nx\nimport pytest\nimport torch\nfrom nncf.graph.graph import InputAgnosticOperationExecutionContext\n\nfrom nncf.graph.graph import PTNNCFGraph\nfrom nncf.graph.graph_builder import GraphBuilder\nfrom nncf.graph.operator_metatypes import InputNoopMetatype, OutputNoopMetatype\nfrom torch import nn\n\nfrom nncf import register_module\nfrom nncf.common.graph.transformations.commands import TargetType\nfrom nncf.dynamic_graph.context import PreHookId\nfrom nncf.dynamic_graph.context import Scope\nfrom nncf.graph.graph import PTNNCFNode\nfrom nncf.graph.graph import NNCFGraph\nfrom nncf.graph.graph import NNCFNode\nfrom nncf.dynamic_graph.graph_tracer import ModelInputInfo\nfrom nncf.graph.transformations.layout import PTTransformationLayout\nfrom nncf.common.graph.graph import MODEL_OUTPUT_OP_NAME\nfrom nncf.common.graph.graph import MODEL_INPUT_OP_NAME\nfrom nncf.graph.version_agnostic_op_names import VersionAgnosticNames\nfrom nncf.layer_utils import _NNCFModuleMixin\nfrom nncf.module_operations import BaseOp\nfrom nncf.nncf_network import EXTERNAL_QUANTIZERS_STORAGE_NAME\nfrom nncf.nncf_network import NNCFNetwork, InsertionPointGraph, InsertionPointGraphNodeType\nfrom nncf.graph.transformations.commands import TransformationPriority\nfrom nncf.graph.transformations.commands import PTTargetPoint\nfrom nncf.graph.transformations.commands import PTInsertionCommand\nfrom nncf.nncf_network import PTInsertionPoint\nfrom nncf.nncf_network import PTInsertionType\nfrom nncf.nncf_network import PTModelTransformer\nfrom nncf.quantization.node_matcher import PTOperatorMetatypeNodeMatcher\nfrom tests.composite.test_sparsity_quantization import get_basic_sparsity_plus_quantization_config\nfrom tests.conftest import TEST_ROOT\nfrom tests.helpers import BasicConvTestModel\nfrom tests.helpers import TwoConvTestModel\nfrom tests.helpers import check_correct_nncf_modules_replacement\nfrom tests.helpers import create_compressed_model_and_algo_for_test\nfrom tests.test_models.synthetic import ManyNonEvalModules\n\n\ndef test_disable_shape_matching():\n class MatMulModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.dummy_param = torch.nn.Parameter(torch.ones([1]))\n\n def forward(self, inputs):\n half1, half2 = torch.chunk(inputs, 2, dim=2)\n return torch.bmm(half1, half2.transpose(1, 2))\n\n model = MatMulModel()\n\n input_shape_1 = (3, 32, 32)\n input_shape_2 = (4, 64, 64)\n\n qnet_no_shape = NNCFNetwork(deepcopy(model), input_infos=[ModelInputInfo(input_shape_1), ],\n scopes_without_shape_matching=['MatMulModel']) # type: NNCFNetwork\n _ = qnet_no_shape(torch.zeros(*input_shape_1))\n graph_1 = deepcopy(qnet_no_shape.get_dynamic_graph())\n\n _ = qnet_no_shape(torch.zeros(*input_shape_2))\n graph_2 = 
deepcopy(qnet_no_shape.get_dynamic_graph())\n\n assert graph_1 == graph_2\n\n nodes_1 = list(graph_1.get_all_nodes())\n assert len(nodes_1) == 3 # 1 input node + 1 operation node + 1 output node\n\n qnet = NNCFNetwork(model, input_infos=[ModelInputInfo(input_shape_1), ]) # type: NNCFNetwork\n _ = qnet(torch.zeros(*input_shape_1))\n _ = qnet(torch.zeros(*input_shape_2))\n # The second forward run should have led to an increase in registered node counts\n # since disable_shape_matching was False and the network was run with a different\n # shape of input tensor\n assert qnet.get_dynamic_graph().get_nodes_count() > graph_1.get_nodes_count()\n\n\ndef test_check_correct_modules_replacement():\n model = TwoConvTestModel()\n nncf_model = NNCFNetwork(TwoConvTestModel(), input_infos=[ModelInputInfo([1, 1, 4, 4])]) # type: NNCFNetwork\n\n _, nncf_modules = check_correct_nncf_modules_replacement(model, nncf_model)\n assert set(nncf_modules) == set(nncf_model.get_nncf_modules())\n\n\n@register_module()\nclass ModuleOfUser(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.weight = torch.nn.Parameter(torch.ones([1]))\n\n def forward(self, input_):\n return input_ * self.weight\n\n\nclass TwoConvTestModelWithUserModule(TwoConvTestModel):\n def __init__(self):\n super().__init__()\n self.user_module = ModuleOfUser()\n\n def forward(self, x):\n x = super().forward(x)\n x = self.user_module(x)\n return x\n\n\ndef test_custom_module_registering():\n model = TwoConvTestModelWithUserModule()\n nncf_model = NNCFNetwork(model, input_infos=[ModelInputInfo([1, 1, 4, 4])]) # type: NNCFNetwork\n\n from nncf.layers import UNWRAPPED_USER_MODULES\n assert ModuleOfUser in UNWRAPPED_USER_MODULES.registry_dict.values()\n\n # pylint: disable=protected-access\n assert isinstance(nncf_model.user_module, ModuleOfUser)\n assert isinstance(nncf_model.user_module, _NNCFModuleMixin)\n assert type(nncf_model.user_module).__name__ == \"NNCFUserModuleOfUser\"\n\n user_module_attrs = dir(nncf_model.user_module)\n for attr in dir(_NNCFModuleMixin):\n assert attr in user_module_attrs\n\n\n# pylint: disable=protected-access\ndef test_find_node_in_nx_graph_by_scope():\n model = TwoConvTestModel()\n nncf_model = NNCFNetwork(deepcopy(model), input_infos=[ModelInputInfo([1, 1, 4, 4])]) # type: NNCFNetwork\n nncf_graph = nncf_model.get_original_graph()\n\n # Valid scopes should be successfully found\n valid_nncf_modules = nncf_model.get_nncf_modules()\n nodes_list = list(nncf_graph.get_all_node_ids())\n for module_scope, _ in valid_nncf_modules.items():\n graph_node = nncf_graph.find_node_in_nx_graph_by_scope(module_scope)\n assert graph_node is not None\n assert isinstance(graph_node, NNCFNode)\n assert graph_node.node_id in nodes_list\n\n fake_model = BasicConvTestModel()\n fake_nncf_model = NNCFNetwork(deepcopy(fake_model), input_infos=[ModelInputInfo([1, 1, 4, 4])])\n\n # Not valid scopes shouldn't be found\n fake_nncf_modules = fake_nncf_model.get_nncf_modules()\n for module_scope, _ in fake_nncf_modules.items():\n graph_node = nncf_graph.find_node_in_nx_graph_by_scope(module_scope)\n assert graph_node is None\n\n\nclass InsertionPointTestModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 1, 1, 1)\n self.linear_wts = nn.Parameter(torch.FloatTensor(size=(100, 100)))\n self.conv2 = nn.Conv2d(1, 1, 1, 1)\n self.relu = nn.ReLU()\n\n def forward(self, input_):\n x = self.conv1(input_)\n x = x.flatten()\n x = nn.functional.linear(x, self.linear_wts)\n x = x.reshape((1, 1, 10, 10))\n x = 
self.conv2(x)\n x = self.relu(x)\n return x\n\n\nclass TestInsertionCommands:\n @pytest.fixture()\n def setup(self):\n self.compressed_model = NNCFNetwork(InsertionPointTestModel(),\n [ModelInputInfo([1, 1, 10, 10])]) # type: NNCFNetwork\n\n conv1_module_scope = Scope.from_str('InsertionPointTestModel/NNCFConv2d[conv1]')\n point_for_conv1_weights = PTTargetPoint(target_type=TargetType.OPERATION_WITH_WEIGHTS,\n module_scope=conv1_module_scope)\n point_for_conv1_inputs = PTTargetPoint(target_type=TargetType.OPERATION_WITH_WEIGHTS,\n module_scope=conv1_module_scope)\n point_for_conv1_activations = PTTargetPoint(target_type=TargetType.POST_LAYER_OPERATION,\n module_scope=conv1_module_scope)\n\n conv2_module_scope = Scope.from_str('InsertionPointTestModel/NNCFConv2d[conv2]')\n point_for_conv2_weights = PTTargetPoint(target_type=TargetType.OPERATION_WITH_WEIGHTS,\n module_scope=conv2_module_scope)\n point_for_conv2_inputs = PTTargetPoint(target_type=TargetType.OPERATION_WITH_WEIGHTS,\n module_scope=conv2_module_scope)\n point_for_conv2_activations = PTTargetPoint(target_type=TargetType.POST_LAYER_OPERATION,\n module_scope=conv2_module_scope)\n\n linear_op_scope = Scope.from_str('InsertionPointTestModel/linear_0')\n linear_op_context = InputAgnosticOperationExecutionContext('linear',\n linear_op_scope,\n 0)\n point_for_linear_weight_input = PTTargetPoint(target_type=TargetType.OPERATOR_PRE_HOOK,\n ia_op_exec_context=linear_op_context, input_port_id=0)\n point_for_linear_activation = PTTargetPoint(target_type=TargetType.OPERATOR_POST_HOOK,\n ia_op_exec_context=linear_op_context)\n\n relu_op_scope = Scope.from_str('InsertionPointTestModel/ReLU[relu]/relu')\n relu_op_context = InputAgnosticOperationExecutionContext('relu',\n relu_op_scope,\n 0)\n point_for_relu_inputs = PTTargetPoint(target_type=TargetType.OPERATOR_PRE_HOOK,\n ia_op_exec_context=relu_op_context, input_port_id=0)\n point_for_relu_activations = PTTargetPoint(target_type=TargetType.OPERATOR_POST_HOOK,\n ia_op_exec_context=relu_op_context)\n\n available_points = [point_for_conv1_weights,\n point_for_conv2_weights,\n point_for_conv1_inputs,\n point_for_conv2_inputs,\n point_for_conv1_activations,\n point_for_conv2_activations,\n point_for_linear_activation,\n point_for_linear_weight_input,\n point_for_relu_activations,\n point_for_relu_inputs]\n\n @pytest.mark.parametrize(\"target_point\", available_points)\n def test_single_insertions(self, setup, target_point):\n if target_point.target_type in [TargetType.OPERATOR_PRE_HOOK, TargetType.OPERATOR_POST_HOOK]:\n hook = lambda x: x\n else:\n hook = BaseOp(lambda x: x)\n\n pt_ip = PTInsertionPoint(target_point)\n self.compressed_model.insert_at_point(pt_ip, [hook])\n\n # pylint:disable=protected-access\n if pt_ip.insertion_type == PTInsertionType.OPERATOR_PRE_HOOK:\n ctx = self.compressed_model.get_tracing_context()\n pre_hook_id = PreHookId(target_point.ia_op_exec_context, input_port_id=target_point.input_port_id)\n assert ctx._pre_hooks[pre_hook_id][0] is hook\n if pt_ip.insertion_type == PTInsertionType.OPERATOR_POST_HOOK:\n ctx = self.compressed_model.get_tracing_context()\n assert ctx._post_hooks[target_point.ia_op_exec_context][0] is hook\n if pt_ip.insertion_type == PTInsertionType.NNCF_MODULE_PRE_OP:\n module = self.compressed_model.get_module_by_scope(target_point.module_scope)\n assert module.pre_ops[\"0\"] is hook\n\n if pt_ip.insertion_type == PTInsertionType.NNCF_MODULE_POST_OP:\n module = self.compressed_model.get_module_by_scope(target_point.module_scope)\n assert 
module.post_ops[\"0\"] is hook\n\n priority_types = [\"same\", \"different\"]\n insertion_types = TargetType\n priority_test_cases = list(itertools.product(priority_types, insertion_types))\n\n @staticmethod\n def check_order(iterable1: List, iterable2: List, ordering: List):\n for idx, order in enumerate(ordering):\n assert iterable1[idx] is iterable2[order]\n\n # pylint:disable=undefined-variable\n @pytest.mark.parametrize(\"case\", priority_test_cases, ids=[x[1].name + '-' + x[0] for x in priority_test_cases])\n def test_priority(self, case, setup):\n # pylint:disable=too-many-branches\n priority_type = case[0]\n insertion_type = case[1]\n if insertion_type in [TargetType.OPERATION_WITH_WEIGHTS, TargetType.POST_LAYER_OPERATION]:\n hook1 = BaseOp(lambda x: x)\n hook2 = BaseOp(lambda x: 2 * x)\n hook3 = BaseOp(lambda x: 3 * x)\n else:\n hook1 = lambda x: x\n hook2 = lambda x: 2 * x\n hook3 = lambda x: 3 * x\n\n if insertion_type == TargetType.OPERATION_WITH_WEIGHTS:\n point = self.point_for_conv2_weights\n elif insertion_type == TargetType.POST_LAYER_OPERATION:\n point = self.point_for_conv1_activations\n elif insertion_type == TargetType.OPERATOR_PRE_HOOK:\n point = self.point_for_linear_weight_input\n elif insertion_type == TargetType.OPERATOR_POST_HOOK:\n point = self.point_for_relu_activations\n else:\n pytest.skip(\"Insertion type {} currently unsupported in PT\".format(insertion_type))\n\n if priority_type == \"same\":\n # Same-priority commands will be executed in registration order\n command1 = PTInsertionCommand(point, hook1, TransformationPriority.DEFAULT_PRIORITY)\n command2 = PTInsertionCommand(point, hook2, TransformationPriority.DEFAULT_PRIORITY)\n command3 = PTInsertionCommand(point, hook3, TransformationPriority.DEFAULT_PRIORITY)\n else:\n # Prioritized commands will be executed in ascending priority order\n command1 = PTInsertionCommand(point, hook1, TransformationPriority.SPARSIFICATION_PRIORITY)\n command2 = PTInsertionCommand(point, hook2, TransformationPriority.QUANTIZATION_PRIORITY)\n command3 = PTInsertionCommand(point, hook3, TransformationPriority.DEFAULT_PRIORITY)\n\n layout = PTTransformationLayout()\n layout.register(command1)\n layout.register(command2)\n layout.register(command3)\n self.compressed_model = PTModelTransformer(self.compressed_model, layout).transform()\n\n hook_list = [hook1, hook2, hook3]\n\n if priority_type == \"same\":\n order = [0, 1, 2]\n elif priority_type == \"different\":\n order = [2, 0, 1]\n\n # pylint:disable=protected-access\n if insertion_type == TargetType.OPERATOR_PRE_HOOK:\n ctx = self.compressed_model.get_tracing_context()\n pre_hook_id = PreHookId(point.ia_op_exec_context, input_port_id=point.input_port_id)\n self.check_order(ctx._pre_hooks[pre_hook_id], hook_list, order)\n if insertion_type == TargetType.OPERATOR_POST_HOOK:\n ctx = self.compressed_model.get_tracing_context()\n self.check_order(ctx._post_hooks[point.ia_op_exec_context], hook_list, order)\n\n if insertion_type == TargetType.OPERATION_WITH_WEIGHTS:\n module = self.compressed_model.get_module_by_scope(point.module_scope)\n # Works because Pytorch ModuleDict is ordered\n self.check_order([x.operand for x in module.pre_ops.values()], hook_list, order)\n\n if insertion_type == TargetType.POST_LAYER_OPERATION:\n module = self.compressed_model.get_module_by_scope(point.module_scope)\n # Works because Pytorch ModuleDict is ordered\n self.check_order(list(module.post_ops.values()), hook_list, order)\n\n\ndef 
mark_input_ports_lexicographically_based_on_input_node_key(graph: nx.DiGraph):\n for node_key in graph.nodes:\n input_edges = graph.in_edges(node_key)\n sorted_input_edges = sorted(input_edges, key=lambda x: x[0])\n for idx, edge in enumerate(sorted_input_edges):\n graph.edges[edge][PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR] = idx\n\n\ndef get_nncf_graph_from_mock_nx_graph(nx_graph: nx.DiGraph) -> PTNNCFGraph:\n mock_graph = PTNNCFGraph()\n key_vs_id = {}\n edge_vs_output_idx_and_creator_id = {} # type: Dict[Tuple[str, str], Tuple[int, int]]\n from networkx.algorithms.dag import lexicographical_topological_sort\n for idx, curr_node_key in enumerate(lexicographical_topological_sort(nx_graph)):\n node = nx_graph.nodes[curr_node_key]\n if PTNNCFGraph.IA_OP_EXEC_CONTEXT_NODE_ATTR in node:\n ia_op_exec_context = node[PTNNCFGraph.IA_OP_EXEC_CONTEXT_NODE_ATTR]\n else:\n ia_op_exec_context = InputAgnosticOperationExecutionContext(curr_node_key, Scope(), 0)\n module_attributes = node.get(PTNNCFGraph.MODULE_ATTRIBUTES)\n node_id = idx\n node = PTNNCFNode(node_id, ia_op_exec_context, {\n PTNNCFGraph.MODULE_ATTRIBUTES: module_attributes\n })\n mock_graph.add_nncf_node(node)\n key_vs_id[curr_node_key] = node_id\n\n preds = list(nx_graph.predecessors(curr_node_key))\n for pred_idx, pred in enumerate(preds):\n in_edge = (pred, curr_node_key)\n _, creator_id = edge_vs_output_idx_and_creator_id[in_edge]\n mock_graph.add_edge_between_nncf_nodes(creator_id, node_id,\n [1, 1, 1, 1], pred_idx)\n\n for out_idx, out_edge in enumerate(nx_graph.out_edges(curr_node_key)):\n edge_vs_output_idx_and_creator_id[out_edge] = (out_idx, node.node_id)\n return mock_graph\n\n\ndef get_two_branch_mock_model_graph() -> PTNNCFGraph:\n mock_nx_graph = nx.DiGraph()\n\n # (0 /A)\n # |\n # (1 /B)\n # / \\\n # (2 /C) (3 /D)\n # | |\n # (4 /E) |\n # \\ /\n # (5 /F)\n # |\n # (6 /G)\n # |\n # (7 /H)\n\n node_keys = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\n\n for node_key in node_keys:\n mock_nx_graph.add_node(node_key)\n\n mock_nx_graph.add_edges_from([('A', 'B'), ('B', 'C'), ('B', 'D'), ('C', 'E'), ('E', 'F'),\n ('D', 'F'), ('F', 'G'), ('G', 'H')])\n\n mark_input_ports_lexicographically_based_on_input_node_key(mock_nx_graph)\n return get_nncf_graph_from_mock_nx_graph(mock_nx_graph)\n\n\nMOCK_OPERATOR_NAME = \"conv_transpose2d\"\n\n\ndef get_mock_nncf_node_attrs(op_name=None, scope_str=None):\n op_name_to_set = op_name if op_name is not None else MOCK_OPERATOR_NAME\n scope_to_set = Scope() if scope_str is None else Scope.from_str(scope_str)\n return {\n PTNNCFGraph.IA_OP_EXEC_CONTEXT_NODE_ATTR: InputAgnosticOperationExecutionContext(op_name_to_set,\n scope_to_set,\n 0)\n }\n\n\ndef get_mock_model_graph_with_mergeable_pattern() -> NNCFGraph:\n mock_nx_graph = nx.DiGraph()\n\n # (A)\n # |\n # (conv2d)\n # |\n # (batch_norm)\n # |\n # (RELU)\n # |\n # (B)\n\n node_keys = ['conv2d', 'batch_norm', VersionAgnosticNames.RELU, 'A', 'B']\n for node_key in node_keys:\n mock_nx_graph.add_node(node_key, **get_mock_nncf_node_attrs(op_name=node_key))\n\n mock_nx_graph.add_edges_from([('A', 'conv2d', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n ('conv2d', 'batch_norm', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n ('batch_norm', VersionAgnosticNames.RELU, {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n (VersionAgnosticNames.RELU, 'B', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0})])\n return get_nncf_graph_from_mock_nx_graph(mock_nx_graph)\n\n\ndef get_mock_model_graph_with_no_mergeable_pattern() -> NNCFGraph:\n mock_nx_graph = nx.DiGraph()\n\n # (A)\n # |\n # 
(conv2d)\n # |\n # (C)\n # |\n # (batch_norm)\n # |\n # (D)\n # |\n # (RELU)\n # |\n # (B)\n\n node_keys = ['conv2d', 'batch_norm', VersionAgnosticNames.RELU, 'A', 'B', 'C', 'D']\n for node_key in node_keys:\n mock_nx_graph.add_node(node_key, **get_mock_nncf_node_attrs(op_name=node_key))\n\n mock_nx_graph.add_edges_from([('A', 'conv2d', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n ('conv2d', 'C', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n ('C', 'batch_norm', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n ('batch_norm', 'D', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n ('D', VersionAgnosticNames.RELU, {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n (VersionAgnosticNames.RELU, 'B', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0})])\n return get_nncf_graph_from_mock_nx_graph(mock_nx_graph)\n\n\ndef get_mock_model_graph_with_broken_output_edge_pattern() -> NNCFGraph:\n mock_nx_graph = nx.DiGraph()\n\n # (A)\n # |\n # (conv2d)----\\\n # | |\n # (batch_norm) |\n # | |\n # (RELU) |\n # | |\n # (C)--------/\n # |\n # (B)\n\n node_keys = ['conv2d', 'batch_norm', VersionAgnosticNames.RELU, 'A', 'B', 'C']\n for node_key in node_keys:\n mock_nx_graph.add_node(node_key, **get_mock_nncf_node_attrs(op_name=node_key))\n\n mock_nx_graph.add_edges_from([('A', 'conv2d', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n ('conv2d', 'batch_norm', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n ('conv2d', 'C', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 1}),\n ('batch_norm', VersionAgnosticNames.RELU, {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n (VersionAgnosticNames.RELU, 'C', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0}),\n ('C', 'B', {PTNNCFGraph.IN_PORT_NAME_EDGE_ATTR: 0})])\n return get_nncf_graph_from_mock_nx_graph(mock_nx_graph)\n\n\nMERGE_PATTERN_TEST_CASES = (\n [get_mock_model_graph_with_mergeable_pattern, \"basic_pattern\"],\n [get_mock_model_graph_with_no_mergeable_pattern, \"no_pattern\"],\n [get_mock_model_graph_with_broken_output_edge_pattern, \"broken_output_edges_pattern\"]\n)\n\n\nclass TestInsertionPointGraph:\n def test_insertion_point_setup(self):\n # TODO: Change testing premises when module pre/post-op hooks and input/output nodes\n # are correctly handled\n mock_graph = get_two_branch_mock_model_graph()\n\n ip_graph = InsertionPointGraph(mock_graph)\n\n nx_graph = mock_graph.get_nx_graph_copy()\n ref_node_len = 3 * len(nx_graph.nodes) # 2 additional nodes per each operator node\n ref_edge_len = 3 * len(nx_graph.edges)\n\n assert len(ip_graph.nodes) == ref_node_len\n assert len(ip_graph.edges) == ref_edge_len\n\n for nncf_node_idx in mock_graph.get_all_node_ids():\n node_key = mock_graph.get_node_key_by_id(nncf_node_idx)\n ip_graph_op_node = ip_graph.nodes[node_key]\n assert ip_graph_op_node[InsertionPointGraph.NODE_TYPE_NODE_ATTR] == InsertionPointGraphNodeType.OPERATOR\n preds = list(ip_graph.predecessors(node_key))\n succs = list(ip_graph.successors(node_key))\n assert len(succs) == 1\n post_hook_ip_node_key = succs[0]\n post_hook_ip_node = ip_graph.nodes[succs[0]]\n post_hook_ip_node_type = post_hook_ip_node[InsertionPointGraph.NODE_TYPE_NODE_ATTR]\n assert post_hook_ip_node_type == InsertionPointGraphNodeType.INSERTION_POINT\n\n pre_hook_ip_node_keys = preds\n for pre_hook_ip_node_key in pre_hook_ip_node_keys:\n pre_hook_ip_node = ip_graph.nodes[pre_hook_ip_node_key]\n pre_hook_ip_node_type = pre_hook_ip_node[InsertionPointGraph.NODE_TYPE_NODE_ATTR]\n assert pre_hook_ip_node_type == InsertionPointGraphNodeType.INSERTION_POINT\n\n ref_associated_ip_node_keys_set = {*pre_hook_ip_node_keys, post_hook_ip_node_key}\n assert 
ref_associated_ip_node_keys_set == ip_graph_op_node[\n InsertionPointGraph.ASSOCIATED_IP_NODE_KEYS_NODE_ATTR]\n original_neighbours = nx_graph.neighbors(node_key)\n for neighbour in original_neighbours:\n # IP node insertion should not disrupt the graph superstructure\n ip_graph_paths = list(nx.all_simple_paths(ip_graph, node_key, neighbour))\n for path in ip_graph_paths:\n path = path[1:-1]\n for path_node_key in path:\n node = ip_graph.nodes[path_node_key]\n node_type = node[InsertionPointGraph.NODE_TYPE_NODE_ATTR]\n assert node_type == InsertionPointGraphNodeType.INSERTION_POINT\n\n for node_key, node in ip_graph.nodes.items():\n preds = list(ip_graph.predecessors(node_key))\n succs = list(ip_graph.successors(node_key))\n assert len(preds) != 0 or len(succs) != 0\n\n for from_node_key, to_node_key in ip_graph.edges.keys():\n assert from_node_key in ip_graph.nodes\n assert to_node_key in ip_graph.nodes\n\n def test_insertion_point_data_in_ip_nodes(self):\n # TODO: extend for modules\n mock_graph = nx.DiGraph()\n\n mock_graph.add_node('bar')\n mock_graph.add_node('baz')\n mock_graph.add_edge('bar', 'baz')\n nncf_graph = get_nncf_graph_from_mock_nx_graph(mock_graph)\n\n ip_graph = InsertionPointGraph(nncf_graph)\n\n for nncf_node in nncf_graph.get_all_nodes():\n node_id = nncf_node.node_id\n node_key = nncf_graph.get_node_key_by_id(node_id)\n preds = list(ip_graph.predecessors(node_key))\n succs = list(ip_graph.successors(node_key))\n\n post_hook_ip_node = ip_graph.nodes[succs[0]]\n post_hook_ip = post_hook_ip_node[InsertionPointGraph.INSERTION_POINT_DATA_NODE_ATTR]\n assert post_hook_ip.target_type == TargetType.OPERATOR_POST_HOOK\n assert post_hook_ip.ia_op_exec_context == nncf_node.ia_op_exec_context\n\n for pre_hook_ip_node_key in preds:\n pre_hook_ip_node = ip_graph.nodes[pre_hook_ip_node_key]\n pre_hook_ip = pre_hook_ip_node[InsertionPointGraph.INSERTION_POINT_DATA_NODE_ATTR]\n assert pre_hook_ip.target_type == TargetType.OPERATOR_PRE_HOOK\n assert pre_hook_ip.ia_op_exec_context == nncf_node.ia_op_exec_context\n\n def test_operator_metatype_marking(self):\n from nncf.graph.operator_metatypes import Conv2dMetatype, BatchNormMetatype, RELUMetatype, \\\n MaxPool2dMetatype, \\\n ConvTranspose2dMetatype, DepthwiseConv2dSubtype, AddMetatype, AvgPool2dMetatype, LinearMetatype\n ref_scope_vs_metatype_dict = {\n \"/\" + MODEL_INPUT_OP_NAME + \"_0\": InputNoopMetatype,\n \"ModelForMetatypeTesting/NNCFConv2d[conv_regular]/conv2d_0\": Conv2dMetatype,\n \"ModelForMetatypeTesting/BatchNorm2d[bn]/batch_norm_0\": BatchNormMetatype,\n \"ModelForMetatypeTesting/RELU_0\": RELUMetatype,\n \"ModelForMetatypeTesting/MaxPool2d[max_pool2d]/max_pool2d_0\": MaxPool2dMetatype,\n \"ModelForMetatypeTesting/NNCFConvTranspose2d[conv_transpose]/conv_transpose2d_0\": ConvTranspose2dMetatype,\n \"ModelForMetatypeTesting/NNCFConv2d[conv_depthwise]/conv2d_0\": DepthwiseConv2dSubtype,\n \"ModelForMetatypeTesting/__iadd___0\": AddMetatype,\n \"ModelForMetatypeTesting/AdaptiveAvgPool2d[adaptive_avg_pool]/adaptive_avg_pool2d_0\": AvgPool2dMetatype,\n \"ModelForMetatypeTesting/NNCFLinear[linear]/linear_0\": LinearMetatype,\n \"/\" + MODEL_OUTPUT_OP_NAME + \"_0\": OutputNoopMetatype,\n }\n\n class ModelForMetatypeTesting(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv_regular = torch.nn.Conv2d(in_channels=3,\n out_channels=16,\n kernel_size=3)\n self.bn = torch.nn.BatchNorm2d(num_features=16)\n self.max_pool2d = torch.nn.MaxPool2d(kernel_size=2)\n self.conv_transpose = 
torch.nn.ConvTranspose2d(in_channels=16,\n out_channels=8,\n kernel_size=3)\n self.conv_depthwise = torch.nn.Conv2d(in_channels=8, out_channels=8,\n kernel_size=5, groups=8)\n self.adaptive_avg_pool = torch.nn.AdaptiveAvgPool2d(output_size=1)\n self.linear = torch.nn.Linear(in_features=8, out_features=1)\n\n def forward(self, input_):\n x = self.conv_regular(input_)\n x = self.bn(x)\n x = torch.nn.functional.relu(x)\n x.transpose_(2, 3)\n x = self.max_pool2d(x)\n x = self.conv_transpose(x)\n x = self.conv_depthwise(x)\n x += torch.ones_like(x)\n x = self.adaptive_avg_pool(x)\n x = self.linear(x.flatten())\n return x\n\n model = ModelForMetatypeTesting()\n nncf_network = NNCFNetwork(model, [ModelInputInfo([1, 3, 300, 300])])\n nncf_graph = nncf_network.get_original_graph()\n\n for nncf_node in nncf_graph.get_all_nodes(): # type: PTNNCFNode\n scope_str = str(nncf_node.ia_op_exec_context)\n assert scope_str in ref_scope_vs_metatype_dict\n ref_metatype = ref_scope_vs_metatype_dict[scope_str]\n assert PTOperatorMetatypeNodeMatcher.match(nncf_node) == ref_metatype\n\n @pytest.mark.parametrize((\"mock_graph_factory\", \"dot_file_name\"),\n MERGE_PATTERN_TEST_CASES,\n ids=[x[1] for x in MERGE_PATTERN_TEST_CASES])\n def test_get_ip_graph_with_merged_operations(self, mock_graph_factory, dot_file_name):\n mock_graph = mock_graph_factory()\n ip_graph = InsertionPointGraph(mock_graph)\n merged_ip_graph = ip_graph.get_ip_graph_with_merged_hw_optimized_operations()\n\n data_dir = TEST_ROOT / 'data/reference_graphs/pattern_merging' # type: Path\n\n path_to_dot_file = data_dir / '{}.dot'.format(dot_file_name)\n\n # validate .dot file manually!\n if not path_to_dot_file.exists():\n if not data_dir.exists():\n data_dir.mkdir(parents=True)\n nx.drawing.nx_pydot.write_dot(merged_ip_graph, str(path_to_dot_file))\n\n load_graph = nx.drawing.nx_pydot.read_dot(str(path_to_dot_file))\n\n for key in load_graph.nodes.keys():\n key.replace(r'\\\\n', r'\\n') # Somehow pydot mangles the \\n characters while writing a .dot file\n\n sanitized_loaded_keys = [key.replace('\\\\n', '\\n') for key in load_graph.nodes.keys()]\n sanitized_loaded_edges = [(u.replace('\\\\n', '\\n'),\n v.replace('\\\\n', '\\n')) for u, v in nx.DiGraph(load_graph).edges]\n\n assert Counter(sanitized_loaded_keys) == Counter(list(merged_ip_graph.nodes.keys()))\n assert Counter(sanitized_loaded_edges) == Counter(list(merged_ip_graph.edges))\n\n\ndef test_can_collect_scopes_of_train_only_modules():\n model = ManyNonEvalModules()\n graph_builder = GraphBuilder(custom_forward_fn=lambda model_: model_(torch.randn([1, 1, 1, 1])))\n actual_scopes = NNCFNetwork.collect_eval_only_ops_exec_context(model, graph_builder)\n ref_scopes = {\n 'ManyNonEvalModules/AvgPool2d[avg_pool]/avg_pool2d_0',\n 'ManyNonEvalModules/ModuleWithMixedModules[mixed_modules]/Dropout/dropout_0',\n 'ManyNonEvalModules/ModuleWithMixedModules[mixed_modules]/Dropout/dropout_1',\n 'ManyNonEvalModules/ModuleWithMixedModules[mixed_modules]/Linear[called_linear]/linear_0',\n 'ManyNonEvalModules/ModuleWithMixedModules[mixed_modules]/CustomWeightModule[custom]/linear_0'\n }\n assert set(actual_scopes) == ref_scopes\n\n\ndef test_get_clean_shallow_copy():\n model = TwoConvTestModelWithUserModule()\n config = get_basic_sparsity_plus_quantization_config()\n sparse_quantized_model, _ = create_compressed_model_and_algo_for_test(model, config)\n external_quantizers = getattr(sparse_quantized_model, EXTERNAL_QUANTIZERS_STORAGE_NAME)\n assert external_quantizers\n old_nncf_modules = 
sparse_quantized_model.get_nncf_modules().values()\n old_nncf_module_pre_ops = [module.pre_ops for module in old_nncf_modules]\n assert any(old_nncf_module_pre_ops)\n assert sparse_quantized_model.get_graph().get_nodes_count() != \\\n sparse_quantized_model.get_original_graph().get_nodes_count()\n\n clean_copy = sparse_quantized_model.get_clean_shallow_copy()\n assert clean_copy is not sparse_quantized_model\n assert clean_copy.get_nncf_wrapped_model() is sparse_quantized_model.get_nncf_wrapped_model()\n new_nncf_modules = clean_copy.get_nncf_modules().values()\n new_nncf_module_pre_ops = [module.pre_ops for module in new_nncf_modules]\n assert not any(new_nncf_module_pre_ops)\n assert clean_copy.get_graph().get_nodes_count() == clean_copy.get_original_graph().get_nodes_count()\n\n\ndef test_temporary_clean_view():\n model = TwoConvTestModelWithUserModule()\n config = get_basic_sparsity_plus_quantization_config()\n sparse_quantized_model, _ = create_compressed_model_and_algo_for_test(model, config)\n old_sd = sparse_quantized_model.state_dict()\n old_graph = deepcopy(sparse_quantized_model.get_graph())\n with sparse_quantized_model.temporary_clean_view() as intermediate_model:\n clean_sd = intermediate_model.state_dict()\n assert len(clean_sd) < len(old_sd)\n new_nncf_modules = intermediate_model.get_nncf_modules().values()\n new_nncf_module_pre_ops = [module.pre_ops for module in new_nncf_modules]\n assert not any(new_nncf_module_pre_ops)\n assert intermediate_model.get_graph().get_nodes_count() == \\\n intermediate_model.get_original_graph().get_nodes_count()\n sd_after_tmp_clean_view = sparse_quantized_model.state_dict()\n for key in old_sd.keys():\n assert key in sd_after_tmp_clean_view\n assert torch.all(torch.eq(sd_after_tmp_clean_view[key], old_sd[key]))\n sparse_quantized_model.rebuild_graph()\n graph_after_tmp_clean_view = sparse_quantized_model.get_graph()\n assert graph_after_tmp_clean_view == old_graph\n\n\nclass TestModelMultipleForward(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = nn.Conv2d(1, 1, 1, 1)\n self.conv1 = nn.Conv2d(1, 1, 1, 1)\n\n def forward(self, x):\n x = self.conv(x)\n x1 = self.conv1(x)\n x2 = self.conv1(x)\n return x1, x2\n\n\ndef test_multiple_forward():\n # Check that all convolution nodes in model have ia_op_exec_context and module_attributes\n # for case with multiple forward of one module\n model = TestModelMultipleForward()\n config = get_basic_sparsity_plus_quantization_config()\n sparse_quantized_model, _ = create_compressed_model_and_algo_for_test(model, config)\n graph = sparse_quantized_model.get_original_graph()\n for node_key in list(graph.get_all_node_keys())[1:-2]:\n node = graph.get_nx_node_by_key(node_key)\n assert node.get(PTNNCFGraph.IA_OP_EXEC_CONTEXT_NODE_ATTR)\n assert node.get(PTNNCFGraph.MODULE_ATTRIBUTES)\n",
"\"\"\"\n Copyright (c) 2019-2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport torch\n\nfrom nncf.dynamic_graph.graph_tracer import ModelInputInfo\nfrom nncf.common.graph.graph import MODEL_INPUT_OP_NAME\nfrom nncf.hw_config import HWConfig\nfrom nncf.nncf_network import NNCFNetwork\nfrom nncf.quantization.algo import QuantizationBuilder, QuantizationController, QuantizerSetupGeneratorBase\nfrom nncf.quantization.layers import SymmetricQuantizer, AsymmetricQuantizer, BaseQuantizer\nfrom nncf.common.quantization.structs import QuantizationMode\n\nfrom tests.quantization.test_quantization_helpers import get_quantization_config_without_range_init\n\n\nclass ModelForHWConfigTest(torch.nn.Module):\n def __init__(self, with_gelu=False):\n super().__init__()\n self.with_gelu = with_gelu\n self.conv2d = torch.nn.Conv2d(2, 1, 1)\n\n def forward(self, x_):\n if self.with_gelu:\n x_ = torch.nn.functional.gelu(x_)\n x_ = self.conv2d(x_)\n x_ = x_.matmul(x_)\n return x_\n\n\nclass TestHWConfigRules:\n @staticmethod\n def get_model_and_ctrl_with_applied_hw_config_quantization(model: torch.nn.Module, hw_config_dict: dict,\n should_be_quantize_inputs: bool = True):\n nncf_config = get_quantization_config_without_range_init(model_size=1)\n nncf_config[\"compression\"].update({\"quantize_inputs\": should_be_quantize_inputs})\n nncf_config[\"target_device\"] = \"mock\"\n\n net = NNCFNetwork(model, input_infos=[ModelInputInfo([1, 2, 1, 1])])\n hw_config = HWConfig.from_dict(hw_config_dict)\n qbuilder = QuantizationBuilder(nncf_config[\"compression\"], should_init=False)\n qbuilder.hw_config = hw_config\n net = qbuilder.apply_to(net)\n ctrl = qbuilder.build_controller(net)\n return net, ctrl\n\n @staticmethod\n def quantizer_has_default_config(quantizer: BaseQuantizer) -> bool:\n default_qconfig = QuantizerSetupGeneratorBase.DEFAULT_QUANTIZER_CONFIG\n is_ok = True\n is_ok &= (quantizer.num_bits == default_qconfig.num_bits)\n is_ok &= (quantizer.per_channel == default_qconfig.per_channel)\n if default_qconfig.signedness_to_force is not None:\n is_ok &= (quantizer.signed == default_qconfig.signedness_to_force)\n is_ok &= isinstance(quantizer,\n SymmetricQuantizer if default_qconfig.mode == QuantizationMode.SYMMETRIC else\n AsymmetricQuantizer)\n return is_ok\n\n @staticmethod\n def get_quantizer_module_after_op_name(op_name: str, ctrl: QuantizationController) -> BaseQuantizer:\n input_matches = list(filter(lambda x: x.ia_op_exec_context.operator_name == op_name,\n ctrl.non_weight_quantizers.keys()))\n assert len(input_matches) == 1\n act_quant_key = input_matches[0]\n act_quantizer_ref = ctrl.non_weight_quantizers[act_quant_key].quantizer_module_ref\n return act_quantizer_ref\n\n def test_missing_ir_op_results_in_fp32(self):\n hw_config_dict = {\n \"target_device\": \"test\",\n \"config\": {\n \"quantization\": {\n \"q8_a\": {\n \"bits\": 8,\n \"mode\": [\n \"symmetric\",\n \"asymmetric\"\n ],\n \"granularity\": \"pertensor\"\n },\n }\n },\n \"operations\": [\n {\n \"type\": \"MatMul\",\n 
\"quantization\": {\n \"activations\": \"q8_a\",\n \"weights\": \"q8_a\"\n }\n },\n ]\n }\n\n _, ctrl = self.get_model_and_ctrl_with_applied_hw_config_quantization(ModelForHWConfigTest(with_gelu=False),\n hw_config_dict, False)\n assert len(ctrl.weight_quantizers) == 0 # Conv2d weights remain unquantized\n assert len(ctrl.non_weight_quantizers) == 1 # Only the matmul input is quantized\n\n key = next(iter(ctrl.non_weight_quantizers.keys()))\n # Corresponds to a quantizer AFTER conv2d, i.e. matmul input quantizer\n assert key.ia_op_exec_context.operator_name == \"conv2d\"\n\n def test_missing_non_ir_op_results_in_default_qconf_list(self):\n # GELU is the non-IR op here, adjust if this no longer reflects reality\n hw_config_dict = {\n \"target_device\": \"test\",\n \"config\": {\n \"quantization\": {\n \"q4_a\": {\n \"bits\": 4,\n \"mode\": [\n \"symmetric\",\n \"asymmetric\"\n ],\n \"granularity\": \"pertensor\"\n },\n }\n },\n \"operations\": [\n {\n \"type\": \"MatMul\",\n \"quantization\": {\n \"activations\": \"q4_a\",\n \"weights\": \"q4_a\"\n },\n },\n {\n\n \"type\": \"Convolution\",\n \"quantization\": {\n \"activations\": \"q4_a\",\n \"weights\": \"q4_a\"\n }\n },\n ]\n }\n\n _, ctrl = self.get_model_and_ctrl_with_applied_hw_config_quantization(ModelForHWConfigTest(with_gelu=True),\n hw_config_dict)\n assert len(ctrl.weight_quantizers) == 1 # Conv2d weights quantized\n assert len(ctrl.non_weight_quantizers) == 3 # GELU input, conv2d input, matmul input (single in this case)\n\n w_key = next(iter(ctrl.weight_quantizers.keys()))\n assert str(w_key.scope) == \"ModelForHWConfigTest/NNCFConv2d[conv2d]\"\n\n gelu_input_act_quantizer_ref = self.get_quantizer_module_after_op_name(MODEL_INPUT_OP_NAME, ctrl)\n assert self.quantizer_has_default_config(gelu_input_act_quantizer_ref)\n\n def test_unspecified_quantization_for_fundamentally_quantizable_op_results_in_default_qconfig(self):\n hw_config_dict = { # Only the MatMul will receive a default config here (8 bit symmetric per-tensor)\n \"target_device\": \"test\",\n \"config\": {\n \"quantization\": {\n \"q4_a\": {\n \"bits\": 4,\n \"mode\": [\n \"symmetric\",\n \"asymmetric\"\n ],\n \"granularity\": \"pertensor\"\n },\n }\n },\n \"operations\": [\n {\n \"type\": \"MatMul\"\n },\n {\n\n \"type\": \"Convolution\",\n \"quantization\": {\n \"activations\": \"q4_a\",\n \"weights\": \"q4_a\"\n }\n },\n ]\n }\n\n _, ctrl = self.get_model_and_ctrl_with_applied_hw_config_quantization(ModelForHWConfigTest(with_gelu=False),\n hw_config_dict, False)\n assert len(ctrl.weight_quantizers) == 1 # Conv2d weights quantized\n conv2d_weight_quantizer_ref = list(ctrl.weight_quantizers.values())[0].quantizer_module_ref\n assert not self.quantizer_has_default_config(conv2d_weight_quantizer_ref)\n\n assert len(ctrl.non_weight_quantizers) == 1 # Matmul input\n matmul_input_matches = list(filter(lambda x: x.ia_op_exec_context.operator_name == \"conv2d\",\n ctrl.non_weight_quantizers.keys()))\n\n assert len(matmul_input_matches) == 1\n matmul_quantizer_ref = ctrl.non_weight_quantizers[matmul_input_matches[0]].quantizer_module_ref\n assert self.quantizer_has_default_config(matmul_quantizer_ref)\n\n non_matmul_input_matches = list(filter(lambda x: x.ia_op_exec_context.operator_name != \"conv2d\",\n ctrl.non_weight_quantizers.keys()))\n for quantizer_id in non_matmul_input_matches:\n quantizer_ref = ctrl.non_weight_quantizers[quantizer_id].quantizer_module_ref\n assert not self.quantizer_has_default_config(quantizer_ref)\n\n def 
test_unspecified_quantization_for_weighted_op_results_in_default_qconf_list_for_weights(self):\n hw_config_dict = {\n \"target_device\": \"test\",\n \"config\": {\n \"quantization\": {\n \"q4_a\": {\n \"bits\": 4,\n \"mode\": [\n \"symmetric\",\n \"asymmetric\"\n ],\n \"granularity\": \"pertensor\"\n },\n }\n },\n \"operations\": [\n {\n \"type\": \"MatMul\"\n },\n {\n \"type\": \"Convolution\"\n },\n ]\n }\n\n _, ctrl = self.get_model_and_ctrl_with_applied_hw_config_quantization(ModelForHWConfigTest(with_gelu=False),\n hw_config_dict)\n assert len(ctrl.weight_quantizers) == 1 # Conv2d weights quantized with default config\n assert len(ctrl.non_weight_quantizers) == 2 # All inputs are quantized.\n for quantizer_ref in ctrl.all_quantizations.values():\n assert self.quantizer_has_default_config(quantizer_ref)\n"
] |
[
[
"torch.ones",
"torch.nn.ConvTranspose2d",
"torch.zeros",
"torch.eq",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.FloatTensor",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.chunk",
"torch.nn.ReLU",
"torch.nn.functional.linear",
"torch.ones_like"
],
[
"torch.nn.functional.gelu",
"torch.nn.Conv2d"
]
] |
Depersonalizc/nerf-pytorch
|
[
"dbc0211a059834ae078eb3a99ad8e20bd1171d2f"
] |
[
"nerf/volume_rendering_utils.py"
] |
[
"import torch\n\nfrom .nerf_helpers import cumprod_exclusive\n\n\ndef volume_render_radiance_field(\n radiance_field,\n depth_values,\n ray_directions,\n radiance_field_noise_std=0.0,\n white_background=False,\n\n render_rgb=True,\n render_disp=True,\n render_acc=True,\n render_depth=True,\n):\n # TESTED\n one_e_10 = torch.tensor(\n [1e10], dtype=ray_directions.dtype, device=ray_directions.device\n )\n dists = torch.cat(\n (\n depth_values[..., 1:] - depth_values[..., :-1],\n one_e_10.expand(depth_values[..., :1].shape),\n ),\n dim=-1,\n )\n dists = dists * ray_directions[..., None, :].norm(p=2, dim=-1)\n\n rgb = torch.sigmoid(radiance_field[..., :3])\n noise = 0.0\n if radiance_field_noise_std > 0.0:\n noise = (\n torch.randn(\n radiance_field[..., 3].shape,\n dtype=radiance_field.dtype,\n device=radiance_field.device,\n )\n * radiance_field_noise_std\n )\n # noise = noise.to(radiance_field)\n sigma_a = torch.nn.functional.relu(radiance_field[..., 3] + noise)\n alpha = 1.0 - torch.exp(-sigma_a * dists)\n weights = alpha * cumprod_exclusive(1.0 - alpha + 1e-10)\n\n rgb_map, disp_map, acc_map, depth_map = None, None, None, None\n if render_rgb:\n rgb_map = weights[..., None] * rgb\n rgb_map = rgb_map.sum(dim=-2)\n if render_depth:\n depth_map = weights * depth_values\n depth_map = depth_map.sum(dim=-1)\n if render_acc:\n acc_map = weights.sum(dim=-1)\n if render_disp:\n disp_map = 1.0 / torch.max(1e-10 * torch.ones_like(depth_map), depth_map / acc_map)\n\n if white_background and render_rgb:\n rgb_map = rgb_map + (1.0 - acc_map[..., None])\n\n return rgb_map, disp_map, acc_map, weights, depth_map\n"
] |
[
[
"torch.sigmoid",
"torch.randn",
"torch.tensor",
"torch.exp",
"torch.nn.functional.relu",
"torch.ones_like"
]
] |