repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
leepand/IronManFly
|
[
"ea29bf1415a51b23a0fbc95f2bae82013a0887a4"
] |
[
"IronManFly/bandits_server/src/bandits.py"
] |
[
"import numpy as np\n\nclass EpsilonGreedy(object):\n def __init__(self,n,decay=100):\n self.counts = [0] * n\n self.values = [0.] * n\n self.decay = decay\n self.n = n\n\n def get_epsilon(self):\n total = np.sum(self.counts)\n return float(self.decay) / (total + float(self.decay))\n \n def choose_arm(self):\n epsilon = self.get_epsilon()\n if np.random.random() > epsilon:\n return np.argmax(self.values)\n else:\n return np.random.randint(self.n)\n \n def update(self,arm,reward):\n self.counts[arm] = self.counts[arm] + 1\n n = self.counts[arm]\n value = self.values[arm]\n new_value = ((n - 1) / float(n)) * value + (1 / float(n)) * reward\n self.values[arm] = new_value"
] |
[
[
"numpy.argmax",
"numpy.random.random",
"numpy.sum",
"numpy.random.randint"
]
] |
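The bandits.py entry above implements epsilon-greedy with a decaying exploration rate, epsilon = decay / (pulls + decay). A minimal smoke test, assuming the `EpsilonGreedy` class from the cell above is in scope (the Bernoulli reward setup is hypothetical):

```python
import numpy as np

# Hypothetical three-armed Bernoulli bandit; arm 2 pays best.
true_means = [0.2, 0.5, 0.8]

bandit = EpsilonGreedy(n=3, decay=100)
for _ in range(5000):
    arm = bandit.choose_arm()
    reward = float(np.random.random() < true_means[arm])
    bandit.update(arm, reward)

# The running means should approach true_means and the greedy
# choice should settle on arm 2.
print(bandit.values, np.argmax(bandit.values))
```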
tgao1337/flexible-pomodoro-tree
|
[
"745ec155a6d64f0e44ba5998911b6acb69f6a8e5"
] |
[
"oled-plot.py"
] |
[
"import matplotlib.pyplot as plt\nimport SSD1106\nfrom PIL import Image\nimport numpy as np\n\ndisplay = SSD1106.SSD1106()\ndisplay.setup()\ndisplay.clear()\n\n# Creatinng a plot figure of size 1.3x0.9 inches (OLED display size)\nfig = plt.figure(figsize=(1.28, 0.64), dpi=100)\nax = fig.add_axes((0.15, 0.35, 0.8, 0.5))\n\n# ===== PLOT 1\n'''\nx = np.linspace(0, 10, 100)\ny = 4 + 2 * np.sin(2 * x)\n\nax.plot(x, y, linewidth=1.0)\n\nax.set(xlim=(0, 8), xticks=np.arange(0, 9, 4),\n ylim=(0, 8), yticks=np.arange(0, 9, 4))\n'''\n# ===== END OF PLOT 1\n\n# ===== PLOT 2\n\nax.plot([1,2,3], [2,4,8])\nax.set(xlim=(0, 4), xticks=np.arange(0, 9, 1),\n ylim=(0, 8), yticks=np.arange(0, 9, 4))\n\n# ===== END OF PLOT 2\n\n\n# ===== BAR GRAPH\n'''\nnp.random.seed(3)\nx = 0.5 + np.arange(8)\ny = np.random.uniform(2, 7, len(x))\n\nax.bar(x, y, width=1, edgecolor=\"white\", linewidth=0.7)\n\nax.set(xlim=(0, 8), xticks=np.arange(0, 9, 4),\n ylim=(0, 8), yticks=np.arange(0, 9, 4))\n'''\n# ===== END OF BAR GRAPH\n\n\n\n# Saving plot\nplt.savefig(\"plot.png\", format=\"png\")\n\n\n# Creating a blank canvas\nimage = Image.new('1', (128,64), 255)\n\n# Open the plot\nplot = Image.open(\"plot.png\")\n# Paste plot on existing canvas\n\nimage.paste(plot)\n\nimage = image.rotate(180)\n\ndisplay.ShowImage(display.getbuffer(image))\n\n"
] |
[
[
"numpy.arange",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure"
]
] |
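The oled-plot.py entry drives an SSD1106 OLED, so it cannot run without that hardware driver. A sketch of just the off-display part, assuming only matplotlib and Pillow: a 1.28 x 0.64 inch figure at 100 dpi gives exactly the 128 x 64 pixels the panel expects.

```python
import matplotlib
matplotlib.use("Agg")  # render off-screen; no display or hardware needed
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image

# 1.28 in x 0.64 in at 100 dpi -> a 128x64 px image (the OLED resolution).
fig = plt.figure(figsize=(1.28, 0.64), dpi=100)
ax = fig.add_axes((0.15, 0.35, 0.8, 0.5))
ax.plot([1, 2, 3], [2, 4, 8])
ax.set(xlim=(0, 4), xticks=np.arange(0, 9, 1),
       ylim=(0, 8), yticks=np.arange(0, 9, 4))
plt.savefig("plot.png", format="png")

# The same 1-bit conversion and rotation the script performs before
# handing the buffer to the panel.
canvas = Image.new('1', (128, 64), 255)
canvas.paste(Image.open("plot.png"))
canvas = canvas.rotate(180)
canvas.save("plot_preview.png")  # inspect the result without the hardware
```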
OscarDPan/elephas
|
[
"4f3b16e16319aba54d7f67364d228c96d23fa460"
] |
[
"examples/basic_import.py"
] |
[
"from elephas.java import java_classes, adapter\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\n\nmodel = Sequential()\nmodel.add(Dense(units=64, activation='relu', input_dim=100))\nmodel.add(Dense(units=10, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])\n\nmodel.save('test.h5')\n\n\nkmi = java_classes.KerasModelImport\nfile = java_classes.File(\"test.h5\")\n\njava_model = kmi.importKerasSequentialModelAndWeights(file.absolutePath)\n\nweights = adapter.retrieve_keras_weights(java_model)\nmodel.set_weights(weights)"
] |
[
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.Sequential"
]
] |
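basic_import.py needs the DL4J jars behind `java_classes.KerasModelImport`, so it only runs inside an elephas setup. A pure-Keras sketch of the same weight round trip, with `load_model` standing in for the Java import (an assumption for illustration, not elephas's API):

```python
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense

model = Sequential()
model.add(Dense(units=64, activation='relu', input_dim=100))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd',
              metrics=['accuracy'])
model.save('test.h5')

# Stand-in for KerasModelImport + adapter.retrieve_keras_weights:
# reload the HDF5 file and transplant its weights back.
reloaded = load_model('test.h5')
model.set_weights(reloaded.get_weights())
```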
jim-meyer/tensorflow-onnx
|
[
"2b3c23da102c875737362f858b78fa50ae48809f"
] |
[
"tf2onnx/rewriter/gru_rewriter.py"
] |
[
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT license.\n\n\"\"\"\ntf2onnx.rewriter.gru_rewriter\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport numpy as np\nfrom tf2onnx import utils\nfrom tf2onnx.rewriter.rnn_utils import RNNUnitType, get_weights_from_const_node\n\nfrom tf2onnx.rewriter.unit_rnn_rewriter_base import UnitRnnRewriterBase\n\n# pylint: disable=invalid-name,unused-argument,missing-docstring\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass GRUUnitRewriter(UnitRnnRewriterBase):\n def __init__(self, g):\n super(GRUUnitRewriter, self).__init__(g)\n self.gru_cell_type = None\n self.state_variable_handlers = [\n {\"state\": (self._state_variable_finder, self._connect_gru_state_to_graph)}\n ]\n\n def find_cell(self, context):\n gru_cell_types = [RNNUnitType.GRUCell, RNNUnitType.GRUBlockCell]\n for cell_type in gru_cell_types:\n cell_match = self._match_cell(context, cell_type)\n if cell_match:\n self.gru_cell_type = cell_type\n logger.debug(\"parsing unit is %s\", cell_type)\n return cell_match\n logger.debug(\"cannot parse unit\")\n return None\n\n def get_weight_and_bias(self, context):\n match = context.cell_match\n\n gate_kernel = get_weights_from_const_node(self.g, match.get_op(\"gate_kernel\"))\n gate_bias = get_weights_from_const_node(self.g, match.get_op(\"gate_bias\"))\n hidden_kernel = get_weights_from_const_node(self.g, match.get_op(\"hidden_kernel\"))\n hidden_bias = get_weights_from_const_node(self.g, match.get_op(\"hidden_bias\"))\n if not all([gate_kernel, gate_bias, hidden_kernel, hidden_bias]):\n logger.debug(\"rnn weights check failed, skip\")\n return None\n\n logger.debug(\"find needed weights\")\n res = {\"gate_kernel\": gate_kernel,\n \"gate_bias\": gate_bias,\n \"hidden_kernel\": hidden_kernel,\n \"hidden_bias\": hidden_bias}\n return res\n\n def _state_variable_finder(self, context):\n if self.gru_cell_type == RNNUnitType.GRUCell:\n gru_cell = context.cell_match\n return self._find_state_variable_with_select(\n context,\n gru_cell.get_op(\"cell_output\").output[0],\n [gru_cell.get_op(\"cell_inputs\")]\n )\n if self.gru_cell_type == RNNUnitType.GRUBlockCell:\n gru_block_cell = context.cell_match.get_op(\"gru_block_cell\")\n return self._find_state_variable_with_select(\n context,\n gru_block_cell.output[3],\n [gru_block_cell]\n )\n return None\n\n def parse_attributes(self, context):\n # in tf, only activation of hidden gate is optional, input and update gate always use sigmoid\n match = context.cell_match\n activations = [\"sigmoid\", \"Tanh\"]\n if self.gru_cell_type == RNNUnitType.GRUCell:\n activation_op = match.get_op(\"optional_activation\")\n activations = [\"sigmoid\", activation_op.type]\n context.attributes[\"activations\"] = activations\n return True\n\n def is_valid(self, context):\n # except for ct, ht or ct_ht, there are at most 2 state variables\n other_state_variables_num = len(context.loop_properties.state_variables) - \\\n len(context.state_variables)\n if other_state_variables_num > 2:\n logger.debug(\"found %d other state variables\", other_state_variables_num)\n return False\n\n # output should be no more than 1\n outputs = context.loop_properties.scan_outputs_exits\n if len(outputs) > 1:\n logger.debug(\"found %d outputs for gru: %s\", len(outputs), outputs)\n return False\n return True\n\n def process_weights_and_bias(self, context):\n \"\"\"\n why split the data in this way should refer to code 
of tensorflow GRU cell and official document of ONNX GRU\n \"\"\"\n weights = context.weights\n # from code of tensorflow GRU cell, it can be known that shape of hidden_kernel(or candidate_kernel)\n # is (input_size+hidden_unit, hidden_unit)\n hidden_size = weights[\"hidden_kernel\"].value.shape[1]\n input_size = weights[\"hidden_kernel\"].value.shape[0] - hidden_size\n weight_dtype = weights[\"hidden_kernel\"].dtype\n bias_dtype = weights[\"hidden_bias\"].dtype\n # below code will use same notation as ONNX document\n # z means update gate, r means reset gate, h means hidden gate;\n # at this time weights of gate include input and state, will split it next\n r_kernel, z_kernel = np.split(weights[\"gate_kernel\"].value, [hidden_size], axis=1)\n h_kernel = weights[\"hidden_kernel\"].value\n r_bias, z_bias = np.split(weights[\"gate_bias\"].value, [hidden_size], axis=0)\n h_bias = weights[\"hidden_bias\"].value\n # ONNX GRU split weights of input and state, so have to split *_kernel\n input_r_kernel, state_r_kernel = np.split(r_kernel, [input_size], axis=0)\n input_z_kernel, state_z_kernel = np.split(z_kernel, [input_size], axis=0)\n input_h_kernel, state_h_kernel = np.split(h_kernel, [input_size], axis=0)\n W_zrh = np.concatenate((input_z_kernel, input_r_kernel, input_h_kernel), axis=1)\n R_zrh = np.concatenate((state_z_kernel, state_r_kernel, state_h_kernel), axis=1)\n # transpose weight matrix\n W_zrh = np.transpose(np.expand_dims(W_zrh, axis=0), axes=(0, 2, 1))\n R_zrh = np.transpose(np.expand_dims(R_zrh, axis=0), axes=(0, 2, 1))\n W_zrh = W_zrh.astype(weight_dtype)\n R_zrh = R_zrh.astype(weight_dtype)\n assert W_zrh.shape == (1, 3*hidden_size, input_size)\n assert R_zrh.shape == (1, 3*hidden_size, hidden_size)\n Wb_zrh = np.concatenate((z_bias, r_bias, h_bias), axis=0)\n # tf don't have bias for state, so use 0 instead\n zero = np.zeros_like(z_bias)\n Rb_zrh = np.concatenate((zero, zero, zero), axis=0)\n B_zrh = np.concatenate((Wb_zrh, Rb_zrh), axis=0)\n B_zrh = np.expand_dims(B_zrh, axis=0)\n B_zrh = B_zrh.astype(bias_dtype)\n assert B_zrh.shape == (1, 6*hidden_size)\n # create const ONNX node\n w_name = utils.make_name(\"W\")\n w_node = self.g.make_const(w_name, W_zrh, skip_conversion=True)\n\n r_name = utils.make_name(\"R\")\n r_node = self.g.make_const(r_name, R_zrh, skip_conversion=True)\n\n b_name = utils.make_name(\"B\")\n b_node = self.g.make_const(b_name, B_zrh, skip_conversion=True)\n\n context.input_size = input_size\n context.hidden_size = hidden_size\n context.onnx_input_ids[\"W\"] = w_node.output[0]\n context.onnx_input_ids[\"R\"] = r_node.output[0]\n context.onnx_input_ids[\"B\"] = b_node.output[0]\n\n def process_var_init_nodes(self, context):\n assert \"state\" in context.state_variables.keys()\n initializer_input_id = context.state_variables[\"state\"].enter_input_id\n node = self.g.get_node_by_output(initializer_input_id)\n if node.is_const():\n val = node.get_tensor_value(as_list=False)\n initial_name = utils.make_name(\"Const\")\n new_val = np.expand_dims(val, axis=0)\n const_node = self.g.make_const(initial_name, new_val)\n context.onnx_input_ids[\"initial_state\"] = const_node.output[0]\n return\n squeeze_node = self.g.make_node(\"Unsqueeze\", [initializer_input_id], attr={\"axes\": [0]})\n to_replace = [n for n in self.g.get_nodes() if n != squeeze_node]\n self.g.replace_all_inputs(to_replace, initializer_input_id, squeeze_node.output[0])\n context.onnx_input_ids[\"initial_state\"] = squeeze_node.output[0]\n\n def create_rnn_node(self, context):\n # specify if the 
RNN is forward, reverse, or bidirectional.\n # Must be one of forward (default), reverse, or bidirectional.\n # Here we won't mark bidirectional/reverse, we will have another rewriter running after this one,\n # which will based on patterns to combine a forward GRU and a backward GRU into a bidirectional one.\n num_direction = 1\n # todo: input_forget\n context.attributes[\"direction\"] = \"forward\"\n context.attributes[\"hidden_size\"] = context.hidden_size\n inputs = context.onnx_input_ids\n gru_inputs = [\n inputs[\"X\"], inputs[\"W\"], inputs[\"R\"], inputs[\"B\"],\n inputs[\"sequence_lens\"], inputs[\"initial_state\"]]\n x_shape = self.g.get_shape(gru_inputs[0])\n x_seq_length = x_shape[0]\n x_batch_size = x_shape[1]\n out_dtype = self.g.get_dtype(gru_inputs[0])\n gru_node = self.g.make_node(\"GRU\", gru_inputs, attr=context.attributes, output_count=2,\n shapes=[[x_seq_length, num_direction, x_batch_size, context.hidden_size],\n [num_direction, x_batch_size, context.hidden_size]],\n dtypes=[out_dtype, out_dtype])\n return gru_node\n\n def _connect_gru_state_to_graph(self, context):\n # in tf, state output shape is: [batch, hidden]\n # in onnx, output shape is: [number_directions, batch, hidden]\n exit_output_id = context.state_variables[\"state\"].exit_output.id\n if not exit_output_id:\n logger.debug(\"no one consume state variable\")\n return\n output_id = context.rnn_node.output[1]\n gru_state_shape = self.g.get_shape(output_id)\n output_shape = [gru_state_shape[1], gru_state_shape[2]]\n squeeze_node = self.g.make_node(\"Squeeze\", [output_id], attr={\"axes\": [0]},\n shapes=[output_shape], dtypes=[self.g.get_dtype(output_id)])\n\n self.g.replace_all_inputs(self.g.get_nodes(), exit_output_id, squeeze_node.output[0])\n"
] |
[
[
"numpy.concatenate",
"numpy.split",
"numpy.expand_dims",
"numpy.zeros_like"
]
] |
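The heart of `process_weights_and_bias` above is a pure array re-layout: TF stores one `(input_size + hidden_size, 2 * hidden_size)` gate kernel in reset/update order, while ONNX wants separate input (`W`) and recurrent (`R`) matrices in update/reset/hidden order. A NumPy-only sketch with random placeholder weights (the sizes are arbitrary):

```python
import numpy as np

input_size, hidden_size = 5, 3
gate_kernel = np.random.rand(input_size + hidden_size, 2 * hidden_size)
hidden_kernel = np.random.rand(input_size + hidden_size, hidden_size)
gate_bias = np.random.rand(2 * hidden_size)
hidden_bias = np.random.rand(hidden_size)

# TF packs reset (r) before update (z); ONNX expects z, r, h order.
r_kernel, z_kernel = np.split(gate_kernel, [hidden_size], axis=1)
r_bias, z_bias = np.split(gate_bias, [hidden_size], axis=0)

# Split input rows from recurrent (state) rows of each kernel.
input_z, state_z = np.split(z_kernel, [input_size], axis=0)
input_r, state_r = np.split(r_kernel, [input_size], axis=0)
input_h, state_h = np.split(hidden_kernel, [input_size], axis=0)

W_zrh = np.concatenate((input_z, input_r, input_h), axis=1)
R_zrh = np.concatenate((state_z, state_r, state_h), axis=1)
W_zrh = np.transpose(np.expand_dims(W_zrh, axis=0), axes=(0, 2, 1))
R_zrh = np.transpose(np.expand_dims(R_zrh, axis=0), axes=(0, 2, 1))
assert W_zrh.shape == (1, 3 * hidden_size, input_size)
assert R_zrh.shape == (1, 3 * hidden_size, hidden_size)

# TF has no recurrent bias, so the Rb half is zero-filled.
Wb_zrh = np.concatenate((z_bias, r_bias, hidden_bias), axis=0)
Rb_zrh = np.zeros_like(Wb_zrh)
B_zrh = np.expand_dims(np.concatenate((Wb_zrh, Rb_zrh), axis=0), axis=0)
assert B_zrh.shape == (1, 6 * hidden_size)
```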
denguir/drl-rpn-video
|
[
"8e52ad0529da2cf5982f00bf836536819e0951ba"
] |
[
"lib/test_script/save_test.py"
] |
[
"import tensorflow as tf \nimport numpy as np \n\nweights3d_dir = '/home/vador/Documents/project/AI/drl-rpn-tf-video/pretrained-data/data3D/'\nmodel3d = weights3d_dir + 'model_test'\n\nvn1 = np.array(np.random.randint(0, 10, (4,5,5)))\nvt1 = tf.Variable(vn1, name='v1')\n\nvn2 = np.array(np.random.randint(0, 10, (4,5,5)))\nvt2 = tf.Variable(vn2, name='v2')\n\nvt3 = tf.multiply(vt1, vt2)\n\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n sess.run(init)\n saver.save(sess, model3d)\n\n\n"
] |
[
[
"tensorflow.multiply",
"tensorflow.Variable",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.random.randint"
]
] |
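save_test.py uses TF1 graph-mode APIs (`tf.Session`, `tf.global_variables_initializer`, `tf.train.Saver`), so under TensorFlow 2.x it only runs through the v1 compatibility layer. A sketch of that, with a hypothetical checkpoint path:

```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restore TF1 graph-mode semantics
import numpy as np

model3d = '/tmp/model_test'  # hypothetical path for this sketch

vt1 = tf.Variable(np.random.randint(0, 10, (4, 5, 5)), name='v1')
vt2 = tf.Variable(np.random.randint(0, 10, (4, 5, 5)), name='v2')
vt3 = tf.multiply(vt1, vt2)

init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    saver.save(sess, model3d)
```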
CANGA/MIRA
|
[
"2f1214d34b884790fa8660b5208cd12495800f92"
] |
[
"src/computeAreaIntegralSE.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 17 11:29:22 2018\n\nComputes array of Jacobian weights for a given SE element of order 4\n\ncoords: double node data from .g file\nconnect: int connectivity data\n\n@author: jeguerra\n\"\"\"\nimport math as mt\nimport numpy as np\nfrom scipy.linalg import norm\n\n# Order 4 Gauss quadrature nodes and weights\n\n\ndef getGLLNodesWeights(order):\n # \"\"\" 2nd order method\n if order == 2:\n GN = [-1.0,\n 0.0\n + 1.0]\n\n GW = [1.0 / 3.0,\n 4.0 / 3.0,\n 1.0 / 3.0]\n # \"\"\" 4th order method\n if order == 4:\n GN = [-1.0,\n -mt.sqrt(0.2),\n +mt.sqrt(0.2),\n +1.0]\n\n GW = [1.0 / 6.0,\n 5.0 / 6.0,\n 5.0 / 6.0,\n 1.0 / 6.0]\n # \"\"\"\n\n # Scale the points/weights to [0 1]\n ovec = np.ones(np.size(GN))\n GN = 0.5 * np.matrix(np.add(GN, ovec))\n GW = 0.5 * np.matrix(GW)\n\n return np.ravel(GN), \\\n np.ravel(GW)\n\n\ndef computeCart2LL(cellCoord):\n\n RO = np.linalg.norm(cellCoord)\n psi = mt.asin(1.0 / RO * cellCoord[2])\n lam = mt.atan2(-cellCoord[0], -cellCoord[1]) + mt.pi\n pointLonLat = [360.0 * lam / (2.0 * mt.pi), 180.0 * psi / mt.pi]\n\n return pointLonLat\n\n\ndef computeAreaIntegralSE(varCoords, order):\n # avg = Boolean flag to take average of the function\n # farea = Boolean flag to compute only the area integral (ignore field)\n\n # Initialize the area\n dFaceArea = 0.0\n\n # Loop over the subtriangles and add up the areas\n GN, GW = getGLLNodesWeights(order)\n NP = len(GW)\n\n # Set the connectivity index vector corresponding to varCoords\n cdex = [0, 1, 2, 3, 11, 12, 13, 4, 10, 15, 14, 5, 9, 8, 7, 6]\n\n # Compute the plane edge directions for this quadrilateral\n nD30 = np.subtract(varCoords[:, 3], varCoords[:, 0])\n nD69 = np.subtract(varCoords[:, 6], varCoords[:, 9])\n nD90 = np.subtract(varCoords[:, 9], varCoords[:, 0])\n nD63 = np.subtract(varCoords[:, 6], varCoords[:, 3])\n\n # Loop over the quadrature points\n dF = np.zeros((3,))\n dDaF = np.zeros((3, 1))\n dDbF = np.zeros((3, 1))\n dJacobianGWppqq = np.zeros((len(cdex)))\n rr = 0\n for pp in range(NP):\n for qq in range(NP):\n # Enhance...\n dA = GN[pp]\n dB = GN[qq]\n\n dOmdA = (1.0 - dA)\n dOmdB = (1.0 - dB)\n\n # Fetch global coords of this quadrature point (on the plane)\n dF[0] = varCoords[0, 0] * dOmdA * dOmdB \\\n + varCoords[0, 3] * dA * dOmdB \\\n + varCoords[0, 6] * dA * dB \\\n + varCoords[0, 9] * dOmdA * dB\n dF[1] = varCoords[1, 0] * dOmdA * dOmdB \\\n + varCoords[1, 3] * dA * dOmdB \\\n + varCoords[1, 6] * dA * dB \\\n + varCoords[1, 9] * dOmdA * dB\n dF[2] = varCoords[2, 0] * dOmdA * dOmdB \\\n + varCoords[2, 3] * dA * dOmdB \\\n + varCoords[2, 6] * dA * dB \\\n + varCoords[2, 9] * dOmdA * dB\n\n dF2 = dF**2\n dR = norm(dF, 2)\n\n # Local secant vector between this node and the next in a direction\n dDaF[:, 0] = [dOmdB * nD30[0] + dB * nD69[0],\n dOmdB * nD30[1] + dB * nD69[1],\n dOmdB * nD30[2] + dB * nD69[2]]\n\n # Local secant vector between this node and the next in b direction\n dDbF[:, 0] = [dOmdA * nD90[0] + dA * nD63[0],\n dOmdA * nD90[1] + dA * nD63[1],\n dOmdA * nD90[2] + dA * nD63[2]]\n\n # Set the spherical metric...\n dDGMat = np.array([[(dF2[1] + dF2[2]), - dF[0] * dF[1], - dF[0] * dF[2]],\n [- dF[1] * dF[0],\n (dF2[0] + dF2[2]), - dF[1] * dF[2]],\n [- dF[2] * dF[0], - dF[2] * dF[1], (dF2[0] + dF2[1])]])\n\n # Compute the local tangent vectors in a and b directions\n dDaG = np.dot(dDGMat, dDaF)\n dDbG = np.dot(dDGMat, dDbF)\n\n dDenomTerm = 1.0 / (dR**3)\n\n # This happens to dDaG twice...\n dDaG *= dDenomTerm\n dDbG 
*= dDenomTerm\n\n # Compute the pointwise Jacobian weight\n dJV = np.cross(np.ravel(dDaG), np.ravel(dDbG))\n dJacobianGWppqq[cdex[rr]] = norm(dJV, 2) * GW[pp] * GW[qq]\n\n # Sum up the cell area\n dFaceArea += dJacobianGWppqq[cdex[rr]]\n rr += 1\n\n # Return the area of this element and a set of Jacobian weights\n return dFaceArea, dJacobianGWppqq\n"
] |
[
[
"numpy.matrix",
"numpy.dot",
"numpy.subtract",
"numpy.linalg.norm",
"numpy.add",
"numpy.size",
"scipy.linalg.norm",
"numpy.ravel",
"numpy.array",
"numpy.zeros"
]
] |
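A quick sanity check on `getGLLNodesWeights` (a sketch, assuming the functions from the cell above are in scope): after the 0.5 scaling, the Gauss-Lobatto weights should sum to 1 on [0, 1] and each node should carry exactly one weight, for both supported orders.

```python
import numpy as np

for order in (2, 4):
    GN, GW = getGLLNodesWeights(order)
    assert len(GN) == len(GW)           # one weight per node
    assert np.isclose(np.sum(GW), 1.0)  # weights integrate 1 over [0, 1]
    print(order, GN, GW)
```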
KATOKanji-0131/gloss-format-docxtable
|
[
"9896f4fe2643760febe67d5e8bb015db5aa22880"
] |
[
"main.py"
] |
[
"import docx\nfrom docx.shared import Pt\nfrom docx.shared import Mm\nimport math\nimport os.path\nimport pandas as pd\nimport re\nimport varibles\n\nwith open(\"example.txt\") as f:\n text_lines = f.readlines()\n\n# グロスと略語のリストを作り、略語の文字数でソート。文字数の長い順に処理しないと、NONPASTのPASTに\"PAST\"がヒットしてしまうため。\nabbreviations = pd.read_table(\"gloss_abbreviations.tsv\", header=None).values.tolist()\nabbreviations = sorted(abbreviations, reverse=True, key=lambda x: len(x[0]))\nabbreviations_used = []\n\nmorpheme_texts = []\ngloss_texts = []\ntranslation_texts = []\n\nmorph_spcf = varibles.morph_spcf # 形態素行の指定子\ngl_spcf = varibles.gl_spcf # グロス行の指定子\ntrsl_spcf = varibles.trsl_spcf # 訳行の指定子\n\n# テキストから行を抽出してリストに格納する\nfor line in text_lines:\n if re.match(morph_spcf, line) is not None:\n line = re.sub(f\"{morph_spcf}\\s*\", r\"\", line.rstrip(\"\\n\"))\n morpheme_texts.append(line.split())\n if re.match(gl_spcf, line) is not None:\n line = re.sub(f\"{gl_spcf}\\s*\", r\"\", line.rstrip(\"\\n\"))\n gloss_texts.append(line.split())\n if re.match(trsl_spcf, line) is not None:\n line = re.sub(f\"{trsl_spcf}\\s*\", r\"\", line.rstrip(\"\\n\"))\n translation_texts.append(line)\n\n# ワードドキュメント作成\ndoc = docx.Document()\n\n# 表整形の処理。各ループは例文一つに相当。\nexno = 0 # 例文番号\nfor mp_block, gl_block, tr in zip(morpheme_texts, gloss_texts, translation_texts):\n table = doc.add_table(rows=2, cols=9)\n # table.autofit = False\n table.allow_autofit = False\n l = 0 # 例文の行数。はみ出しそうになったらlを+1して、次の行に続ける。\n\n # 一番左上のセルには例文番号を入れる\n table.rows[0].cells[0].text = \"(\" + str(exno+1) + \")\"\n table.rows[0].cells[0].width = Mm(10)\n\n m = 0 # 形態素の番号。\n cell_width_sum = 0 # セルの長さの合計値。これがページ幅を超える時に改行する。\n\n for mp, gl in zip(mp_block, gl_block):\n # 表幅がページ幅を超えた時の処理\n if m > 7 or cell_width_sum > Mm(220):\n row = table.add_row()\n row = table.add_row()\n m = 0\n l += 1\n cell_width_sum = 0\n col_length = max(len(mp), len(gl))\n\n # 行数*2 番目の行に形態素、行数*2-1 番目の行にグロスをいれる。\n mp_row = table.rows[l * 2]\n gl_row = table.rows[l * 2 + 1]\n m += 1\n mp_row.cells[m].text = mp\n\n # もしglがグロス一覧にあるなら、その部分をスモールキャップに変換して、セルに追加する。\n for abb in abbreviations:\n if abb[0] not in gl:\n continue\n # 略号でない部分 (=prefixとsuffixの整形)\n try:\n prefix = re.match(f\".+?(?={abb[0]})\", gl).group()\n except AttributeError:\n prefix = \"\"\n try:\n suffix = re.match(f\"(?<={abb[0]}).+?$\", gl).group()\n except AttributeError:\n suffix = \"\"\n para = gl_row.cells[m].paragraphs[0]\n para.add_run(prefix)\n gloss = abb[0]\n p = para.add_run(gloss.lower())\n p.font.small_caps = True\n para.add_run(suffix)\n\n # 使用したabbreviationをリストに入れておき、後で略号一覧を出力する\n if abb not in abbreviations_used:\n abbreviations_used.append(abb)\n # print(abbreviations_used)\n\n # グロスが一つでもヒットしたら処理を終了したいのでbreak\n break\n\n # グロスに略号が含まれない場合、グロスをそのままセルに入れる\n if gl_row.cells[m].text == \"\":\n gl_row.cells[m].text = gl\n\n # 形態素とグロスのうち長い方に合わせてセル幅を設定する\n # ToDo: 一定の値を下回って小さくすることができなさそう?\n col_length = max(len(mp_row.cells[m].text), len(gl_row.cells[m].text))\n\n mp_row.cells[m].width = Pt(10*col_length)\n gl_row.cells[m].width = Pt(10*col_length)\n\n cell_width_sum += 10*col_length\n\n row = table.add_row()\n table.rows[-1].cells[1].text = tr\n # ToDo: 列高さ指定できるように\n for row in table.rows:\n row.height = Pt(8)\n doc.add_paragraph(\"\")\n exno += 1\n\n# 略号一覧の整形\nabbreviations_used = sorted(abbreviations_used, key=lambda x: x[0])\nabb_row_length = math.ceil(len(abbreviations_used)/2)\nabb_table = doc.add_table(rows=abb_row_length, cols=4)\nabb_table.allow_autofit = False\nfor row, i in zip(abb_table.rows, range(abb_row_length)):\n 
row.cells[0].text = abbreviations_used[i][0]\n row.cells[1].text = abbreviations_used[i][1]\n row.cells[2].text = abbreviations_used[i+abb_row_length][0]\n row.cells[3].text = abbreviations_used[i+abb_row_length][1]\nabb_table.columns[0].width = Pt(30)\n\n# ToDo セル幅を個別に指定できるように\n# ToDo 余白を指定できるように\n# ToDo 数字の後ろに英字を出力できるように\n\n# ファイル名を指定して保存\nfile_no = 1\nif os.path.exists(f\"{varibles.file_name}.docx\"):\n while os.path.exists(f\"{varibles.file_name}({file_no}).docx\") is True:\n file_no += 1\n doc.save(f\"{varibles.file_name}({file_no}).docx\")\nelse:\n doc.save(f\"{varibles.file_name}.docx\")\n\n# doc.save(f\"{varibles.file_name}.docx\")\n"
] |
[
[
"pandas.read_table"
]
] |
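main.py depends on example.txt, gloss_abbreviations.tsv, and a local `varibles` module, so it is not runnable standalone. The small-caps run trick at its core is easy to isolate; a minimal sketch with hypothetical gloss strings, assuming only python-docx:

```python
import docx

doc = docx.Document()
table = doc.add_table(rows=2, cols=1)
table.rows[0].cells[0].text = "inu-ga"       # morpheme line (hypothetical)
para = table.rows[1].cells[0].paragraphs[0]  # matching gloss line
para.add_run("dog-")                         # non-abbreviation prefix
run = para.add_run("NOM".lower())            # the gloss abbreviation...
run.font.small_caps = True                   # ...rendered as small-caps NOM
doc.save("gloss_demo.docx")
```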
StevenJokess/autogluon
|
[
"bdbaac2d13d14d075b7aa751561f0bbd39927789"
] |
[
"tabular/src/autogluon/tabular/trainer/abstract_trainer.py"
] |
[
"import copy, time, traceback, logging\nimport os\nfrom typing import List, Union, Tuple\n\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport psutil\nfrom collections import defaultdict\n\nfrom autogluon.core.constants import AG_ARGS, AG_ARGS_FIT, BINARY, MULTICLASS, REGRESSION, QUANTILE, REFIT_FULL_NAME, REFIT_FULL_SUFFIX\nfrom autogluon.core.models import AbstractModel, BaggedEnsembleModel, StackerEnsembleModel, WeightedEnsembleModel\nfrom autogluon.core.features.feature_metadata import FeatureMetadata\nfrom autogluon.core.scheduler.scheduler_factory import scheduler_factory\nfrom autogluon.core.utils import default_holdout_frac, get_pred_from_proba, generate_train_test_split, infer_eval_metric, compute_permutation_feature_importance, extract_column, compute_weighted_metric\nfrom autogluon.core.utils.exceptions import TimeLimitExceeded, NotEnoughMemoryError, NoValidFeatures, NoGPUError\nfrom autogluon.core.utils.loaders import load_pkl\nfrom autogluon.core.utils.savers import save_json, save_pkl\n\nfrom .utils import process_hyperparameters\nfrom ..augmentation.distill_utils import format_distillation_labels, augment_data\n\nlogger = logging.getLogger(__name__)\n\n\n# FIXME: Below is major defect!\n# Weird interaction for metrics like AUC during bagging.\n# If kfold = 5, scores are 0.9, 0.85, 0.8, 0.75, and 0.7, the score is not 0.8! It is much lower because probs are combined together and AUC is recalculated\n# Do we want this to happen? Should we calculate score by 5 separate scores and then averaging instead?\n\n# TODO: Dynamic model loading for ensemble models during prediction, only load more models if prediction is uncertain. This dynamically reduces inference time.\n# TODO: Try midstack Semi-Supervised. Just take final models and re-train them, use bagged preds for SS rows. 
This would be very cheap and easy to try.\n# TODO: Move to autogluon.core\nclass AbstractTrainer:\n trainer_file_name = 'trainer.pkl'\n trainer_info_name = 'info.pkl'\n trainer_info_json_name = 'info.json'\n distill_stackname = 'distill' # name of stack-level for distilled student models\n\n def __init__(self, path: str, problem_type: str, eval_metric=None,\n num_classes=None, quantile_levels=None, low_memory=False, feature_metadata=None, k_fold=0, n_repeats=1,\n sample_weight=None, weight_evaluation=False, save_data=False, random_state=0, verbosity=2):\n self.path = path\n self.problem_type = problem_type\n self.feature_metadata = feature_metadata\n self.save_data = save_data\n self.random_state = random_state # Integer value added to the stack level to get the random_state for kfold splits or the train/val split if bagging is disabled\n self.verbosity = verbosity\n self.sample_weight = sample_weight # TODO: consider redesign where Trainer doesnt need sample_weight column name and weights are separate from X\n self.weight_evaluation = weight_evaluation\n if eval_metric is not None:\n self.eval_metric = eval_metric\n else:\n self.eval_metric = infer_eval_metric(problem_type=self.problem_type)\n\n logger.log(25, f\"AutoGluon will gauge predictive performance using evaluation metric: '{self.eval_metric.name}'\")\n if not (self.eval_metric.needs_pred or self.eval_metric.needs_quantile):\n logger.log(25, \"\\tThis metric expects predicted probabilities rather than predicted class labels, so you'll need to use predict_proba() instead of predict()\")\n\n logger.log(20, \"\\tTo change this, specify the eval_metric argument of fit()\")\n self.num_classes = num_classes\n self.quantile_levels = quantile_levels\n self.feature_prune = False # will be set to True if feature-pruning is turned on.\n self.low_memory = low_memory\n self.bagged_mode = True if k_fold >= 2 else False\n if self.bagged_mode:\n self.k_fold = k_fold # int number of folds to do model bagging, < 2 means disabled\n self.n_repeats = n_repeats\n else:\n self.k_fold = 0\n self.n_repeats = 1\n\n self.model_best = None\n\n self.models = {} # Dict of model name -> model object. A key, value pair only exists if a model is persisted in memory. # TODO: v0.1 Rename and consider making private\n self.model_graph = nx.DiGraph() # Directed Acyclic Graph (DAG) of model interactions. Describes how certain models depend on the predictions of certain other models. Contains numerous metadata regarding each model.\n self.model_full_dict = {} # Dict of normal model -> FULL model. FULL models are produced by self.refit_single_full() and self.refit_ensemble_full().\n self._model_full_dict_val_score = {} # Dict of FULL model -> normal model validation score in case the normal model had been deleted.\n self.reset_paths = False\n\n self._time_limit = None # Internal float of the total time limit allowed for a given fit call. Used in logging statements.\n self._time_train_start = None # Internal timestamp of the time training started for a given fit call. 
Used in logging statements.\n\n self._num_rows_train = None\n self._num_cols_train = None\n\n self.is_data_saved = False\n self._X_saved = False\n self._y_saved = False\n self._X_val_saved = False\n self._y_val_saved = False\n\n self._groups = None # custom split indices\n\n self._regress_preds_asprobas = False # whether to treat regression predictions as class-probabilities (during distillation)\n\n self._extra_banned_names = set() # Names which are banned but are not used by a trained model.\n\n # self._exceptions_list = [] # TODO: Keep exceptions list for debugging during benchmarking.\n\n # path_root is the directory containing learner.pkl\n @property\n def path_root(self) -> str:\n return self.path.rsplit(os.path.sep, maxsplit=2)[0] + os.path.sep\n\n @property\n def path_utils(self) -> str:\n return self.path_root + 'utils' + os.path.sep\n\n @property\n def path_data(self) -> str:\n return self.path_utils + 'data' + os.path.sep\n\n def load_X(self):\n if self._X_saved:\n path = self.path_data + 'X.pkl'\n return load_pkl.load(path=path)\n return None\n\n def load_X_val(self):\n if self._X_val_saved:\n path = self.path_data + 'X_val.pkl'\n return load_pkl.load(path=path)\n return None\n\n def load_y(self):\n if self._y_saved:\n path = self.path_data + 'y.pkl'\n return load_pkl.load(path=path)\n return None\n\n def load_y_val(self):\n if self._y_val_saved:\n path = self.path_data + 'y_val.pkl'\n return load_pkl.load(path=path)\n return None\n\n def load_data(self):\n X = self.load_X()\n y = self.load_y()\n X_val = self.load_X_val()\n y_val = self.load_y_val()\n\n return X, y, X_val, y_val\n\n def save_X(self, X, verbose=True):\n path = self.path_data + 'X.pkl'\n save_pkl.save(path=path, object=X, verbose=verbose)\n self._X_saved = True\n\n def save_X_val(self, X, verbose=True):\n path = self.path_data + 'X_val.pkl'\n save_pkl.save(path=path, object=X, verbose=verbose)\n self._X_val_saved = True\n\n def save_y(self, y, verbose=True):\n path = self.path_data + 'y.pkl'\n save_pkl.save(path=path, object=y, verbose=verbose)\n self._y_saved = True\n\n def save_y_val(self, y, verbose=True):\n path = self.path_data + 'y_val.pkl'\n save_pkl.save(path=path, object=y, verbose=verbose)\n self._y_val_saved = True\n\n def get_model_names(self, stack_name: Union[List[str], str] = None, level: Union[List[int], int] = None, can_infer: bool = None, models: List[str] = None) -> List[str]:\n if models is None:\n models = list(self.model_graph.nodes)\n if stack_name is not None:\n if not isinstance(stack_name, list):\n stack_name = [stack_name]\n node_attributes: dict = self.get_models_attribute_dict(attribute='stack_name')\n models = [model_name for model_name in models if node_attributes[model_name] in stack_name]\n if level is not None:\n if not isinstance(level, list):\n level = [level]\n node_attributes: dict = self.get_models_attribute_dict(attribute='level')\n models = [model_name for model_name in models if node_attributes[model_name] in level]\n # TODO: can_infer is technically more complicated, if an ancestor can't infer then the model can't infer.\n if can_infer is not None:\n node_attributes = self.get_models_attribute_dict(attribute='can_infer')\n models = [model for model in models if node_attributes[model] == can_infer]\n return models\n\n def get_max_level(self, stack_name: str = None, models: List[str] = None) -> int:\n models = self.get_model_names(stack_name=stack_name, models=models)\n models_attribute_dict = self.get_models_attribute_dict(attribute='level', models=models)\n if 
models_attribute_dict:\n return max(list(models_attribute_dict.values()))\n else:\n return -1\n\n def construct_model_templates(self, hyperparameters: dict, **kwargs) -> Tuple[List[AbstractModel], dict]:\n \"\"\"Constructs a list of unfit models based on the hyperparameters dict.\"\"\"\n raise NotImplementedError\n\n def construct_model_templates_distillation(self, hyperparameters: dict, **kwargs) -> Tuple[List[AbstractModel], dict]:\n \"\"\"Constructs a list of unfit models based on the hyperparameters dict for softclass distillation.\"\"\"\n raise NotImplementedError\n\n def get_model_level(self, model_name: str) -> int:\n return self.get_model_attribute(model=model_name, attribute='level')\n\n def set_contexts(self, path_context):\n self.path, model_paths = self.create_contexts(path_context)\n for model, path in model_paths.items():\n self.set_model_attribute(model=model, attribute='path', val=path)\n\n def create_contexts(self, path_context: str) -> (str, dict):\n path = path_context\n model_paths = self.get_models_attribute_dict(attribute='path')\n for model, prev_path in model_paths.items():\n model_local_path = prev_path.split(self.path, 1)[1]\n new_path = path + model_local_path\n model_paths[model] = new_path\n\n return path, model_paths\n\n def fit(self, X, y, hyperparameters: dict, X_val=None, y_val=None, **kwargs):\n raise NotImplementedError\n\n # TODO: Enable easier re-mapping of trained models -> hyperparameters input (They don't share a key since name can change)\n def train_multi_levels(self, X, y, hyperparameters: dict, X_val=None, y_val=None, X_unlabeled=None, base_model_names: List[str] = None,\n feature_prune=False, core_kwargs: dict = None, aux_kwargs: dict = None,\n level_start=1, level_end=1, time_limit=None, name_suffix: str = None, relative_stack=True, level_time_modifier=0.333) -> List[str]:\n \"\"\"\n Trains a multi-layer stack ensemble using the input data on the hyperparameters dict input.\n hyperparameters is used to determine the models used in each stack layer.\n If continuing a stack ensemble with level_start>1, ensure that base_model_names is set to the appropriate base models that will be used by the level_start level models.\n Trains both core and aux models.\n core models are standard models which are fit on the data features. Core models will also use model predictions if base_model_names was specified or if level != 1.\n aux models are ensemble models which only use the predictions of core models as features. These models never use the original features.\n\n level_time_modifier : float, default 0.333\n The amount of extra time given relatively to early stack levels compared to later stack levels.\n If 0, then all stack levels are given 100%/L of the time, where L is the number of stack levels.\n If 1, then all stack levels are given 100% of the time, meaning if the first level uses all of the time given to it, the other levels won't train.\n Time given to a level = remaining_time / remaining_levels * (1 + level_time_modifier), capped by total remaining time.\n\n Returns a list of the model names that were trained from this method call, in order of fit.\n \"\"\"\n self._time_limit = time_limit\n self._time_train_start = time.time()\n time_train_start = self._time_train_start\n\n hyperparameters = self._process_hyperparameters(hyperparameters=hyperparameters)\n\n if relative_stack:\n if level_start != 1:\n raise AssertionError(f'level_start must be 1 when `relative_stack=True`. 
(level_start = {level_start})')\n level_add = 0\n if base_model_names:\n max_base_model_level = self.get_max_level(models=base_model_names)\n level_start = max_base_model_level + 1\n level_add = level_start - 1\n level_end += level_add\n if level_start != 1:\n hyperparameters_relative = {}\n for key in hyperparameters:\n if isinstance(key, int):\n hyperparameters_relative[key+level_add] = hyperparameters[key]\n else:\n hyperparameters_relative[key] = hyperparameters[key]\n hyperparameters = hyperparameters_relative\n\n core_kwargs = {} if core_kwargs is None else core_kwargs.copy()\n aux_kwargs = {} if aux_kwargs is None else aux_kwargs.copy()\n\n model_names_fit = []\n if level_start != level_end:\n logger.log(20, f'AutoGluon will fit {level_end - level_start + 1} stack levels (L{level_start} to L{level_end}) ...')\n for level in range(level_start, level_end + 1):\n core_kwargs_level = core_kwargs.copy()\n aux_kwargs_level = aux_kwargs.copy()\n if time_limit is not None:\n time_train_level_start = time.time()\n levels_left = level_end - level + 1\n time_left = time_limit - (time_train_level_start - time_train_start)\n time_limit_for_level = min(time_left / levels_left * (1 + level_time_modifier), time_left)\n time_limit_core = time_limit_for_level\n time_limit_aux = max(time_limit_for_level * 0.1, min(time_limit, 360)) # Allows aux to go over time_limit, but only by a small amount\n core_kwargs_level['time_limit'] = core_kwargs_level.get('time_limit', time_limit_core)\n aux_kwargs_level['time_limit'] = aux_kwargs_level.get('time_limit', time_limit_aux)\n if level != 1:\n feature_prune = False # TODO: Enable feature prune on levels > 1\n base_model_names, aux_models = self.stack_new_level(\n X=X, y=y, X_val=X_val, y_val=y_val, X_unlabeled=X_unlabeled,\n models=hyperparameters, level=level, base_model_names=base_model_names,\n feature_prune=feature_prune,\n core_kwargs=core_kwargs_level, aux_kwargs=aux_kwargs_level, name_suffix=name_suffix,\n )\n model_names_fit += base_model_names + aux_models\n self._time_limit = None\n self.save()\n return model_names_fit\n\n def stack_new_level(self, X, y, models: Union[List[AbstractModel], dict], X_val=None, y_val=None, X_unlabeled=None, level=1, base_model_names: List[str] = None,\n feature_prune=False, core_kwargs: dict = None, aux_kwargs: dict = None, name_suffix: str = None) -> (List[str], List[str]):\n \"\"\"\n Similar to calling self.stack_new_level_core, except auxiliary models will also be trained via a call to self.stack_new_level_aux, with the models trained from self.stack_new_level_core used as base models.\n \"\"\"\n if base_model_names is None:\n base_model_names = []\n if level < 1:\n raise AssertionError(f'Stack level must be >= 1, but level={level}.')\n elif not base_model_names and level > 1:\n logger.log(30, f'Warning: Training models at stack level {level}, but no base models were specified.')\n elif base_model_names and level == 1:\n raise AssertionError(f'Stack level 1 models cannot have base models, but base_model_names={base_model_names}.')\n core_kwargs = {} if core_kwargs is None else core_kwargs.copy()\n aux_kwargs = {} if aux_kwargs is None else aux_kwargs.copy()\n if name_suffix:\n core_kwargs['name_suffix'] = core_kwargs.get('name_suffix', '') + name_suffix\n aux_kwargs['name_suffix'] = aux_kwargs.get('name_suffix', '') + name_suffix\n core_models = self.stack_new_level_core(X=X, y=y, X_val=X_val, y_val=y_val, X_unlabeled=X_unlabeled, models=models,\n level=level, base_model_names=base_model_names, 
feature_prune=feature_prune, **core_kwargs)\n\n if X_val is None:\n aux_models = self.stack_new_level_aux(X=X, y=y, base_model_names=core_models, level=level+1, **aux_kwargs)\n else:\n aux_models = self.stack_new_level_aux(X=X_val, y=y_val, fit=False, base_model_names=core_models, level=level+1, **aux_kwargs)\n return core_models, aux_models\n\n def stack_new_level_core(self, X, y, models: Union[List[AbstractModel], dict], X_val=None, y_val=None, X_unlabeled=None,\n level=1, base_model_names: List[str] = None, stack_name='core',\n ag_args=None, ag_args_fit=None, ag_args_ensemble=None, excluded_model_types=None, ensemble_type=StackerEnsembleModel,\n name_suffix: str = None, get_models_func=None, refit_full=False, **kwargs) -> List[str]:\n \"\"\"\n Trains all models using the data provided.\n If level > 1, then the models will use base model predictions as additional features.\n The base models used can be specified via base_model_names.\n If self.bagged_mode, then models will be trained as StackerEnsembleModels.\n The data provided in this method should not contain stack features, as they will be automatically generated if necessary.\n \"\"\"\n if get_models_func is None:\n get_models_func = self.construct_model_templates\n if base_model_names is None:\n base_model_names = []\n if not self.bagged_mode and level != 1:\n raise ValueError('Stack Ensembling is not valid for non-bagged mode.')\n\n if isinstance(models, dict):\n get_models_kwargs = dict(\n level=level,\n name_suffix=name_suffix,\n ag_args=ag_args,\n ag_args_fit=ag_args_fit,\n excluded_model_types=excluded_model_types,\n )\n\n if self.bagged_mode:\n if level == 1:\n (base_model_names, base_model_paths, base_model_types) = (None, None, None)\n elif level > 1:\n base_model_names, base_model_paths, base_model_types = self._get_models_load_info(model_names=base_model_names)\n if len(base_model_names) == 0:\n logger.log(20, 'No base models to train on, skipping stack level...')\n return []\n else:\n raise AssertionError(f'Stack level cannot be less than 1! 
level = {level}')\n\n ensemble_kwargs = {\n 'base_model_names': base_model_names,\n 'base_model_paths_dict': base_model_paths,\n 'base_model_types_dict': base_model_types,\n 'random_state': level + self.random_state,\n }\n get_models_kwargs.update(dict(\n ag_args_ensemble=ag_args_ensemble,\n ensemble_type=ensemble_type,\n ensemble_kwargs=ensemble_kwargs,\n ))\n models, model_args_fit = get_models_func(hyperparameters=models, **get_models_kwargs)\n if model_args_fit:\n hyperparameter_tune_kwargs = {\n model_name: model_args_fit[model_name]['hyperparameter_tune_kwargs']\n for model_name in model_args_fit if 'hyperparameter_tune_kwargs' in model_args_fit[model_name]\n }\n kwargs['hyperparameter_tune_kwargs'] = hyperparameter_tune_kwargs\n logger.log(20, f'Fitting {len(models)} L{level} models ...')\n X_init = self.get_inputs_to_stacker(X, base_models=base_model_names, fit=True)\n if X_val is not None:\n X_val = self.get_inputs_to_stacker(X_val, base_models=base_model_names, fit=False)\n if refit_full and X_val is not None:\n X_init = pd.concat([X_init, X_val])\n y = pd.concat([y, y_val])\n X_val = None\n y_val = None\n if X_unlabeled is not None:\n X_unlabeled = self.get_inputs_to_stacker(X_unlabeled, base_models=base_model_names, fit=False)\n\n fit_kwargs = dict(num_classes=self.num_classes)\n\n # FIXME: TODO: v0.1 X_unlabeled isn't cached so it won't be available during refit_full or fit_extra.\n return self._train_multi(X=X_init, y=y, X_val=X_val, y_val=y_val, X_unlabeled=X_unlabeled,\n models=models, level=level, stack_name=stack_name, fit_kwargs=fit_kwargs, **kwargs)\n\n # TODO: Consider making level be auto-determined based off of max(base_model_levels)+1\n # TODO: Remove name_suffix, hacked in\n # TODO: X can be optional because it isn't needed if fit=True\n def stack_new_level_aux(self, X, y, base_model_names: List[str], level, fit=True, stack_name='aux1', time_limit=None, name_suffix: str = None, get_models_func=None, check_if_best=True) -> List[str]:\n \"\"\"\n Trains auxiliary models (currently a single weighted ensemble) using the provided base models.\n Level must be greater than the level of any of the base models.\n Auxiliary models never use the original features and only train with the predictions of other models as features.\n \"\"\"\n X_stack_preds = self.get_inputs_to_stacker(X, base_models=base_model_names, fit=fit, use_orig_features=False)\n if self.weight_evaluation:\n X, w = extract_column(X, self.sample_weight) # TODO: consider redesign with w as separate arg instead of bundled inside X\n if w is not None:\n X_stack_preds[self.sample_weight] = w.values/w.mean()\n return self.generate_weighted_ensemble(X=X_stack_preds, y=y, level=level, base_model_names=base_model_names, k_fold=1, n_repeats=1, stack_name=stack_name, time_limit=time_limit, name_suffix=name_suffix, get_models_func=get_models_func, check_if_best=check_if_best)\n\n def predict(self, X, model=None):\n if model is None:\n model = self._get_best()\n return self._predict_model(X, model)\n\n def predict_proba(self, X, model=None):\n if model is None:\n model = self._get_best()\n return self._predict_proba_model(X, model)\n\n def _get_best(self):\n if self.model_best is not None:\n return self.model_best\n else:\n return self.get_model_best()\n\n # Note: model_pred_proba_dict is mutated in this function to minimize memory usage\n def get_inputs_to_model(self, model, X, model_pred_proba_dict=None, fit=False, preprocess_nonadaptive=False):\n \"\"\"\n For output X:\n If preprocess_nonadaptive=False, call 
model.predict(X)\n If preprocess_nonadaptive=True, call model.predict(X, preprocess_nonadaptive=False)\n \"\"\"\n if isinstance(model, str):\n # TODO: Remove unnecessary load when no stacking\n model = self.load_model(model)\n model_level = self.get_model_level(model.name)\n if model_level > 1 and isinstance(model, StackerEnsembleModel):\n if fit:\n model_pred_proba_dict = None\n else:\n model_set = self.get_minimum_model_set(model)\n model_set = [m for m in model_set if m != model.name] # TODO: Can probably be faster, get this result from graph\n model_pred_proba_dict = self.get_model_pred_proba_dict(X=X, models=model_set, model_pred_proba_dict=model_pred_proba_dict, fit=fit)\n X = model.preprocess(X=X, preprocess_nonadaptive=preprocess_nonadaptive, fit=fit, model_pred_proba_dict=model_pred_proba_dict)\n elif preprocess_nonadaptive:\n X = model.preprocess(X=X, preprocess_stateful=False)\n return X\n\n def score(self, X, y, model=None, weights=None) -> float:\n if self.eval_metric.needs_pred or self.eval_metric.needs_quantile:\n y_pred = self.predict(X=X, model=model)\n else:\n y_pred = self.predict_proba(X=X, model=model)\n return compute_weighted_metric(y, y_pred, self.eval_metric, weights, weight_evaluation=self.weight_evaluation,\n quantile_levels=self.quantile_levels)\n\n def score_with_y_pred_proba(self, y, y_pred_proba, weights=None) -> float:\n if self.eval_metric.needs_pred or self.eval_metric.needs_quantile:\n y_pred = get_pred_from_proba(y_pred_proba=y_pred_proba, problem_type=self.problem_type)\n else:\n y_pred = y_pred_proba\n return compute_weighted_metric(y, y_pred, self.eval_metric, weights, weight_evaluation=self.weight_evaluation,\n quantile_levels=self.quantile_levels)\n\n # TODO: Consider adding persist to disk functionality for pred_proba dictionary to lessen memory burden on large multiclass problems.\n # For datasets with 100+ classes, this function could potentially run the system OOM due to each pred_proba numpy array taking significant amounts of space.\n # This issue already existed in the previous level-based version but only had the minimum required predictions in memory at a time, whereas this has all model predictions in memory.\n # TODO: Add memory optimal topological ordering -> Minimize amount of pred_probas in memory at a time, delete pred probas that are no longer required\n # Optimally computes pred_probas for each model in `models`. 
Will compute each necessary model only once and store its predictions in a dictionary.\n # Note: Mutates model_pred_proba_dict and model_pred_time_dict input if present to minimize memory usage\n # fit = get oof pred proba\n # if record_pred_time is `True`, outputs tuple of dicts (model_pred_proba_dict, model_pred_time_dict), else output only model_pred_proba_dict\n def get_model_pred_proba_dict(self, X, models, model_pred_proba_dict=None, model_pred_time_dict=None, fit=False, record_pred_time=False):\n if model_pred_proba_dict is None:\n model_pred_proba_dict = {}\n if model_pred_time_dict is None:\n model_pred_time_dict = {}\n\n if fit:\n model_pred_order = [model for model in models if model not in model_pred_proba_dict.keys()]\n else:\n model_set = set()\n for model in models:\n if model in model_set:\n continue\n min_model_set = set(self.get_minimum_model_set(model))\n model_set = model_set.union(min_model_set)\n model_set = model_set.difference(set(model_pred_proba_dict.keys()))\n models_to_load = list(model_set)\n subgraph = nx.subgraph(self.model_graph, models_to_load)\n\n # For model in model_pred_proba_dict, remove model node from graph and all ancestors that have no remaining descendants and are not in `models`\n models_to_ignore = [model for model in models_to_load if (model not in models) and (not list(subgraph.successors(model)))]\n while models_to_ignore:\n model = models_to_ignore[0]\n predecessors = list(subgraph.predecessors(model))\n subgraph.remove_node(model)\n models_to_ignore = models_to_ignore[1:]\n for predecessor in predecessors:\n if (predecessor not in models) and (not list(subgraph.successors(predecessor))) and (predecessor not in models_to_ignore):\n models_to_ignore.append(predecessor)\n\n # Get model prediction order\n model_pred_order = list(nx.lexicographical_topological_sort(subgraph))\n\n # Compute model predictions in topological order\n for model_name in model_pred_order:\n if record_pred_time:\n time_start = time.time()\n\n if fit:\n model_type = self.get_model_attribute(model=model_name, attribute='type')\n if issubclass(model_type, BaggedEnsembleModel):\n model_path = self.get_model_attribute(model=model_name, attribute='path')\n model_pred_proba_dict[model_name] = model_type.load_oof(path=model_path)\n else:\n raise AssertionError(f'Model {model_name} must be a BaggedEnsembleModel to return oof_pred_proba')\n else:\n model = self.load_model(model_name=model_name)\n if isinstance(model, StackerEnsembleModel):\n preprocess_kwargs = dict(infer=False, model_pred_proba_dict=model_pred_proba_dict)\n model_pred_proba_dict[model_name] = model.predict_proba(X, **preprocess_kwargs)\n else:\n model_pred_proba_dict[model_name] = model.predict_proba(X)\n\n if record_pred_time:\n time_end = time.time()\n model_pred_time_dict[model_name] = time_end - time_start\n\n if record_pred_time:\n return model_pred_proba_dict, model_pred_time_dict\n else:\n return model_pred_proba_dict\n\n # TODO: Remove _get_inputs_to_stacker_legacy eventually, move logic internally into this function instead\n def get_inputs_to_stacker(self, X, base_models, model_pred_proba_dict=None, fit=False, use_orig_features=True):\n if base_models is None:\n base_models = []\n if not fit:\n model_pred_proba_dict = self.get_model_pred_proba_dict(X=X, models=base_models, model_pred_proba_dict=model_pred_proba_dict)\n model_pred_proba_list = [model_pred_proba_dict[model] for model in base_models]\n else:\n # TODO: After _get_inputs_to_stacker_legacy is removed, this if/else is not necessary, instead 
pass fit param to get_model_pred_proba_dict()\n model_pred_proba_list = None\n\n X_stacker_input = self._get_inputs_to_stacker_legacy(X=X, level_start=1, level_end=2, model_levels={1: base_models}, y_pred_probas=model_pred_proba_list, fit=fit)\n if not use_orig_features:\n X_stacker_input = X_stacker_input.drop(columns=X.columns)\n return X_stacker_input\n\n # TODO: Legacy code, still used during training because it is technically slightly faster and more memory efficient than get_model_pred_proba_dict()\n # Remove in future as it limits flexibility in stacker inputs during training\n def _get_inputs_to_stacker_legacy(self, X, level_start, level_end, model_levels, y_pred_probas=None, fit=False):\n if level_start > level_end:\n raise AssertionError(f'level_start cannot be greater than level end: ({level_start}, {level_end})')\n if (level_start == 1) and (level_end == 1):\n return X\n if fit:\n if level_start > 1:\n dummy_stacker_start = self._get_dummy_stacker(level=level_start, model_levels=model_levels, use_orig_features=True)\n cols_to_drop = dummy_stacker_start.stack_columns\n X = X.drop(cols_to_drop, axis=1)\n dummy_stacker = self._get_dummy_stacker(level=level_end, model_levels=model_levels, use_orig_features=True)\n X = dummy_stacker.preprocess(X=X, preprocess_nonadaptive=False, fit=True, compute_base_preds=True)\n elif y_pred_probas is not None:\n if y_pred_probas == []:\n return X\n dummy_stacker = self._get_dummy_stacker(level=level_end, model_levels=model_levels, use_orig_features=True)\n X_stacker = dummy_stacker.pred_probas_to_df(pred_proba=y_pred_probas, index=X.index)\n if dummy_stacker.params['use_orig_features']:\n if level_start > 1:\n dummy_stacker_start = self._get_dummy_stacker(level=level_start, model_levels=model_levels, use_orig_features=True)\n cols_to_drop = dummy_stacker_start.stack_columns\n X = X.drop(cols_to_drop, axis=1)\n X = pd.concat([X_stacker, X], axis=1)\n else:\n X = X_stacker\n else:\n dummy_stackers = {}\n for level in range(level_start, level_end+1):\n if level > 1:\n dummy_stackers[level] = self._get_dummy_stacker(level=level, model_levels=model_levels, use_orig_features=True)\n for level in range(level_start, level_end):\n if level > 1:\n cols_to_drop = dummy_stackers[level].stack_columns\n else:\n cols_to_drop = []\n X = dummy_stackers[level+1].preprocess(X=X, preprocess_nonadaptive=False, fit=False, compute_base_preds=True)\n if len(cols_to_drop) > 0:\n X = X.drop(cols_to_drop, axis=1)\n return X\n\n # You must have previously called fit() with cache_data=True\n # Fits _FULL versions of specified models, but does NOT link them (_FULL stackers will still use normal models as input)\n def refit_single_full(self, X=None, y=None, X_val=None, y_val=None, X_unlabeled=None, models=None) -> List[str]:\n if X is None:\n X = self.load_X()\n if X_val is None:\n X_val = self.load_X_val()\n if y is None:\n y = self.load_y()\n if y_val is None:\n y_val = self.load_y_val()\n\n if models is None:\n models = self.get_model_names()\n\n model_levels = dict()\n ignore_models = []\n ignore_stack_names = [REFIT_FULL_NAME]\n for stack_name in ignore_stack_names:\n ignore_models += self.get_model_names(stack_name=stack_name) # get_model_names returns [] if stack_name does not exist\n models = [model for model in models if model not in ignore_models]\n for model in models:\n model_level = self.get_model_level(model)\n if model_level not in model_levels:\n model_levels[model_level] = []\n model_levels[model_level].append(model)\n\n levels = sorted(model_levels.keys())\n 
models_trained_full = []\n model_full_dict = {}\n for level in levels:\n models_level = model_levels[level]\n for model in models_level:\n model = self.load_model(model)\n model_name = model.name\n model_full = model.convert_to_refit_full_template()\n # Mitigates situation where bagged models barely had enough memory and refit requires more. Worst case results in OOM, but this lowers chance of failure.\n model_full._user_params_aux['max_memory_usage_ratio'] = model.params_aux['max_memory_usage_ratio'] * 1.15\n # TODO: Do it for all models in the level at once to avoid repeated processing of data?\n base_model_names = self.get_base_model_names(model_name)\n stacker_type = type(model)\n if issubclass(stacker_type, WeightedEnsembleModel):\n # TODO: Technically we don't need to re-train the weighted ensemble, we could just copy the original and re-use the weights.\n w = None\n if X_val is None:\n if self.weight_evaluation:\n X, w = extract_column(X, self.sample_weight)\n X_stack_preds = self.get_inputs_to_stacker(X, base_models=base_model_names, fit=True, use_orig_features=False)\n y_input = y\n else:\n if self.weight_evaluation:\n X_val, w = extract_column(X_val, self.sample_weight)\n X_stack_preds = self.get_inputs_to_stacker(X_val, base_models=base_model_names, fit=False, use_orig_features=False) # TODO: May want to cache this during original fit, as we do with OOF preds\n y_input = y_val\n if w is not None:\n X_stack_preds[self.sample_weight] = w.values/w.mean()\n\n orig_weights = model._get_model_weights()\n base_model_names = list(orig_weights.keys())\n weights = list(orig_weights.values())\n\n child_hyperparameters = {\n AG_ARGS: {'model_type': 'SIMPLE_ENS_WEIGHTED'},\n 'weights': weights,\n }\n\n # TODO: stack_name=REFIT_FULL_NAME_AUX?\n models_trained = self.generate_weighted_ensemble(X=X_stack_preds, y=y_input, level=level, stack_name=REFIT_FULL_NAME, k_fold=1, n_repeats=1,\n base_model_names=base_model_names, name_suffix=REFIT_FULL_SUFFIX, save_bag_folds=True,\n check_if_best=False, child_hyperparameters=child_hyperparameters)\n # TODO: Do the below more elegantly, ideally as a parameter to the trainer train function to disable recording scores/pred time.\n for model_weighted_ensemble in models_trained:\n model_loaded = self.load_model(model_weighted_ensemble)\n model_loaded.val_score = None\n model_loaded.predict_time = None\n self.set_model_attribute(model=model_weighted_ensemble, attribute='val_score', val=None)\n self.save_model(model_loaded)\n else:\n models_trained = self.stack_new_level_core(X=X, y=y, X_val=X_val, y_val=y_val, X_unlabeled=X_unlabeled, models=[model_full], base_model_names=base_model_names, level=level, stack_name=REFIT_FULL_NAME,\n hyperparameter_tune_kwargs=None, feature_prune=False, k_fold=0, n_repeats=1, ensemble_type=stacker_type, refit_full=True)\n if len(models_trained) == 1:\n model_full_dict[model_name] = models_trained[0]\n for model_trained in models_trained:\n self._model_full_dict_val_score[model_trained] = self.get_model_attribute(model_name, 'val_score')\n models_trained_full += models_trained\n\n keys_to_del = []\n for model in model_full_dict.keys():\n if model_full_dict[model] not in models_trained_full:\n keys_to_del.append(model)\n for key in keys_to_del:\n del model_full_dict[key]\n self.model_full_dict.update(model_full_dict)\n self.save() # TODO: This could be more efficient by passing in arg to not save if called by refit_ensemble_full since it saves anyways later.\n return models_trained_full\n\n # Fits _FULL models and links them in 
the stack so _FULL models only use other _FULL models as input during stacking\n # If model is specified, will fit all _FULL models that are ancestors of the provided model, automatically linking them.\n # If no model is specified, all models are refit and linked appropriately.\n def refit_ensemble_full(self, model='all') -> dict:\n if model == 'all':\n ensemble_set = self.get_model_names()\n else:\n if model == 'best':\n model = self.get_model_best()\n ensemble_set = self.get_minimum_model_set(model)\n existing_models = self.get_model_names()\n ensemble_set_valid = []\n for model in ensemble_set:\n if model in self.model_full_dict and self.model_full_dict[model] in existing_models:\n logger.log(20, f\"Model '{model}' already has a refit _FULL model: '{self.model_full_dict[model]}', skipping refit...\")\n else:\n ensemble_set_valid.append(model)\n if ensemble_set_valid:\n models_trained_full = self.refit_single_full(models=ensemble_set_valid)\n else:\n models_trained_full = []\n\n for model_full in models_trained_full:\n # TODO: Consider moving base model info to a separate pkl file so that it can be edited without having to load/save the model again\n # Downside: Slower inference speed when models are not persisted in memory prior.\n model_loaded = self.load_model(model_full)\n if isinstance(model_loaded, StackerEnsembleModel):\n for stack_column_prefix in model_loaded.stack_column_prefix_lst:\n base_model = model_loaded.stack_column_prefix_to_model_map[stack_column_prefix]\n new_base_model = self.model_full_dict[base_model]\n new_base_model_type = self.get_model_attribute(model=new_base_model, attribute='type')\n new_base_model_path = self.get_model_attribute(model=new_base_model, attribute='path')\n\n model_loaded.base_model_paths_dict[new_base_model] = new_base_model_path\n model_loaded.base_model_types_dict[new_base_model] = new_base_model_type\n model_loaded.base_model_names.append(new_base_model)\n model_loaded.stack_column_prefix_to_model_map[stack_column_prefix] = new_base_model\n\n model_loaded.save() # TODO: Avoid this!\n\n # Remove old edges and add new edges\n edges_to_remove = list(self.model_graph.in_edges(model_loaded.name))\n self.model_graph.remove_edges_from(edges_to_remove)\n if isinstance(model_loaded, StackerEnsembleModel):\n for stack_column_prefix in model_loaded.stack_column_prefix_lst:\n base_model_name = model_loaded.stack_column_prefix_to_model_map[stack_column_prefix]\n self.model_graph.add_edge(base_model_name, model_loaded.name)\n\n self.save()\n return copy.deepcopy(self.model_full_dict)\n\n # TODO: Take best performance model with lowest inference\n def get_model_best(self, can_infer=None, allow_full=True):\n models = self.get_model_names(can_infer=can_infer)\n if not models:\n raise AssertionError('Trainer has no fit models that can infer.')\n model_performances = self.get_models_attribute_dict(attribute='val_score')\n perfs = [(m, model_performances[m]) for m in models if model_performances[m] is not None]\n if not perfs:\n model_full_dict_inverse = {full: orig for orig, full in self.model_full_dict.items()}\n models = [m for m in models if m in model_full_dict_inverse]\n perfs = [(m, self._get_full_model_val_score(m)) for m in models]\n if not perfs:\n raise AssertionError('No fit models that can infer exist with a validation score to choose the best model.')\n elif not allow_full:\n raise AssertionError('No fit models that can infer exist with a validation score to choose the best model, but refit_full models exist. 
Set `allow_full=True` to get the best refit_full model.')\n return max(perfs, key=lambda i: i[1])[0]\n\n def save_model(self, model, reduce_memory=True):\n # TODO: In future perhaps give option for the reduce_memory_size arguments, perhaps trainer level variables specified by user?\n if reduce_memory:\n model.reduce_memory_size(remove_fit=True, remove_info=False, requires_save=True)\n if self.low_memory:\n model.save()\n else:\n self.models[model.name] = model\n\n def save(self):\n models = self.models\n if self.low_memory:\n self.models = {}\n save_pkl.save(path=self.path + self.trainer_file_name, object=self)\n if self.low_memory:\n self.models = models\n\n def persist_models(self, model_names='all', with_ancestors=False, max_memory=None) -> List[str]:\n if model_names == 'all':\n model_names = self.get_model_names()\n elif model_names == 'best':\n if self.model_best is not None:\n model_names = [self.model_best]\n else:\n model_names = [self.get_model_best(can_infer=True)]\n if not isinstance(model_names, list):\n raise ValueError(f'model_names must be a list of model names. Invalid value: {model_names}')\n if with_ancestors:\n model_names = self.get_minimum_models_set(model_names)\n model_names_already_persisted = [model_name for model_name in model_names if model_name in self.models]\n if model_names_already_persisted:\n logger.log(30, f'The following {len(model_names_already_persisted)} models were already persisted and will be ignored in the model loading process: {model_names_already_persisted}')\n model_names = [model_name for model_name in model_names if model_name not in model_names_already_persisted]\n if not model_names:\n logger.log(30, f'No valid unpersisted models were specified to be persisted, so no change in model persistence was performed.')\n return []\n if max_memory is not None:\n info = self.get_models_info(model_names)\n model_mem_size_map = {model: info[model]['memory_size'] for model in model_names}\n for model in model_mem_size_map:\n if 'children_info' in info[model]:\n for child in info[model]['children_info'].values():\n model_mem_size_map[model] += child['memory_size']\n total_mem_required = sum(model_mem_size_map.values())\n available_mem = psutil.virtual_memory().available\n memory_proportion = total_mem_required / available_mem\n if memory_proportion > max_memory:\n logger.log(30, f'Models will not be persisted in memory as they are expected to require {round(memory_proportion * 100, 2)}% of memory, which is greater than the specified max_memory limit of {round(max_memory*100, 2)}%.')\n logger.log(30, f'\\tModels will be loaded on-demand from disk to maintain safe memory usage, increasing inference latency. If inference latency is a concern, try to use smaller models or increase the value of max_memory.')\n return []\n else:\n logger.log(20, f'Persisting {len(model_names)} models in memory. 
Models will require {round(memory_proportion*100, 2)}% of memory.')\n\n models = []\n for model_name in model_names:\n model = self.load_model(model_name)\n self.models[model.name] = model\n models.append(model)\n\n for model in models:\n # TODO: Move this to model code\n if isinstance(model, BaggedEnsembleModel):\n for fold, fold_model in enumerate(model.models):\n if isinstance(fold_model, str):\n model.models[fold] = model.load_child(fold_model)\n return model_names\n\n # TODO: model_name change to model in params\n def load_model(self, model_name: str, path: str = None, model_type=None) -> AbstractModel:\n if isinstance(model_name, AbstractModel):\n return model_name\n if model_name in self.models.keys():\n return self.models[model_name]\n else:\n if path is None:\n path = self.get_model_attribute(model=model_name, attribute='path')\n if model_type is None:\n model_type = self.get_model_attribute(model=model_name, attribute='type')\n return model_type.load(path=path, reset_paths=self.reset_paths)\n\n def unpersist_models(self, model_names='all') -> list:\n if model_names == 'all':\n model_names = list(self.models.keys())\n if not isinstance(model_names, list):\n raise ValueError(f'model_names must be a list of model names. Invalid value: {model_names}')\n unpersisted_models = []\n for model in model_names:\n if model in self.models:\n self.models.pop(model)\n unpersisted_models.append(model)\n if unpersisted_models:\n logger.log(20, f'Unpersisted {len(unpersisted_models)} models: {unpersisted_models}')\n else:\n logger.log(30, f'No valid persisted models were specified to be unpersisted, so no change in model persistence was performed.')\n return unpersisted_models\n\n def generate_weighted_ensemble(self, X, y, level, base_model_names, k_fold=1, n_repeats=1, stack_name=None, hyperparameters=None,\n time_limit=None, name_suffix: str = None, save_bag_folds=None, check_if_best=True, child_hyperparameters=None,\n get_models_func=None) -> List[str]:\n if get_models_func is None:\n get_models_func = self.construct_model_templates\n if len(base_model_names) == 0:\n logger.log(20, 'No base models to train on, skipping weighted ensemble...')\n return []\n\n if child_hyperparameters is None:\n child_hyperparameters = {}\n\n if save_bag_folds is None:\n can_infer_dict = self.get_models_attribute_dict('can_infer', models=base_model_names)\n if False in can_infer_dict.values():\n save_bag_folds = False\n else:\n save_bag_folds = True\n\n weighted_ensemble_model, _ = get_models_func(\n hyperparameters={\n 'default': {\n 'ENS_WEIGHTED': [child_hyperparameters],\n }\n },\n ensemble_type=WeightedEnsembleModel,\n ensemble_kwargs=dict(\n base_model_names=base_model_names,\n base_model_paths_dict=self.get_models_attribute_dict(attribute='path', models=base_model_names),\n base_model_types_dict=self.get_models_attribute_dict(attribute='type', models=base_model_names),\n base_model_types_inner_dict=self.get_models_attribute_dict(attribute='type_inner', models=base_model_names),\n base_model_performances_dict=self.get_models_attribute_dict(attribute='val_score', models=base_model_names),\n hyperparameters=hyperparameters,\n random_state=level + self.random_state,\n ),\n ag_args={'name_bag_suffix': ''},\n ag_args_ensemble={'save_bag_folds': save_bag_folds},\n name_suffix=name_suffix,\n level=level,\n )\n weighted_ensemble_model = weighted_ensemble_model[0]\n w = None\n if self.weight_evaluation:\n X, w = extract_column(X, self.sample_weight)\n models = self._train_multi(\n X=X,\n y=y,\n X_val=None,\n 
y_val=None,\n models=[weighted_ensemble_model],\n k_fold=k_fold,\n n_repeats=n_repeats,\n hyperparameter_tune_kwargs=None,\n feature_prune=False,\n stack_name=stack_name,\n level=level,\n time_limit=time_limit,\n ens_sample_weight=w,\n fit_kwargs=dict(num_classes=self.num_classes, groups=None), # FIXME: Is this the right way to do this?\n )\n for weighted_ensemble_model_name in models:\n if check_if_best and weighted_ensemble_model_name in self.get_model_names():\n if self.model_best is None:\n self.model_best = weighted_ensemble_model_name\n else:\n best_score = self.get_model_attribute(self.model_best, 'val_score')\n cur_score = self.get_model_attribute(weighted_ensemble_model_name, 'val_score')\n if cur_score > best_score:\n # new best model\n self.model_best = weighted_ensemble_model_name\n return models\n\n def _train_single(self, X, y, model: AbstractModel, X_val=None, y_val=None, **model_fit_kwargs) -> AbstractModel:\n \"\"\"\n Trains model but does not add the trained model to this Trainer.\n Returns trained model object.\n \"\"\"\n model = model.fit(X=X, y=y, X_val=X_val, y_val=y_val, **model_fit_kwargs)\n return model\n\n def _train_and_save(self, X, y, model: AbstractModel, X_val=None, y_val=None, stack_name='core', level=1, **model_fit_kwargs) -> List[str]:\n \"\"\"\n Trains model and saves it to disk, returning a list with a single element: The name of the model, or no elements if training failed.\n If the model name is returned:\n The model can be accessed via self.load_model(model.name).\n The model will have metadata information stored in self.model_graph.\n The model's name will be appended to self.models_level[stack_name][level]\n The model will be accessible and usable through any Trainer function that takes as input 'model' or 'model_name'.\n Note: self._train_and_save should not be used outside of self._train_single_full\n \"\"\"\n fit_start_time = time.time()\n time_limit = model_fit_kwargs.get('time_limit', None)\n model_names_trained = []\n try:\n fit_log_message = f'Fitting model: {model.name} ...'\n if time_limit is not None:\n if time_limit <= 0:\n logger.log(15, f'Skipping {model.name} due to lack of time remaining.')\n return model_names_trained\n if self._time_limit is not None and self._time_train_start is not None:\n time_left_total = self._time_limit - (fit_start_time - self._time_train_start)\n else:\n time_left_total = time_limit\n fit_log_message += f' Training model for up to {round(time_limit, 2)}s of the {round(time_left_total, 2)}s of remaining time.'\n logger.log(20, fit_log_message)\n model = self._train_single(X, y, model, X_val, y_val, **model_fit_kwargs)\n fit_end_time = time.time()\n if self.weight_evaluation:\n w = model_fit_kwargs.get('sample_weight', None)\n w_val = model_fit_kwargs.get('sample_weight_val', None)\n else:\n w = None\n w_val = None\n if isinstance(model, BaggedEnsembleModel):\n if X_val is not None and y_val is not None:\n score = model.score(X=X_val, y=y_val, sample_weight=w_val)\n elif model.is_valid_oof() or isinstance(model, WeightedEnsembleModel):\n score = model.score_with_oof(y=y, sample_weight=w)\n else:\n score = None\n else:\n if X_val is not None and y_val is not None:\n score = model.score(X=X_val, y=y_val, sample_weight=w_val)\n else:\n score = None\n pred_end_time = time.time()\n if model.fit_time is None:\n model.fit_time = fit_end_time - fit_start_time\n if model.predict_time is None:\n if score is None:\n model.predict_time = None\n else:\n model.predict_time = pred_end_time - fit_end_time\n model.val_score = 
score\n # TODO: Add recursive=True to avoid repeatedly loading models each time this is called for bagged ensembles (especially during repeated bagging)\n self.save_model(model=model)\n except TimeLimitExceeded:\n logger.log(20, f'\\tTime limit exceeded... Skipping {model.name}.')\n # logger.log(20, '\\tTime wasted: ' + str(time.time() - fit_start_time))\n del model\n except NotEnoughMemoryError:\n logger.warning(f'\\tNot enough memory to train {model.name}... Skipping this model.')\n del model\n except NoValidFeatures:\n logger.warning(f'\\tNo valid features to train {model.name}... Skipping this model.')\n del model\n except NoGPUError:\n logger.warning(f'\\tNo GPUs available to train {model.name}... Skipping this model.')\n del model\n except ImportError as err:\n logger.error(f'\\tWarning: Exception caused {model.name} to fail during training (ImportError)... Skipping this model.')\n logger.error(f'\\t\\t{err}')\n if self.verbosity > 2:\n logger.exception('Detailed Traceback:')\n except Exception as err:\n logger.error(f'\\tWarning: Exception caused {model.name} to fail during training... Skipping this model.')\n logger.error(f'\\t\\t{err}')\n if self.verbosity > 0:\n logger.exception('Detailed Traceback:')\n del model\n else:\n self._add_model(model=model, stack_name=stack_name, level=level)\n model_names_trained.append(model.name)\n if self.low_memory:\n del model\n return model_names_trained\n\n def _add_model(self, model: AbstractModel, stack_name: str = 'core', level: int = 1) -> bool:\n \"\"\"\n Registers the fit model in the Trainer object. Stores information such as model performance, save path, model type, and more.\n To use a model in Trainer, self._add_model must be called.\n If self.low_memory, then the model object will be deleted after this call. Use Trainer directly to leverage the model further.\n\n Parameters\n ----------\n model : AbstractModel\n Model which has been fit. This model will be registered to the Trainer.\n stack_name : str, default 'core'\n Stack name to assign the model to. This is used for advanced functionality.\n level : int, default 1\n Stack level of the stack name to assign the model to. This is used for advanced functionality.\n The model's name is appended to self.models_level[stack_name][level]\n The model's base_models (if it has any) must all be a lower level than the model.\n\n Returns\n -------\n boolean, True if model was registered, False if model was found to be invalid and not registered.\n \"\"\"\n if model.val_score is not None:\n if model.eval_metric.name != self.eval_metric.name:\n logger.log(20, f'\\tNote: model has different eval_metric than default.')\n logger.log(20, f'\\t{round(model.val_score, 4)}\\t = Validation score ({model.eval_metric.name})')\n if model.fit_time is not None:\n logger.log(20, f'\\t{round(model.fit_time, 2)}s\\t = Training runtime')\n if model.predict_time is not None:\n logger.log(20, f'\\t{round(model.predict_time, 2)}s\\t = Validation runtime')\n if model.val_score is not None and np.isnan(model.val_score):\n logger.warning(f'WARNING: {model.name} has a val_score of {model.val_score} (NaN)! This should never happen. 
The model will not be saved to avoid instability.')\n return False\n # TODO: Add to HPO\n if isinstance(model, BaggedEnsembleModel):\n type_inner = model._child_type\n else:\n type_inner = type(model)\n self.model_graph.add_node(\n model.name,\n fit_time=model.fit_time,\n predict_time=model.predict_time,\n val_score=model.val_score,\n path=model.path,\n type=type(model), # Outer type, can be BaggedEnsemble, StackEnsemble (Type that is able to load the model)\n type_inner=type_inner, # Inner type, if Ensemble then it is the type of the inner model (May not be able to load with this type)\n can_infer=model.can_infer(),\n can_fit=model.can_fit(),\n is_valid=model.is_valid(),\n stack_name=stack_name,\n level=level,\n **model._fit_metadata,\n )\n if isinstance(model, StackerEnsembleModel):\n prior_models = self.get_model_names()\n # TODO: raise exception if no base models and level != 1?\n for stack_column_prefix in model.stack_column_prefix_lst:\n base_model_name = model.stack_column_prefix_to_model_map[stack_column_prefix]\n if base_model_name not in prior_models:\n raise AssertionError(f\"Model '{model.name}' depends on model '{base_model_name}', but '{base_model_name}' is not registered as a trained model! Valid models: {prior_models}\")\n elif level <= self.model_graph.nodes[base_model_name]['level']:\n raise AssertionError(f\"Model '{model.name}' depends on model '{base_model_name}', but '{base_model_name}' is not in a lower stack level. ('{model.name}' level: {level}, '{base_model_name}' level: {self.model_graph.nodes[base_model_name]['level']})\")\n self.model_graph.add_edge(base_model_name, model.name)\n if self.low_memory:\n del model\n return True\n\n # TODO: Split this to avoid confusion, HPO should go elsewhere?\n def _train_single_full(self, X, y, model: AbstractModel, X_unlabeled=None, X_val=None, y_val=None, feature_prune=False, hyperparameter_tune_kwargs=None,\n stack_name='core', k_fold=None, k_fold_start=0, k_fold_end=None, n_repeats=None, n_repeat_start=0, level=1, time_limit=None, fit_kwargs=None, **kwargs) -> List[str]:\n \"\"\"\n Trains a model, with the potential to train multiple versions of this model with hyperparameter tuning and feature pruning.\n Returns a list of successfully trained and saved model names.\n Models trained from this method will be accessible in this Trainer.\n \"\"\"\n if k_fold is None:\n k_fold = self.k_fold\n if n_repeats is None:\n n_repeats = self.n_repeats\n if fit_kwargs is None:\n fit_kwargs = dict()\n model_fit_kwargs = dict(\n time_limit=time_limit,\n verbosity=self.verbosity,\n )\n model_fit_kwargs.update(fit_kwargs)\n if self.sample_weight is not None:\n X, w_train = extract_column(X, self.sample_weight)\n if w_train is not None: # may be None for ensemble\n # TODO: consider moving weight normalization into AbstractModel.fit()\n model_fit_kwargs['sample_weight'] = w_train.values/w_train.mean() # normalization can affect gradient algorithms like boosting\n if X_val is not None:\n X_val, w_val = extract_column(X_val, self.sample_weight)\n if self.weight_evaluation and w_val is not None: # ignore validation sample weights unless weight_evaluation specified\n model_fit_kwargs['sample_weight_val'] = w_val.values/w_val.mean()\n ens_sample_weight = kwargs.get('ens_sample_weight', None)\n if ens_sample_weight is not None:\n model_fit_kwargs['sample_weight'] = ens_sample_weight # sample weights to use for weighted ensemble only\n if self._groups is not None and 'groups' not in model_fit_kwargs:\n if k_fold == self.k_fold: # don't do this on 
refit full\n model_fit_kwargs['groups'] = self._groups\n\n #######################\n # FIXME: This section is a hack, compute genuine feature_metadata for each stack level instead\n # Don't do this here, do this upstream so it isn't recomputed for each model\n # Add feature_metadata to model_fit_kwargs\n # FIXME: Sample weight `extract_column` is a hack, have to compute feature_metadata here because sample weight column could be in X upstream, extract sample weight column upstream instead.\n # FIXME: This doesn't assign proper special types to stack features, relying on a hack in StackerEnsembleModel to assign S_STACK to feature metadata, don't do this.\n # Remove hack in StackerEnsembleModel\n feature_metadata = self.feature_metadata\n features_base = self.feature_metadata.get_features()\n features_new = [feature for feature in X.columns if feature not in features_base]\n if features_new:\n feature_metadata_new = FeatureMetadata.from_df(X[features_new])\n feature_metadata = feature_metadata.join_metadata(feature_metadata_new).keep_features(list(X.columns))\n model_fit_kwargs['feature_metadata'] = feature_metadata\n #######################\n\n if hyperparameter_tune_kwargs:\n if n_repeat_start != 0:\n raise ValueError(f'n_repeat_start must be 0 to hyperparameter_tune, value = {n_repeat_start}')\n elif k_fold_start != 0:\n raise ValueError(f'k_fold_start must be 0 to hyperparameter_tune, value = {k_fold_start}')\n if not isinstance(hyperparameter_tune_kwargs, tuple):\n num_trials = 1 if time_limit is None else 1000\n hyperparameter_tune_kwargs = scheduler_factory(hyperparameter_tune_kwargs, num_trials=num_trials, nthreads_per_trial='auto', ngpus_per_trial='auto')\n # hpo_models (dict): keys = model_names, values = model_paths\n logger.log(20, f'Hyperparameter tuning model: {model.name} ...')\n try:\n if isinstance(model, BaggedEnsembleModel):\n hpo_models, hpo_model_performances, hpo_results = model.hyperparameter_tune(X=X, y=y, k_fold=k_fold, scheduler_options=hyperparameter_tune_kwargs, **model_fit_kwargs)\n else:\n hpo_models, hpo_model_performances, hpo_results = model.hyperparameter_tune(X=X, y=y, X_val=X_val, y_val=y_val, scheduler_options=hyperparameter_tune_kwargs, **model_fit_kwargs)\n except Exception as err:\n logger.exception(f'Warning: Exception caused {model.name} to fail during hyperparameter tuning... Skipping this model.')\n logger.warning(err)\n del model\n model_names_trained = []\n else:\n # Commented out because it takes too much space (>>5 GB if run for an hour on a small-medium sized dataset)\n # self.hpo_results[model.name] = hpo_results\n model_names_trained = []\n self._extra_banned_names.add(model.name)\n for model_hpo_name, model_path in hpo_models.items():\n model_hpo = self.load_model(model_hpo_name, path=model_path, model_type=type(model))\n logger.log(20, f'Fitted model: {model_hpo.name} ...')\n if self._add_model(model=model_hpo, stack_name=stack_name, level=level):\n model_names_trained.append(model_hpo.name)\n else:\n if isinstance(model, BaggedEnsembleModel):\n model_fit_kwargs.update(dict(\n k_fold=k_fold,\n k_fold_start=k_fold_start,\n k_fold_end=k_fold_end,\n n_repeats=n_repeats,\n n_repeat_start=n_repeat_start,\n compute_base_preds=False,\n ))\n model_names_trained = self._train_and_save(X, y, model, X_val, y_val, X_unlabeled=X_unlabeled, stack_name=stack_name, level=level, **model_fit_kwargs)\n self.save()\n return model_names_trained\n\n # TODO: How to deal with models that fail during this? 
They have trained valid models before, but should we still use those models or remove the entire model? Currently we still use models.\n # TODO: Time allowance can be made better by only using time taken during final model training and not during HPO and feature pruning.\n # TODO: Time allowance not accurate if running from fit_continue\n # TODO: Remove level and stack_name arguments, can get them automatically\n # TODO: Make sure that pretraining on X_unlabeled only happens 1 time rather than every fold of bagging. (Do during pretrain API work?)\n def _train_multi_repeats(self, X, y, models: list, n_repeats, n_repeat_start=1, time_limit=None, time_limit_total_level=None, **kwargs) -> List[str]:\n \"\"\"\n Fits bagged ensemble models with additional folds and/or bagged repeats.\n Models must have already been fit prior to entering this method.\n This method should only be called in self._train_multi\n Returns a list of successfully trained and saved model names.\n \"\"\"\n if time_limit_total_level is None:\n time_limit_total_level = time_limit\n models_valid = models\n models_valid_next = []\n repeats_completed = 0\n time_start = time.time()\n for n in range(n_repeat_start, n_repeats):\n if not models_valid:\n break # No models to repeat\n if time_limit is not None:\n time_start_repeat = time.time()\n time_left = time_limit - (time_start_repeat - time_start)\n if n == n_repeat_start:\n time_required = time_limit_total_level * 0.575 # Require slightly over 50% to be safe\n else:\n time_required = (time_start_repeat - time_start) / repeats_completed * (0.575/0.425)\n if time_left < time_required:\n logger.log(15, 'Not enough time left to finish repeated k-fold bagging, stopping early ...')\n break\n logger.log(20, f'Repeating k-fold bagging: {n+1}/{n_repeats}')\n for i, model in enumerate(models_valid):\n if not self.get_model_attribute(model=model, attribute='can_fit'):\n if isinstance(model, str):\n models_valid_next.append(model)\n else:\n models_valid_next.append(model.name)\n continue\n\n if isinstance(model, str):\n model = self.load_model(model)\n if not isinstance(model, BaggedEnsembleModel):\n raise AssertionError(f'{model.name} must inherit from BaggedEnsembleModel to perform repeated k-fold bagging. 
Model type: {type(model).__name__}')\n if time_limit is None:\n time_left = None\n else:\n time_start_model = time.time()\n time_left = time_limit - (time_start_model - time_start)\n\n models_valid_next += self._train_single_full(X=X, y=y, model=model, k_fold_start=0, k_fold_end=None, n_repeats=n + 1, n_repeat_start=n, time_limit=time_left, **kwargs)\n models_valid = copy.deepcopy(models_valid_next)\n models_valid_next = []\n repeats_completed += 1\n logger.log(20, f'Completed {n_repeat_start + repeats_completed}/{n_repeats} k-fold bagging repeats ...')\n return models_valid\n\n def _train_multi_initial(self, X, y, models: List[AbstractModel], k_fold, n_repeats, hyperparameter_tune_kwargs=None, feature_prune=False, time_limit=None, **kwargs) -> List[str]:\n \"\"\"\n Fits models that have not previously been fit.\n This method should only be called in self._train_multi\n Returns a list of successfully trained and saved model names.\n \"\"\"\n fit_args = dict(\n X=X,\n y=y,\n k_fold=k_fold,\n )\n fit_args.update(kwargs)\n hpo_enabled = False\n if hyperparameter_tune_kwargs:\n for key in hyperparameter_tune_kwargs:\n if hyperparameter_tune_kwargs[key] is not None:\n hpo_enabled = True\n break\n\n hpo_time_ratio = 0.9\n if hpo_enabled:\n time_split = True\n else:\n time_split = False\n if k_fold == 0:\n time_ratio = hpo_time_ratio if hpo_enabled else 1\n models = self._train_multi_fold(models=models, hyperparameter_tune_kwargs=hyperparameter_tune_kwargs, feature_prune=feature_prune, time_limit=time_limit, time_split=time_split, time_ratio=time_ratio, **fit_args)\n else:\n k_fold_start = 0\n if hpo_enabled or feature_prune:\n time_start = time.time()\n time_ratio = (1 / k_fold) * hpo_time_ratio\n models = self._train_multi_fold(models=models, hyperparameter_tune_kwargs=hyperparameter_tune_kwargs, feature_prune=feature_prune,\n k_fold_start=0, k_fold_end=1, n_repeats=n_repeats, n_repeat_start=0, time_limit=time_limit, time_split=time_split, time_ratio=time_ratio, **fit_args)\n k_fold_start = 1\n if time_limit is not None:\n time_limit = time_limit - (time.time() - time_start)\n\n models = self._train_multi_fold(models=models, hyperparameter_tune_kwargs=None, feature_prune=False, k_fold_start=k_fold_start, k_fold_end=k_fold, n_repeats=n_repeats, n_repeat_start=0, time_limit=time_limit, **fit_args)\n\n return models\n\n # TODO: Ban KNN from being a Stacker model outside of aux. 
Will need to ensemble select on all stack layers ensemble selector to make it work\n # TODO: Robert dataset, LightGBM is super good but RF and KNN take all the time away from it on 1h despite being much worse\n # TODO: Add time_limit_per_model\n # TODO: Rename for v0.1\n def _train_multi_fold(self, X, y, models: List[AbstractModel], time_limit=None, time_split=False,\n time_ratio=1, hyperparameter_tune_kwargs=None, **kwargs) -> List[str]:\n \"\"\"\n Trains and saves a list of models sequentially.\n This method should only be called in self._train_multi_initial\n Returns a list of trained model names.\n \"\"\"\n models_valid = []\n time_start = time.time()\n if time_limit is not None:\n time_limit = time_limit * time_ratio\n if time_limit is not None and len(models) > 0:\n time_limit_model_split = time_limit / len(models)\n else:\n time_limit_model_split = time_limit\n for i, model in enumerate(models):\n if isinstance(model, str):\n model = self.load_model(model)\n elif self.low_memory:\n model = copy.deepcopy(model)\n if hyperparameter_tune_kwargs is not None and isinstance(hyperparameter_tune_kwargs, dict):\n hyperparameter_tune_kwargs_model = hyperparameter_tune_kwargs.get(model.name, None)\n else:\n hyperparameter_tune_kwargs_model = None\n # TODO: Only update scores when finished, only update model as part of final models if finished!\n if time_split:\n time_left = time_limit_model_split\n else:\n if time_limit is None:\n time_left = None\n else:\n time_start_model = time.time()\n time_left = time_limit - (time_start_model - time_start)\n model_name_trained_lst = self._train_single_full(X, y, model, time_limit=time_left,\n hyperparameter_tune_kwargs=hyperparameter_tune_kwargs_model, **kwargs)\n\n if self.low_memory:\n del model\n models_valid += model_name_trained_lst\n\n return models_valid\n\n def _train_multi(self, X, y, models: List[AbstractModel], hyperparameter_tune_kwargs=None, feature_prune=False, k_fold=None, n_repeats=None, n_repeat_start=0, time_limit=None, **kwargs) -> List[str]:\n \"\"\"\n Train a list of models using the same data.\n Assumes that input data has already been processed in the form the models will receive as input (including stack feature generation).\n Trained models are available in the trainer object.\n Note: Consider using public APIs instead of this.\n Returns a list of trained model names.\n \"\"\"\n time_limit_total_level = time_limit\n if k_fold is None:\n k_fold = self.k_fold\n if n_repeats is None:\n n_repeats = self.n_repeats\n if (k_fold == 0) and (n_repeats != 1):\n raise ValueError(f'n_repeats must be 1 when k_fold is 0, values: ({n_repeats}, {k_fold})')\n if time_limit is None:\n n_repeats_initial = n_repeats\n else:\n n_repeats_initial = 1\n if n_repeat_start == 0:\n time_start = time.time()\n model_names_trained = self._train_multi_initial(X=X, y=y, models=models, k_fold=k_fold, n_repeats=n_repeats_initial, hyperparameter_tune_kwargs=hyperparameter_tune_kwargs, feature_prune=feature_prune,\n time_limit=time_limit, **kwargs)\n n_repeat_start = n_repeats_initial\n if time_limit is not None:\n time_limit = time_limit - (time.time() - time_start)\n else:\n model_names_trained = models\n if (n_repeats > 1) and (n_repeat_start < n_repeats):\n model_names_trained = self._train_multi_repeats(X=X, y=y, models=model_names_trained,\n k_fold=k_fold, n_repeats=n_repeats, n_repeat_start=n_repeat_start, time_limit=time_limit, time_limit_total_level=time_limit_total_level, **kwargs)\n return model_names_trained\n\n def _train_multi_and_ensemble(self, X, y, 
X_val, y_val, hyperparameters: dict = None, X_unlabeled=None, num_stack_levels=0, time_limit=None, groups=None, **kwargs) -> List[str]:\n \"\"\"Identical to self.train_multi_levels, but also saves the data to disk. This should only ever be called once.\"\"\"\n if self.save_data and not self.is_data_saved:\n self.save_X(X)\n self.save_y(y)\n if X_val is not None:\n self.save_X_val(X_val)\n if y_val is not None:\n self.save_y_val(y_val)\n self.is_data_saved = True\n if self._groups is None:\n self._groups = groups\n self._num_rows_train = len(X)\n if X_val is not None:\n self._num_rows_train += len(X_val)\n self._num_cols_train = len(list(X.columns))\n model_names_fit = self.train_multi_levels(X, y, hyperparameters=hyperparameters, X_val=X_val, y_val=y_val,\n X_unlabeled=X_unlabeled, level_start=1, level_end=num_stack_levels+1, time_limit=time_limit, **kwargs)\n if len(self.get_model_names()) == 0:\n raise ValueError('AutoGluon did not successfully train any models')\n return model_names_fit\n\n def _predict_model(self, X, model, model_pred_proba_dict=None):\n if isinstance(model, str):\n model = self.load_model(model)\n X = self.get_inputs_to_model(model=model, X=X, model_pred_proba_dict=model_pred_proba_dict, fit=False)\n y_pred = model.predict(X=X)\n if self._regress_preds_asprobas and model.problem_type == REGRESSION: # Convert regression preds to classes (during distillation)\n if (len(y_pred.shape) > 1) and (y_pred.shape[1] > 1):\n problem_type = MULTICLASS\n else:\n problem_type = BINARY\n y_pred = get_pred_from_proba(y_pred_proba=y_pred, problem_type=problem_type)\n return y_pred\n\n def _predict_proba_model(self, X, model, model_pred_proba_dict=None):\n if isinstance(model, str):\n model = self.load_model(model)\n X = self.get_inputs_to_model(model=model, X=X, model_pred_proba_dict=model_pred_proba_dict, fit=False)\n return model.predict_proba(X=X)\n\n def _get_dummy_stacker(self, level: int, model_levels: dict, use_orig_features=True) -> StackerEnsembleModel:\n model_names = model_levels[level - 1]\n base_models_dict = {}\n for model_name in model_names:\n if model_name in self.models.keys():\n base_models_dict[model_name] = self.models[model_name]\n hyperparameters = dict(\n use_orig_features=use_orig_features,\n max_base_models_per_type=0,\n max_base_models=0,\n )\n dummy_stacker = StackerEnsembleModel(\n path='',\n name='',\n model_base=AbstractModel(\n path='',\n name='',\n problem_type=self.problem_type,\n eval_metric=self.eval_metric,\n hyperparameters={'ag_args_fit': {'quantile_levels': self.quantile_levels}}\n ),\n base_model_names=model_names,\n base_models_dict=base_models_dict,\n base_model_paths_dict=self.get_models_attribute_dict(attribute='path', models=model_names),\n base_model_types_dict=self.get_models_attribute_dict(attribute='type', models=model_names),\n hyperparameters=hyperparameters,\n random_state=level+self.random_state\n )\n dummy_stacker.initialize(num_classes=self.num_classes)\n return dummy_stacker\n\n # TODO: Enable raw=True for bagged models when X=None\n # This is non-trivial to implement for multi-layer stacking ensembles on the OOF data.\n # TODO: Consider limiting X to 10k rows here instead of inside the model call\n def get_feature_importance(self, model=None, X=None, y=None, raw=True, **kwargs) -> pd.DataFrame:\n if model is None:\n model = self.model_best\n model: AbstractModel = self.load_model(model)\n if X is None and model.val_score is None:\n raise AssertionError(f'Model {model.name} is not valid for generating feature importances on 
original training data because no validation data was used during training, please specify new test data to compute feature importances.')\n\n if X is None:\n if isinstance(model, WeightedEnsembleModel):\n if self.bagged_mode:\n if raw:\n raise AssertionError('`feature_stage=\\'transformed\\'` feature importance on the original training data is not yet supported when bagging is enabled, please specify new test data to compute feature importances.')\n X = None\n is_oof = True\n else:\n if raw:\n X = self.load_X_val()\n else:\n X = None\n is_oof = False\n elif isinstance(model, BaggedEnsembleModel):\n if raw:\n raise AssertionError('`feature_stage=\\'transformed\\'` feature importance on the original training data is not yet supported when bagging is enabled, please specify new test data to compute feature importances.')\n X = self.load_X()\n X = self.get_inputs_to_model(model=model, X=X, fit=True)\n is_oof = True\n else:\n X = self.load_X_val()\n if not raw:\n X = self.get_inputs_to_model(model=model, X=X, fit=False)\n is_oof = False\n else:\n is_oof = False\n if not raw:\n X = self.get_inputs_to_model(model=model, X=X, fit=False)\n\n if y is None and X is not None:\n if is_oof:\n y = self.load_y()\n else:\n y = self.load_y_val()\n\n if raw:\n return self._get_feature_importance_raw(X=X, y=y, model=model, **kwargs)\n else:\n if is_oof:\n kwargs['is_oof'] = is_oof\n return model.compute_feature_importance(X=X, y=y, **kwargs)\n\n # TODO: Can get feature importances of all children of model at no extra cost, requires scoring the values after predict_proba on each model\n # Could solve by adding a self.score_all() function which takes model as input and also returns scores of all children models.\n # This would be best solved after adding graph representation, it lives most naturally in AbstractModel\n # TODO: Can skip features which were pruned on all models that model depends on (Complex to implement, requires graph representation)\n # TODO: Note that raw importance will not equal non-raw importance for bagged models, even if raw features are identical to the model features.\n # This is because for non-raw, we do an optimization where each fold model calls .compute_feature_importance(), and then the feature importances are averaged across the folds.\n # This is different from raw, where the predictions of the folds are averaged and then feature importance is computed.\n # Consider aligning these methods so they produce the same result.\n # The output of this function is identical to non-raw when model is level 1 and non-bagged\n def _get_feature_importance_raw(self, X, y, model, eval_metric=None, **kwargs) -> pd.DataFrame:\n if eval_metric is None:\n eval_metric = self.eval_metric\n if model is None:\n model = self.model_best\n if eval_metric.needs_pred:\n predict_func = self.predict\n else:\n predict_func = self.predict_proba\n model: AbstractModel = self.load_model(model)\n predict_func_kwargs = dict(model=model)\n return compute_permutation_feature_importance(\n X=X, y=y, predict_func=predict_func, predict_func_kwargs=predict_func_kwargs, eval_metric=eval_metric, **kwargs\n )\n\n def _get_models_load_info(self, model_names):\n model_names = copy.deepcopy(model_names)\n model_paths = self.get_models_attribute_dict(attribute='path', models=model_names)\n model_types = self.get_models_attribute_dict(attribute='type', models=model_names)\n return model_names, model_paths, model_types\n\n # Sums the attribute value across all models that the provided model depends on, including itself.\n # For 
instance, this function can return the expected total predict_time of a model.\n # attribute is the name of the desired attribute to be summed, or a dictionary of model name -> attribute value if the attribute is not present in the graph.\n def get_model_attribute_full(self, model, attribute, func=sum):\n base_model_set = self.get_minimum_model_set(model)\n if isinstance(attribute, dict):\n is_dict = True\n else:\n is_dict = False\n if len(base_model_set) == 1:\n if is_dict:\n return attribute[model]\n else:\n return self.model_graph.nodes[base_model_set[0]][attribute]\n # attribute_full = 0\n attribute_lst = []\n for base_model in base_model_set:\n if is_dict:\n attribute_base_model = attribute[base_model]\n else:\n attribute_base_model = self.model_graph.nodes[base_model][attribute]\n if attribute_base_model is None:\n return None\n attribute_lst.append(attribute_base_model)\n # attribute_full += attribute_base_model\n if attribute_lst:\n attribute_full = func(attribute_lst)\n else:\n attribute_full = 0\n return attribute_full\n\n # Returns dictionary of model name -> attribute value for the provided attribute\n def get_models_attribute_dict(self, attribute, models: list = None) -> dict:\n models_attribute_dict = nx.get_node_attributes(self.model_graph, attribute)\n if models is not None:\n model_names = []\n for model in models:\n if not isinstance(model, str):\n model = model.name\n model_names.append(model)\n models_attribute_dict = {key: val for key, val in models_attribute_dict.items() if key in model_names}\n return models_attribute_dict\n\n # TODO: v0.1 Proper error catching\n # Returns attribute value for the given model\n def get_model_attribute(self, model, attribute: str):\n if not isinstance(model, str):\n model = model.name\n return self.model_graph.nodes[model][attribute]\n\n def set_model_attribute(self, model, attribute: str, val):\n if not isinstance(model, str):\n model = model.name\n self.model_graph.nodes[model][attribute] = val\n\n # Gets the minimum set of models that the provided model depends on, including itself\n # Returns a list of model names\n def get_minimum_model_set(self, model, include_self=True) -> list:\n if not isinstance(model, str):\n model = model.name\n minimum_model_set = list(nx.bfs_tree(self.model_graph, model, reverse=True))\n if not include_self:\n minimum_model_set = [m for m in minimum_model_set if m != model]\n return minimum_model_set\n\n # Gets the minimum set of models that the provided models depend on, including themselves\n # Returns a list of model names\n def get_minimum_models_set(self, models: list) -> list:\n models_set = set()\n for model in models:\n models_set = models_set.union(self.get_minimum_model_set(model))\n return list(models_set)\n\n # Gets the set of base models used directly by the provided model\n # Returns a list of model names\n def get_base_model_names(self, model) -> list:\n if not isinstance(model, str):\n model = model.name\n base_model_set = list(self.model_graph.predecessors(model))\n return base_model_set\n\n def _get_banned_model_names(self) -> list:\n \"\"\"Gets all model names which would cause model files to be overwritten if a new model was trained with the name\"\"\"\n return self.get_model_names() + list(self._extra_banned_names)\n\n def leaderboard(self, extra_info=False):\n model_names = self.get_model_names()\n score_val = []\n fit_time_marginal = []\n pred_time_val_marginal = []\n stack_level = []\n fit_time = []\n pred_time_val = []\n can_infer = []\n fit_order = list(range(1, 
len(model_names)+1))\n score_val_dict = self.get_models_attribute_dict('val_score')\n fit_time_marginal_dict = self.get_models_attribute_dict('fit_time')\n predict_time_marginal_dict = self.get_models_attribute_dict('predict_time')\n for model_name in model_names:\n score_val.append(score_val_dict[model_name])\n fit_time_marginal.append(fit_time_marginal_dict[model_name])\n fit_time.append(self.get_model_attribute_full(model=model_name, attribute='fit_time'))\n pred_time_val_marginal.append(predict_time_marginal_dict[model_name])\n pred_time_val.append(self.get_model_attribute_full(model=model_name, attribute='predict_time'))\n stack_level.append(self.get_model_level(model_name))\n can_infer.append(self.model_graph.nodes[model_name]['can_infer'])\n\n model_info_dict = defaultdict(list)\n if extra_info:\n # TODO: feature_metadata\n # TODO: disk size\n # TODO: load time\n # TODO: Add persist_if_mem_safe() function to persist in memory all models if reasonable memory size (or a specific model+ancestors)\n # TODO: Add is_persisted() function to check which models are persisted in memory\n # TODO: package_dependencies, package_dependencies_full\n\n info = self.get_info(include_model_info=True)\n model_info = info['model_info']\n custom_model_info = {}\n for model_name in model_info:\n custom_info = {}\n bagged_info = model_info[model_name].get('bagged_info', {})\n custom_info['num_models'] = bagged_info.get('num_child_models', 1)\n custom_info['memory_size'] = bagged_info.get('max_memory_size', model_info[model_name]['memory_size'])\n custom_info['memory_size_min'] = bagged_info.get('min_memory_size', model_info[model_name]['memory_size'])\n custom_info['child_model_type'] = bagged_info.get('child_model_type', None)\n custom_info['child_hyperparameters'] = bagged_info.get('child_hyperparameters', None)\n custom_info['child_hyperparameters_fit'] = bagged_info.get('child_hyperparameters_fit', None)\n custom_info['child_ag_args_fit'] = bagged_info.get('child_ag_args_fit', None)\n custom_model_info[model_name] = custom_info\n\n model_info_keys = ['num_features', 'model_type', 'hyperparameters', 'hyperparameters_fit', 'ag_args_fit', 'features']\n model_info_sum_keys = []\n for key in model_info_keys:\n model_info_dict[key] = [model_info[model_name][key] for model_name in model_names]\n if key in model_info_sum_keys:\n key_dict = {model_name: model_info[model_name][key] for model_name in model_names}\n model_info_dict[key + '_full'] = [self.get_model_attribute_full(model=model_name, attribute=key_dict) for model_name in model_names]\n\n model_info_keys = ['num_models', 'memory_size', 'memory_size_min', 'child_model_type', 'child_hyperparameters', 'child_hyperparameters_fit', 'child_ag_args_fit']\n model_info_full_keys = {'memory_size': [('memory_size_w_ancestors', sum)], 'memory_size_min': [('memory_size_min_w_ancestors', max)], 'num_models': [('num_models_w_ancestors', sum)]}\n for key in model_info_keys:\n model_info_dict[key] = [custom_model_info[model_name][key] for model_name in model_names]\n if key in model_info_full_keys:\n key_dict = {model_name: custom_model_info[model_name][key] for model_name in model_names}\n for column_name, func in model_info_full_keys[key]:\n model_info_dict[column_name] = [self.get_model_attribute_full(model=model_name, attribute=key_dict, func=func) for model_name in model_names]\n\n ancestors = [list(nx.dag.ancestors(self.model_graph, model_name)) for model_name in model_names]\n descendants = [list(nx.dag.descendants(self.model_graph, model_name)) for model_name 
in model_names]\n\n model_info_dict['num_ancestors'] = [len(ancestor_lst) for ancestor_lst in ancestors]\n model_info_dict['num_descendants'] = [len(descendant_lst) for descendant_lst in descendants]\n model_info_dict['ancestors'] = ancestors\n model_info_dict['descendants'] = descendants\n\n df = pd.DataFrame(data={\n 'model': model_names,\n 'score_val': score_val,\n 'pred_time_val': pred_time_val,\n 'fit_time': fit_time,\n 'pred_time_val_marginal': pred_time_val_marginal,\n 'fit_time_marginal': fit_time_marginal,\n 'stack_level': stack_level,\n 'can_infer': can_infer,\n 'fit_order': fit_order,\n **model_info_dict,\n })\n df_sorted = df.sort_values(by=['score_val', 'pred_time_val', 'model'], ascending=[False, True, False]).reset_index(drop=True)\n\n df_columns_lst = df_sorted.columns.tolist()\n explicit_order = [\n 'model',\n 'score_val',\n 'pred_time_val',\n 'fit_time',\n 'pred_time_val_marginal',\n 'fit_time_marginal',\n 'stack_level',\n 'can_infer',\n 'fit_order',\n 'num_features',\n 'num_models',\n 'num_models_w_ancestors',\n 'memory_size',\n 'memory_size_w_ancestors',\n 'memory_size_min',\n 'memory_size_min_w_ancestors',\n 'num_ancestors',\n 'num_descendants',\n 'model_type',\n 'child_model_type'\n ]\n explicit_order = [column for column in explicit_order if column in df_columns_lst]\n df_columns_other = [column for column in df_columns_lst if column not in explicit_order]\n df_columns_new = explicit_order + df_columns_other\n df_sorted = df_sorted[df_columns_new]\n\n return df_sorted\n\n def get_info(self, include_model_info=False) -> dict:\n num_models_trained = len(self.get_model_names())\n if self.model_best is not None:\n best_model = self.model_best\n else:\n try:\n best_model = self.get_model_best()\n except AssertionError:\n best_model = None\n if best_model is not None:\n best_model_score_val = self.get_model_attribute(model=best_model, attribute='val_score')\n best_model_stack_level = self.get_model_level(best_model)\n else:\n best_model_score_val = None\n best_model_stack_level = None\n # fit_time = None\n num_bag_folds = self.k_fold\n max_core_stack_level = self.get_max_level('core')\n max_stack_level = self.get_max_level()\n\n problem_type = self.problem_type\n eval_metric = self.eval_metric.name\n time_train_start = self._time_train_start\n num_rows_train = self._num_rows_train\n num_cols_train = self._num_cols_train\n num_classes = self.num_classes\n # TODO:\n # Disk size of models\n # Raw feature count\n # HPO time\n # Bag time\n # Feature prune time\n # Exception count / models failed count\n # True model count (models * kfold)\n # AutoGluon version fit on\n # Max memory usage\n # CPU count used / GPU count used\n\n info = {\n 'time_train_start': time_train_start,\n 'num_rows_train': num_rows_train,\n 'num_cols_train': num_cols_train,\n 'num_classes': num_classes,\n 'problem_type': problem_type,\n 'eval_metric': eval_metric,\n 'best_model': best_model,\n 'best_model_score_val': best_model_score_val,\n 'best_model_stack_level': best_model_stack_level,\n 'num_models_trained': num_models_trained,\n 'num_bag_folds': num_bag_folds,\n 'max_stack_level': max_stack_level,\n 'max_core_stack_level': max_core_stack_level,\n }\n\n if include_model_info:\n info['model_info'] = self.get_models_info()\n\n return info\n\n def get_models_info(self, models: List[str] = None) -> dict:\n if models is None:\n models = self.get_model_names()\n model_info_dict = dict()\n for model in models:\n if isinstance(model, str):\n if model in self.models.keys():\n model = self.models[model]\n if 
isinstance(model, str):\n model_type = self.get_model_attribute(model=model, attribute='type')\n model_path = self.get_model_attribute(model=model, attribute='path')\n model_info_dict[model] = model_type.load_info(path=model_path)\n else:\n model_info_dict[model.name] = model.get_info()\n return model_info_dict\n\n def reduce_memory_size(self, remove_data=True, remove_fit_stack=False, remove_fit=True, remove_info=False, requires_save=True, reduce_children=False, **kwargs):\n if remove_data and self.is_data_saved:\n data_files = [\n self.path_data + 'X.pkl',\n self.path_data + 'X_val.pkl',\n self.path_data + 'y.pkl',\n self.path_data + 'y_val.pkl',\n ]\n for data_file in data_files:\n try:\n os.remove(data_file)\n except FileNotFoundError:\n pass\n if requires_save:\n self.is_data_saved = False\n try:\n os.rmdir(self.path_data)\n except OSError:\n pass\n try:\n os.rmdir(self.path_utils)\n except OSError:\n pass\n models = self.get_model_names()\n for model in models:\n model = self.load_model(model)\n model.reduce_memory_size(remove_fit_stack=remove_fit_stack, remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, reduce_children=reduce_children, **kwargs)\n if requires_save:\n self.save_model(model, reduce_memory=False)\n if requires_save:\n self.save()\n\n # TODO: Also enable deletion of models which didn't succeed in training (files may still be persisted)\n # This includes the original HPO fold for stacking\n # Deletes specified models from trainer and from disk (if delete_from_disk=True).\n def delete_models(self, models_to_keep=None, models_to_delete=None, allow_delete_cascade=False, delete_from_disk=True, dry_run=True):\n if models_to_keep is not None and models_to_delete is not None:\n raise ValueError('Exactly one of [models_to_keep, models_to_delete] must be set.')\n if models_to_keep is not None:\n if not isinstance(models_to_keep, list):\n models_to_keep = [models_to_keep]\n minimum_model_set = set()\n for model in models_to_keep:\n minimum_model_set.update(self.get_minimum_model_set(model))\n minimum_model_set = list(minimum_model_set)\n models_to_remove = [model for model in self.get_model_names() if model not in minimum_model_set]\n elif models_to_delete is not None:\n if not isinstance(models_to_delete, list):\n models_to_delete = [models_to_delete]\n minimum_model_set = set(models_to_delete)\n minimum_model_set_orig = copy.deepcopy(minimum_model_set)\n for model in models_to_delete:\n minimum_model_set.update(nx.algorithms.dag.descendants(self.model_graph, model))\n if not allow_delete_cascade:\n if minimum_model_set != minimum_model_set_orig:\n raise AssertionError('models_to_delete contains models which cause a delete cascade due to other models being dependent on them. 
Set allow_delete_cascade=True to enable the deletion.')\n minimum_model_set = list(minimum_model_set)\n models_to_remove = [model for model in self.get_model_names() if model in minimum_model_set]\n else:\n raise ValueError('Exactly one of [models_to_keep, models_to_delete] must be set.')\n\n if dry_run:\n logger.log(30, f'Dry run enabled, AutoGluon would have deleted the following models: {models_to_remove}')\n if delete_from_disk:\n for model in models_to_remove:\n model = self.load_model(model)\n logger.log(30, f'\\tDirectory {model.path} would have been deleted.')\n logger.log(30, f'To perform the deletion, set dry_run=False')\n return\n\n if delete_from_disk:\n for model in models_to_remove:\n model = self.load_model(model)\n model.delete_from_disk()\n\n self.model_graph.remove_nodes_from(models_to_remove)\n for model in models_to_remove:\n if model in self.models:\n self.models.pop(model)\n\n models_kept = self.get_model_names()\n\n if self.model_best is not None and self.model_best not in models_kept:\n try:\n self.model_best = self.get_model_best()\n except AssertionError:\n self.model_best = None\n\n # TODO: Delete from all the other model dicts\n self.save()\n\n @classmethod\n def load(cls, path, reset_paths=False):\n load_path = path + cls.trainer_file_name\n if not reset_paths:\n return load_pkl.load(path=load_path)\n else:\n obj = load_pkl.load(path=load_path)\n obj.set_contexts(path)\n obj.reset_paths = reset_paths\n return obj\n\n @classmethod\n def load_info(cls, path, reset_paths=False, load_model_if_required=True):\n load_path = path + cls.trainer_info_name\n try:\n return load_pkl.load(path=load_path)\n except:\n if load_model_if_required:\n trainer = cls.load(path=path, reset_paths=reset_paths)\n return trainer.get_info()\n else:\n raise\n\n def save_info(self, include_model_info=False):\n info = self.get_info(include_model_info=include_model_info)\n\n save_pkl.save(path=self.path + self.trainer_info_name, object=info)\n save_json.save(path=self.path + self.trainer_info_json_name, obj=info)\n return info\n\n def _process_hyperparameters(self, hyperparameters: dict) -> dict:\n return process_hyperparameters(hyperparameters=hyperparameters)\n\n def _get_full_model_val_score(self, model: str) -> float:\n model_full_dict_inverse = {full: orig for orig, full in self.model_full_dict.items()}\n model_performances = self.get_models_attribute_dict(attribute='val_score')\n\n normal_model = model_full_dict_inverse[model]\n if normal_model not in model_performances:\n # normal model is deleted\n if model not in self._model_full_dict_val_score:\n raise ValueError(f'_FULL model {model} had the model it was based on ({normal_model}) deleted, and the validation score was not stored.')\n val_score = self._model_full_dict_val_score[model]\n else:\n # normal model exists\n val_score = model_performances[normal_model]\n return val_score\n\n def distill(self, X=None, y=None, X_val=None, y_val=None, X_unlabeled=None,\n time_limit=None, hyperparameters=None, holdout_frac=None, verbosity=None,\n models_name_suffix=None, teacher=None, teacher_preds='soft',\n augmentation_data=None, augment_method='spunge', augment_args={'size_factor':5,'max_size':int(1e5)},\n augmented_sample_weight=1.0):\n \"\"\" Various distillation algorithms.\n Args:\n X, y: pd.DataFrame and pd.Series of training data.\n If None, original training data used during predictor.fit() will be loaded.\n This data is split into train/validation if X_val, y_val are None.\n X_val, y_val: pd.DataFrame and pd.Series of validation 
data.\n time_limit, hyperparameters, holdout_frac: defined as in predictor.fit()\n teacher (None or str):\n If None, uses the model with the highest validation score as the teacher model, otherwise use the specified model name as the teacher.\n teacher_preds (None or str): If None, we only train with original labels (no data augmentation, overrides augment_method)\n If 'hard', labels are hard teacher predictions given by: teacher.predict()\n If 'soft', labels are soft teacher predictions given by: teacher.predict_proba()\n Note: 'hard' and 'soft' are equivalent for regression problems.\n If augment_method specified, teacher predictions are only used to label augmented data (training data keeps original labels).\n To apply label-smoothing: teacher_preds='onehot' will use original training data labels converted to one-hots for multiclass (no data augmentation). # TODO: expose smoothing-hyperparameter.\n models_name_suffix (str): Suffix to append to each student model's name, new names will look like: 'MODELNAME_dstl_SUFFIX'\n augmentation_data: pd.DataFrame of additional data to use as \"augmented data\" (does not contain labels).\n When specified, augment_method, augment_args are ignored, and this is the only augmented data that is used (teacher_preds cannot be None).\n augment_method (None or str): specifies which augmentation strategy to utilize. Options: [None, 'spunge','munge']\n If None, no augmentation gets applied.\n }\n augment_args (dict): args passed into the augmentation function corresponding to augment_method.\n augmented_sample_weight (float): Nonnegative value indicating how much to weight augmented samples. This is only considered if sample_weight was initially specified in Predictor.\n \"\"\"\n if verbosity is None:\n verbosity = self.verbosity\n\n if teacher is None:\n teacher = self._get_best()\n\n hyperparameter_tune = False # TODO: add as argument with scheduler options.\n if augmentation_data is not None and teacher_preds is None:\n raise ValueError(\"augmentation_data must be None if teacher_preds is None\")\n\n logger.log(20, f\"Distilling with teacher='{teacher}', teacher_preds={str(teacher_preds)}, augment_method={str(augment_method)} ...\")\n if teacher not in self.get_model_names(can_infer=True):\n raise AssertionError(f\"Teacher model '{teacher}' is not a valid teacher model! Either it does not exist or it cannot infer on new data.\\n\"\n f\"Valid teacher models: {self.get_model_names(can_infer=True)}\")\n if X is None:\n if y is not None:\n raise ValueError(\"X cannot be None when y specified.\")\n X = self.load_X()\n X_val = self.load_X_val()\n\n if y is None:\n y = self.load_y()\n y_val = self.load_y_val()\n\n if X_val is None:\n if y_val is not None:\n raise ValueError(\"X_val cannot be None when y_val specified.\")\n if holdout_frac is None:\n holdout_frac = default_holdout_frac(len(X), hyperparameter_tune)\n X, X_val, y, y_val = generate_train_test_split(X, y, problem_type=self.problem_type, test_size=holdout_frac)\n\n y_val_og = y_val.copy()\n og_bagged_mode = self.bagged_mode\n og_verbosity = self.verbosity\n self.bagged_mode = False # turn off bagging\n self.verbosity = verbosity # change verbosity for distillation\n\n if self.sample_weight is not None:\n X, w = extract_column(X, self.sample_weight)\n\n if teacher_preds is None or teacher_preds == 'onehot':\n augment_method = None\n logger.log(20, \"Training students without a teacher model. 
Set teacher_preds = 'soft' or 'hard' to distill using the best AutoGluon predictor as teacher.\")\n\n if teacher_preds in ['onehot','soft']:\n y = format_distillation_labels(y, self.problem_type, self.num_classes)\n y_val = format_distillation_labels(y_val, self.problem_type, self.num_classes)\n\n if augment_method is None and augmentation_data is None:\n if teacher_preds == 'hard':\n y_pred = pd.Series(self.predict(X, model=teacher))\n if (self.problem_type != REGRESSION) and (len(y_pred.unique()) < len(y.unique())): # add missing labels\n logger.log(15, \"Adding missing labels to distillation dataset by including some real training examples\")\n indices_to_add = []\n for clss in y.unique():\n if clss not in y_pred.unique():\n logger.log(15, f\"Fetching a row with label={clss} from training data\")\n clss_index = y[y == clss].index[0]\n indices_to_add.append(clss_index)\n X_extra = X.loc[indices_to_add].copy()\n y_extra = y.loc[indices_to_add].copy() # these are actually real training examples\n X = pd.concat([X, X_extra])\n y_pred = pd.concat([y_pred, y_extra])\n if self.sample_weight is not None:\n w = pd.concat([w, w[indices_to_add]])\n y = y_pred\n elif teacher_preds == 'soft':\n y = self.predict_proba(X, model=teacher)\n if self.problem_type == MULTICLASS:\n y = pd.DataFrame(y)\n else:\n y = pd.Series(y)\n else:\n X_aug = augment_data(X=X, feature_metadata=self.feature_metadata,\n augmentation_data=augmentation_data, augment_method=augment_method, augment_args=augment_args)\n if len(X_aug) > 0:\n if teacher_preds == 'hard':\n y_aug = pd.Series(self.predict(X_aug, model=teacher))\n elif teacher_preds == 'soft':\n y_aug = self.predict_proba(X_aug, model=teacher)\n if self.problem_type == MULTICLASS:\n y_aug = pd.DataFrame(y_aug)\n else:\n y_aug = pd.Series(y_aug)\n else:\n raise ValueError(f\"Unknown teacher_preds specified: {teacher_preds}\")\n\n X = pd.concat([X, X_aug])\n y = pd.concat([y, y_aug])\n if self.sample_weight is not None:\n w = pd.concat([w, pd.Series([augmented_sample_weight]*len(X_aug))])\n\n X.reset_index(drop=True, inplace=True)\n y.reset_index(drop=True, inplace=True)\n if self.sample_weight is not None:\n w.reset_index(drop=True, inplace=True)\n X[self.sample_weight] = w\n\n name_suffix = '_DSTL' # all student model names contain this substring\n if models_name_suffix is not None:\n name_suffix = name_suffix + \"_\" + models_name_suffix\n\n if hyperparameters is None:\n hyperparameters = {'GBM': {}, 'CAT': {}, 'NN': {}, 'RF': {}}\n hyperparameters = self._process_hyperparameters(hyperparameters=hyperparameters) # TODO: consider exposing ag_args_fit, excluded_model_types as distill() arguments.\n if teacher_preds is not None and teacher_preds != 'hard' and self.problem_type != REGRESSION:\n self._regress_preds_asprobas = True\n\n core_kwargs = {\n 'stack_name': self.distill_stackname,\n 'get_models_func': self.construct_model_templates_distillation,\n }\n aux_kwargs = {\n 'get_models_func': self.construct_model_templates_distillation,\n 'check_if_best': False,\n }\n\n # self.bagged_mode = True # TODO: Add options for bagging\n models = self.train_multi_levels(\n X=X,\n y=y,\n X_val=X_val,\n y_val=y_val,\n hyperparameters=hyperparameters,\n time_limit=time_limit, # FIXME: Also limit augmentation time\n name_suffix=name_suffix,\n core_kwargs=core_kwargs,\n aux_kwargs=aux_kwargs,\n )\n\n distilled_model_names = []\n w_val = None\n if self.weight_evaluation:\n X_val, w_val = extract_column(X_val, self.sample_weight)\n for model_name in models: # finally measure original 
metric on validation data and overwrite stored val_scores\n model_score = self.score(X_val, y_val_og, model=model_name, weights=w_val)\n model_obj = self.load_model(model_name)\n model_obj.val_score = model_score\n model_obj.save() # TODO: consider omitting for sake of efficiency\n self.model_graph.nodes[model_name]['val_score'] = model_score\n distilled_model_names.append(model_name)\n leaderboard = self.leaderboard()\n logger.log(20, 'Distilled model leaderboard:')\n leaderboard_distilled = leaderboard[leaderboard['model'].isin(models)].reset_index(drop=True)\n with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 1000):\n logger.log(20, leaderboard_distilled)\n\n # reset trainer to old state before distill() was called:\n self.bagged_mode = og_bagged_mode # TODO: Confirm if safe to train future models after training models in both bagged and non-bagged modes\n self.verbosity = og_verbosity\n return distilled_model_names\n"
] |
[
[
"pandas.concat",
"pandas.Series",
"numpy.isnan",
"pandas.option_context",
"pandas.DataFrame"
]
] |
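The distill() routine in the record above back-fills any class the teacher never predicted by pulling one real training row per missing label before replacing y with the teacher's hard predictions. A minimal standalone sketch of that back-fill step, assuming plain pandas objects (the tiny X, y, y_pred frames below are hypothetical stand-ins for the trainer's data, not AutoGluon API):

import pandas as pd

def backfill_missing_labels(X, y, y_pred):
    # One real training index per class the teacher never predicted.
    indices_to_add = [y[y == clss].index[0]
                      for clss in y.unique() if clss not in y_pred.unique()]
    if indices_to_add:
        X = pd.concat([X, X.loc[indices_to_add]])
        y_pred = pd.concat([y_pred, y.loc[indices_to_add]])
    return X.reset_index(drop=True), y_pred.reset_index(drop=True)

X = pd.DataFrame({"f": [0.1, 0.2, 0.3, 0.4]})
y = pd.Series(["a", "b", "c", "a"])
y_pred = pd.Series(["a", "b", "a", "a"])  # teacher never predicts "c"
X2, y2 = backfill_missing_labels(X, y, y_pred)
print(sorted(y2.unique()))  # ['a', 'b', 'c']

The reset_index step matters: the back-filled rows carry duplicated indices until it runs, which is why the trainer resets X, y, and the sample weights together.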
Reinaesaya/CIL-CARLA-CSC2621
|
[
"f6a983616c6e789a8a7f295e112dae89719c46d3"
] |
[
"PythonClient_planning/agents/imitation/imitation_learning.py"
] |
[
"from __future__ import print_function\n\nimport os\n\nimport scipy\n\nimport tensorflow as tf\nimport numpy as np\n\nslim = tf.contrib.slim\n\nfrom carla.agent import Agent\nfrom carla.carla_server_pb2 import Control\nfrom agents.imitation.imitation_learning_network import load_imitation_learning_network\n\n\nclass ImitationLearning(Agent):\n\n def __init__(self, city_name, avoid_stopping, memory_fraction=0.25, image_cut=[115, 510]):\n\n Agent.__init__(self)\n\n self.dropout_vec = [1.0] * 8 + [0.7] * 2 + [0.5] * 2 + [0.5] * 1 + [0.5, 1.] * 6\n\n config_gpu = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))\n\n # GPU to be selected, just take zero , select GPU with CUDA_VISIBLE_DEVICES\n\n config_gpu.gpu_options.visible_device_list = '0'\n\n config_gpu.gpu_options.per_process_gpu_memory_fraction = memory_fraction\n\n self._image_size = (88, 200, 3)\n self._avoid_stopping = avoid_stopping\n\n self._sess = tf.Session(config=config_gpu)\n\n with tf.device('/gpu:0'):\n self._input_images = tf.placeholder(\"float\", shape=[None, self._image_size[0],\n self._image_size[1],\n self._image_size[2]],\n name=\"input_image\")\n\n self._input_data = []\n\n self._input_data.append(tf.placeholder(tf.float32,\n shape=[None, 4], name=\"input_control\"))\n\n self._input_data.append(tf.placeholder(tf.float32,\n shape=[None, 1], name=\"input_speed\"))\n\n self._dout = tf.placeholder(\"float\", shape=[len(self.dropout_vec)])\n\n with tf.name_scope(\"Network\"):\n self._network_tensor = load_imitation_learning_network(self._input_images,\n self._input_data,\n self._image_size, self._dout)\n\n import os\n dir_path = os.path.dirname(__file__)\n\n self._models_path = dir_path + '/model/'\n\n # tf.reset_default_graph()\n self._sess.run(tf.global_variables_initializer())\n\n self.load_model()\n\n self._image_cut = image_cut\n\n def load_model(self):\n\n variables_to_restore = tf.global_variables()\n\n saver = tf.train.Saver(variables_to_restore, max_to_keep=0)\n\n if not os.path.exists(self._models_path):\n raise RuntimeError('failed to find the models path')\n\n ckpt = tf.train.get_checkpoint_state(self._models_path)\n if ckpt:\n print('Restoring from ', ckpt.model_checkpoint_path)\n saver.restore(self._sess, ckpt.model_checkpoint_path)\n else:\n ckpt = 0\n\n return ckpt\n\n def run_step(self, measurements, sensor_data, directions, target):\n\n control = self._compute_action(sensor_data['CameraRGB'].data,\n measurements.player_measurements.forward_speed, directions)\n\n return control\n\n def _compute_action(self, rgb_image, speed, direction=None):\n\n #rgb_image = rgb_image[self._image_cut[0]:self._image_cut[1], :]\n image_input = rgb_image\n #image_input = scipy.misc.imresize(rgb_image, [self._image_size[0],\n # self._image_size[1]])\n\n image_input = image_input.astype(np.float32)\n image_input = np.multiply(image_input, 1.0 / 255.0)\n\n steer, acc, brake = self._control_function(image_input, speed, direction, self._sess)\n\n # This a bit biased, but is to avoid fake breaking\n\n if brake < 0.1:\n brake = 0.0\n\n if acc > brake:\n brake = 0.0\n\n # We limit speed to 35 km/h to avoid\n if speed > 10.0 and brake == 0.0:\n acc = 0.0\n\n control = Control()\n control.steer = steer\n control.throttle = acc\n control.brake = brake\n\n control.hand_brake = 0\n control.reverse = 0\n\n return control\n\n def _control_function(self, image_input, speed, control_input, sess):\n\n branches = self._network_tensor\n x = self._input_images\n dout = self._dout\n input_speed = self._input_data[1]\n\n image_input = 
image_input.reshape(\n (1, self._image_size[0], self._image_size[1], self._image_size[2]))\n\n # Normalize with the maximum speed from the training set ( 90 km/h)\n #speed = np.array(speed / 25.0)\n speed = np.array(speed)\n speed = speed.reshape((1, 1))\n\n if control_input == 2 or control_input == 0.0:\n all_net = branches[0]\n elif control_input == 3:\n all_net = branches[2]\n elif control_input == 4:\n all_net = branches[3]\n elif control_input == 6: # Add stopping branch\n all_net = branches[4]\n else:\n all_net = branches[1]\n\n feedDict = {x: image_input, input_speed: speed, dout: [1] * len(self.dropout_vec)}\n\n output_all = sess.run(all_net, feed_dict=feedDict)\n\n predicted_steers = (output_all[0][0])\n\n predicted_acc = (output_all[0][1])\n\n predicted_brake = (output_all[0][2])\n\n if self._avoid_stopping:\n predicted_speed = sess.run(branches[5], feed_dict=feedDict) # Increment to 5 for stopping branch\n predicted_speed = predicted_speed[0][0]\n real_speed = speed * 25.0\n\n real_predicted = predicted_speed * 25.0\n if real_speed < 2.0 and real_predicted > 3.0:\n # If (Car Stooped) and\n # ( It should not have stopped, use the speed prediction branch for that)\n\n predicted_acc = 1 * (5.6 / 25.0 - speed) + predicted_acc\n\n predicted_brake = 0.0\n\n predicted_acc = predicted_acc[0][0]\n\n return predicted_steers, predicted_acc, predicted_brake\n"
] |
[
[
"tensorflow.train.get_checkpoint_state",
"tensorflow.device",
"numpy.multiply",
"tensorflow.global_variables",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.GPUOptions",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.array"
]
] |
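The imitation agent above is written against the TensorFlow 1 graph API: placeholders are declared once in __init__, and every call to _control_function routes a feed_dict through sess.run. A minimal sketch of that pattern, assuming a TF2 runtime with the v1 compatibility layer (the toy "branch" below is a stand-in for the real imitation network, not its architecture):

import numpy as np
import tensorflow.compat.v1 as tf  # assumption: TF2 with the v1 compat layer

tf.disable_eager_execution()

images = tf.placeholder(tf.float32, shape=[None, 88, 200, 3], name="input_image")
speed = tf.placeholder(tf.float32, shape=[None, 1], name="input_speed")
# Toy stand-in "branch": global-average-pool the image and append the speed.
branch = tf.concat([tf.reduce_mean(images, axis=[1, 2]), speed], axis=1)

with tf.Session() as sess:
    out = sess.run(branch, feed_dict={
        images: np.zeros((1, 88, 200, 3), np.float32),
        speed: np.array([[0.5]], np.float32),
    })
print(out.shape)  # (1, 4)

The real agent additionally selects among several output branches by control_input before running the graph; the sketch collapses that to a single tensor.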
weleen/MGH.pytorch
|
[
"69f2830f6bd60fe3b33c80c04540c0c800d26de1"
] |
[
"fastreid/modeling/backbones/regnet/regnet.py"
] |
[
"import logging\nimport math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom fastreid.layers import get_norm\nfrom fastreid.utils import comm\nfrom fvcore.common.checkpoint import get_missing_parameters_message, get_unexpected_parameters_message\nfrom .config import cfg as regnet_cfg\nfrom ..build import BACKBONE_REGISTRY\n\nlogger = logging.getLogger(__name__)\nmodel_urls = {\n '800x': 'https://dl.fbaipublicfiles.com/pycls/dds_baselines/160905981/RegNetX-200MF_dds_8gpu.pyth',\n '800y': 'https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906567/RegNetY-800MF_dds_8gpu.pyth',\n '1600x': 'https://dl.fbaipublicfiles.com/pycls/dds_baselines/160990626/RegNetX-1.6GF_dds_8gpu.pyth',\n '1600y': 'https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906681/RegNetY-1.6GF_dds_8gpu.pyth',\n '3200x': 'https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906139/RegNetX-3.2GF_dds_8gpu.pyth',\n '3200y': 'https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906834/RegNetY-3.2GF_dds_8gpu.pyth',\n '4000x': 'https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906383/RegNetX-4.0GF_dds_8gpu.pyth',\n '4000y': 'https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906838/RegNetY-4.0GF_dds_8gpu.pyth',\n '6400x': 'https://dl.fbaipublicfiles.com/pycls/dds_baselines/161116590/RegNetX-6.4GF_dds_8gpu.pyth',\n '6400y': 'https://dl.fbaipublicfiles.com/pycls/dds_baselines/160907112/RegNetY-6.4GF_dds_8gpu.pyth',\n}\n\n\ndef init_weights(m):\n \"\"\"Performs ResNet-style weight initialization.\"\"\"\n if isinstance(m, nn.Conv2d):\n # Note that there is no bias due to BN\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))\n elif isinstance(m, nn.BatchNorm2d):\n zero_init_gamma = (\n hasattr(m, \"final_bn\") and m.final_bn and regnet_cfg.BN.ZERO_INIT_FINAL_GAMMA\n )\n m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(mean=0.0, std=0.01)\n m.bias.data.zero_()\n\n\ndef get_stem_fun(stem_type):\n \"\"\"Retrives the stem function by name.\"\"\"\n stem_funs = {\n \"res_stem_cifar\": ResStemCifar,\n \"res_stem_in\": ResStemIN,\n \"simple_stem_in\": SimpleStemIN,\n }\n assert stem_type in stem_funs.keys(), \"Stem type '{}' not supported\".format(\n stem_type\n )\n return stem_funs[stem_type]\n\n\ndef get_block_fun(block_type):\n \"\"\"Retrieves the block function by name.\"\"\"\n block_funs = {\n \"vanilla_block\": VanillaBlock,\n \"res_basic_block\": ResBasicBlock,\n \"res_bottleneck_block\": ResBottleneckBlock,\n }\n assert block_type in block_funs.keys(), \"Block type '{}' not supported\".format(\n block_type\n )\n return block_funs[block_type]\n\n\ndef drop_connect(x, drop_ratio):\n \"\"\"Drop connect (adapted from DARTS).\"\"\"\n keep_ratio = 1.0 - drop_ratio\n mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)\n mask.bernoulli_(keep_ratio)\n x.div_(keep_ratio)\n x.mul_(mask)\n return x\n\nclass AnyHead(nn.Module):\n \"\"\"AnyNet head.\"\"\"\n\n def __init__(self, w_in, nc):\n super(AnyHead, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(w_in, nc, bias=True)\n\n def forward(self, x):\n x = self.avg_pool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\n\nclass VanillaBlock(nn.Module):\n \"\"\"Vanilla block: [3x3 conv, BN, Relu] x2\"\"\"\n\n def __init__(self, w_in, w_out, stride, bn_norm, bm=None, gw=None, se_r=None):\n assert (\n bm is None and gw is None and se_r is None\n 
), \"Vanilla block does not support bm, gw, and se_r options\"\n super(VanillaBlock, self).__init__()\n self.construct(w_in, w_out, stride, bn_norm)\n\n def construct(self, w_in, w_out, stride, bn_norm):\n # 3x3, BN, ReLU\n self.a = nn.Conv2d(\n w_in, w_out, kernel_size=3, stride=stride, padding=1, bias=False\n )\n self.a_bn = get_norm(bn_norm, w_out)\n self.a_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)\n # 3x3, BN, ReLU\n self.b = nn.Conv2d(w_out, w_out, kernel_size=3, stride=1, padding=1, bias=False)\n self.b_bn = get_norm(bn_norm, w_out)\n self.b_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)\n\n def forward(self, x):\n for layer in self.children():\n x = layer(x)\n return x\n\n\nclass BasicTransform(nn.Module):\n \"\"\"Basic transformation: [3x3 conv, BN, Relu] x2\"\"\"\n\n def __init__(self, w_in, w_out, stride, bn_norm):\n super(BasicTransform, self).__init__()\n self.construct(w_in, w_out, stride, bn_norm)\n\n def construct(self, w_in, w_out, stride, bn_norm):\n # 3x3, BN, ReLU\n self.a = nn.Conv2d(\n w_in, w_out, kernel_size=3, stride=stride, padding=1, bias=False\n )\n self.a_bn = get_norm(bn_norm, w_out)\n self.a_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)\n # 3x3, BN\n self.b = nn.Conv2d(w_out, w_out, kernel_size=3, stride=1, padding=1, bias=False)\n self.b_bn = get_norm(bn_norm, w_out)\n self.b_bn.final_bn = True\n\n def forward(self, x):\n for layer in self.children():\n x = layer(x)\n return x\n\n\nclass ResBasicBlock(nn.Module):\n \"\"\"Residual basic block: x + F(x), F = basic transform\"\"\"\n\n def __init__(self, w_in, w_out, stride, bn_norm, bm=None, gw=None, se_r=None):\n assert (\n bm is None and gw is None and se_r is None\n ), \"Basic transform does not support bm, gw, and se_r options\"\n super(ResBasicBlock, self).__init__()\n self.construct(w_in, w_out, stride, bn_norm)\n\n def _add_skip_proj(self, w_in, w_out, stride, bn_norm):\n self.proj = nn.Conv2d(\n w_in, w_out, kernel_size=1, stride=stride, padding=0, bias=False\n )\n self.bn = get_norm(bn_norm, w_out)\n\n def construct(self, w_in, w_out, stride, bn_norm):\n # Use skip connection with projection if shape changes\n self.proj_block = (w_in != w_out) or (stride != 1)\n if self.proj_block:\n self._add_skip_proj(w_in, w_out, stride, bn_norm)\n self.f = BasicTransform(w_in, w_out, stride, bn_norm)\n self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)\n\n def forward(self, x):\n if self.proj_block:\n x = self.bn(self.proj(x)) + self.f(x)\n else:\n x = x + self.f(x)\n x = self.relu(x)\n return x\n\n\nclass SE(nn.Module):\n \"\"\"Squeeze-and-Excitation (SE) block\"\"\"\n\n def __init__(self, w_in, w_se):\n super(SE, self).__init__()\n self.construct(w_in, w_se)\n\n def construct(self, w_in, w_se):\n # AvgPool\n self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n # FC, Activation, FC, Sigmoid\n self.f_ex = nn.Sequential(\n nn.Conv2d(w_in, w_se, kernel_size=1, bias=True),\n nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE),\n nn.Conv2d(w_se, w_in, kernel_size=1, bias=True),\n nn.Sigmoid(),\n )\n\n def forward(self, x):\n return x * self.f_ex(self.avg_pool(x))\n\n\nclass BottleneckTransform(nn.Module):\n \"\"\"Bottlenect transformation: 1x1, 3x3, 1x1\"\"\"\n\n def __init__(self, w_in, w_out, stride, bn_norm, bm, gw, se_r):\n super(BottleneckTransform, self).__init__()\n self.construct(w_in, w_out, stride, bn_norm, bm, gw, se_r)\n\n def construct(self, w_in, w_out, stride, bn_norm, bm, gw, se_r):\n # Compute the bottleneck width\n w_b = int(round(w_out * bm))\n # Compute the number of groups\n num_gs = w_b // 
gw\n # 1x1, BN, ReLU\n self.a = nn.Conv2d(w_in, w_b, kernel_size=1, stride=1, padding=0, bias=False)\n self.a_bn = get_norm(bn_norm, w_b)\n self.a_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)\n # 3x3, BN, ReLU\n self.b = nn.Conv2d(\n w_b, w_b, kernel_size=3, stride=stride, padding=1, groups=num_gs, bias=False\n )\n self.b_bn = get_norm(bn_norm, w_b)\n self.b_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)\n # Squeeze-and-Excitation (SE)\n if se_r:\n w_se = int(round(w_in * se_r))\n self.se = SE(w_b, w_se)\n # 1x1, BN\n self.c = nn.Conv2d(w_b, w_out, kernel_size=1, stride=1, padding=0, bias=False)\n self.c_bn = get_norm(bn_norm, w_out)\n self.c_bn.final_bn = True\n\n def forward(self, x):\n for layer in self.children():\n x = layer(x)\n return x\n\n\nclass ResBottleneckBlock(nn.Module):\n \"\"\"Residual bottleneck block: x + F(x), F = bottleneck transform\"\"\"\n\n def __init__(self, w_in, w_out, stride, bn_norm, bm=1.0, gw=1, se_r=None):\n super(ResBottleneckBlock, self).__init__()\n self.construct(w_in, w_out, stride, bn_norm, bm, gw, se_r)\n\n def _add_skip_proj(self, w_in, w_out, stride, bn_norm):\n self.proj = nn.Conv2d(\n w_in, w_out, kernel_size=1, stride=stride, padding=0, bias=False\n )\n self.bn = get_norm(bn_norm, w_out)\n\n def construct(self, w_in, w_out, stride, bn_norm, bm, gw, se_r):\n # Use skip connection with projection if shape changes\n self.proj_block = (w_in != w_out) or (stride != 1)\n if self.proj_block:\n self._add_skip_proj(w_in, w_out, stride, bn_norm)\n self.f = BottleneckTransform(w_in, w_out, stride, bn_norm, bm, gw, se_r)\n self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)\n\n def forward(self, x):\n if self.proj_block:\n x = self.bn(self.proj(x)) + self.f(x)\n else:\n x = x + self.f(x)\n x = self.relu(x)\n return x\n\n\nclass ResStemCifar(nn.Module):\n \"\"\"ResNet stem for CIFAR.\"\"\"\n\n def __init__(self, w_in, w_out, bn_norm):\n super(ResStemCifar, self).__init__()\n self.construct(w_in, w_out, bn_norm)\n\n def construct(self, w_in, w_out, bn_norm):\n # 3x3, BN, ReLU\n self.conv = nn.Conv2d(\n w_in, w_out, kernel_size=3, stride=1, padding=1, bias=False\n )\n self.bn = get_norm(bn_norm, w_out)\n self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)\n\n def forward(self, x):\n for layer in self.children():\n x = layer(x)\n return x\n\n\nclass ResStemIN(nn.Module):\n \"\"\"ResNet stem for ImageNet.\"\"\"\n\n def __init__(self, w_in, w_out, bn_norm):\n super(ResStemIN, self).__init__()\n self.construct(w_in, w_out, bn_norm)\n\n def construct(self, w_in, w_out, bn_norm):\n # 7x7, BN, ReLU, maxpool\n self.conv = nn.Conv2d(\n w_in, w_out, kernel_size=7, stride=2, padding=3, bias=False\n )\n self.bn = get_norm(bn_norm, w_out)\n self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)\n self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n def forward(self, x):\n for layer in self.children():\n x = layer(x)\n return x\n\n\nclass SimpleStemIN(nn.Module):\n \"\"\"Simple stem for ImageNet.\"\"\"\n\n def __init__(self, in_w, out_w, bn_norm):\n super(SimpleStemIN, self).__init__()\n self.construct(in_w, out_w, bn_norm)\n\n def construct(self, in_w, out_w, bn_norm):\n # 3x3, BN, ReLU\n self.conv = nn.Conv2d(\n in_w, out_w, kernel_size=3, stride=2, padding=1, bias=False\n )\n self.bn = get_norm(bn_norm, out_w)\n self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)\n\n def forward(self, x):\n for layer in self.children():\n x = layer(x)\n return x\n\n\nclass AnyStage(nn.Module):\n \"\"\"AnyNet stage (sequence of blocks w/ the same output shape).\"\"\"\n\n def 
__init__(self, w_in, w_out, stride, bn_norm, d, block_fun, bm, gw, se_r):\n super(AnyStage, self).__init__()\n self.construct(w_in, w_out, stride, bn_norm, d, block_fun, bm, gw, se_r)\n\n def construct(self, w_in, w_out, stride, bn_norm, d, block_fun, bm, gw, se_r):\n # Construct the blocks\n for i in range(d):\n # Stride and w_in apply to the first block of the stage\n b_stride = stride if i == 0 else 1\n b_w_in = w_in if i == 0 else w_out\n # Construct the block\n self.add_module(\n \"b{}\".format(i + 1), block_fun(b_w_in, w_out, b_stride, bn_norm, bm, gw, se_r)\n )\n\n def forward(self, x):\n for block in self.children():\n x = block(x)\n return x\n\n\nclass AnyNet(nn.Module):\n \"\"\"AnyNet model.\"\"\"\n\n def __init__(self, **kwargs):\n super(AnyNet, self).__init__()\n if kwargs:\n self.construct(\n stem_type=kwargs[\"stem_type\"],\n stem_w=kwargs[\"stem_w\"],\n block_type=kwargs[\"block_type\"],\n ds=kwargs[\"ds\"],\n ws=kwargs[\"ws\"],\n ss=kwargs[\"ss\"],\n bn_norm=kwargs[\"bn_norm\"],\n bms=kwargs[\"bms\"],\n gws=kwargs[\"gws\"],\n se_r=kwargs[\"se_r\"],\n )\n else:\n self.construct(\n stem_type=regnet_cfg.ANYNET.STEM_TYPE,\n stem_w=regnet_cfg.ANYNET.STEM_W,\n block_type=regnet_cfg.ANYNET.BLOCK_TYPE,\n ds=regnet_cfg.ANYNET.DEPTHS,\n ws=regnet_cfg.ANYNET.WIDTHS,\n ss=regnet_cfg.ANYNET.STRIDES,\n bn_norm=regnet_cfg.ANYNET.BN_NORM,\n bms=regnet_cfg.ANYNET.BOT_MULS,\n gws=regnet_cfg.ANYNET.GROUP_WS,\n se_r=regnet_cfg.ANYNET.SE_R if regnet_cfg.ANYNET.SE_ON else None,\n )\n self.apply(init_weights)\n\n def construct(self, stem_type, stem_w, block_type, ds, ws, ss, bn_norm, bms, gws, se_r):\n # Generate dummy bot muls and gs for models that do not use them\n bms = bms if bms else [1.0 for _d in ds]\n gws = gws if gws else [1 for _d in ds]\n # Group params by stage\n stage_params = list(zip(ds, ws, ss, bms, gws))\n # Construct the stem\n stem_fun = get_stem_fun(stem_type)\n self.stem = stem_fun(3, stem_w, bn_norm)\n # Construct the stages\n block_fun = get_block_fun(block_type)\n prev_w = stem_w\n for i, (d, w, s, bm, gw) in enumerate(stage_params):\n self.add_module(\n \"s{}\".format(i + 1), AnyStage(prev_w, w, s, bn_norm, d, block_fun, bm, gw, se_r)\n )\n prev_w = w\n # Construct the head\n self.in_planes = prev_w\n # self.head = AnyHead(w_in=prev_w, nc=nc)\n\n def forward(self, x):\n for module in self.children():\n x = module(x)\n return x\n\n\ndef quantize_float(f, q):\n \"\"\"Converts a float to closest non-zero int divisible by q.\"\"\"\n return int(round(f / q) * q)\n\n\ndef adjust_ws_gs_comp(ws, bms, gs):\n \"\"\"Adjusts the compatibility of widths and groups.\"\"\"\n ws_bot = [int(w * b) for w, b in zip(ws, bms)]\n gs = [min(g, w_bot) for g, w_bot in zip(gs, ws_bot)]\n ws_bot = [quantize_float(w_bot, g) for w_bot, g in zip(ws_bot, gs)]\n ws = [int(w_bot / b) for w_bot, b in zip(ws_bot, bms)]\n return ws, gs\n\n\ndef get_stages_from_blocks(ws, rs):\n \"\"\"Gets ws/ds of network at each stage from per block values.\"\"\"\n ts_temp = zip(ws + [0], [0] + ws, rs + [0], [0] + rs)\n ts = [w != wp or r != rp for w, wp, r, rp in ts_temp]\n s_ws = [w for w, t in zip(ws, ts[:-1]) if t]\n s_ds = np.diff([d for d, t in zip(range(len(ts)), ts) if t]).tolist()\n return s_ws, s_ds\n\n\ndef generate_regnet(w_a, w_0, w_m, d, q=8):\n \"\"\"Generates per block ws from RegNet parameters.\"\"\"\n assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0\n ws_cont = np.arange(d) * w_a + w_0\n ks = np.round(np.log(ws_cont / w_0) / np.log(w_m))\n ws = w_0 * np.power(w_m, ks)\n ws = 
np.round(np.divide(ws, q)) * q\n num_stages, max_stage = len(np.unique(ws)), ks.max() + 1\n ws, ws_cont = ws.astype(int).tolist(), ws_cont.tolist()\n return ws, num_stages, max_stage, ws_cont\n\n\nclass RegNet(AnyNet):\n \"\"\"RegNet model.\"\"\"\n\n def __init__(self, last_stride, bn_norm):\n # Generate RegNet ws per block\n b_ws, num_s, _, _ = generate_regnet(\n regnet_cfg.REGNET.WA, regnet_cfg.REGNET.W0, regnet_cfg.REGNET.WM, regnet_cfg.REGNET.DEPTH\n )\n # Convert to per stage format\n ws, ds = get_stages_from_blocks(b_ws, b_ws)\n # Generate group widths and bot muls\n gws = [regnet_cfg.REGNET.GROUP_W for _ in range(num_s)]\n bms = [regnet_cfg.REGNET.BOT_MUL for _ in range(num_s)]\n # Adjust the compatibility of ws and gws\n ws, gws = adjust_ws_gs_comp(ws, bms, gws)\n # Use the same stride for each stage\n ss = [regnet_cfg.REGNET.STRIDE for _ in range(num_s)]\n ss[-1] = last_stride\n # Use SE for RegNetY\n se_r = regnet_cfg.REGNET.SE_R if regnet_cfg.REGNET.SE_ON else None\n # Construct the model\n kwargs = {\n \"stem_type\": regnet_cfg.REGNET.STEM_TYPE,\n \"stem_w\": regnet_cfg.REGNET.STEM_W,\n \"block_type\": regnet_cfg.REGNET.BLOCK_TYPE,\n \"ss\": ss,\n \"ds\": ds,\n \"ws\": ws,\n \"bn_norm\": bn_norm,\n \"bms\": bms,\n \"gws\": gws,\n \"se_r\": se_r,\n }\n super(RegNet, self).__init__(**kwargs)\n\n\ndef init_pretrained_weights(key):\n \"\"\"Initializes model with pretrained weights.\n\n Layers that don't match with pretrained layers in name or size are kept unchanged.\n \"\"\"\n import os\n import errno\n import gdown\n\n def _get_torch_home():\n ENV_TORCH_HOME = 'TORCH_HOME'\n ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'\n DEFAULT_CACHE_DIR = '~/.cache'\n torch_home = os.path.expanduser(\n os.getenv(\n ENV_TORCH_HOME,\n os.path.join(\n os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'\n )\n )\n )\n return torch_home\n\n torch_home = _get_torch_home()\n model_dir = os.path.join(torch_home, 'checkpoints')\n try:\n os.makedirs(model_dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n # Directory already exists, ignore.\n pass\n else:\n # Unexpected OSError, re-raise.\n raise\n\n filename = model_urls[key].split('/')[-1]\n\n cached_file = os.path.join(model_dir, filename)\n\n if not os.path.exists(cached_file):\n if comm.is_main_process():\n gdown.download(model_urls[key], cached_file, quiet=False)\n\n comm.synchronize()\n\n logger.info(f\"Loading pretrained model from {cached_file}\")\n state_dict = torch.load(cached_file, map_location=torch.device('cpu'))['model_state']\n\n return state_dict\n\n\n@BACKBONE_REGISTRY.register()\ndef build_regnet_backbone(cfg):\n # fmt: off\n pretrain = cfg.MODEL.BACKBONE.PRETRAIN\n pretrain_path = cfg.MODEL.BACKBONE.PRETRAIN_PATH\n last_stride = cfg.MODEL.BACKBONE.LAST_STRIDE\n bn_norm = cfg.MODEL.BACKBONE.NORM\n depth = cfg.MODEL.BACKBONE.DEPTH\n # fmt: on\n\n cfg_files = {\n '800x': 'fastreid/modeling/backbones/regnet/regnetx/RegNetX-800MF_dds_8gpu.yaml',\n '800y': 'fastreid/modeling/backbones/regnet/regnety/RegNetY-800MF_dds_8gpu.yaml',\n '1600x': 'fastreid/modeling/backbones/regnet/regnetx/RegNetX-1.6GF_dds_8gpu.yaml',\n '1600y': 'fastreid/modeling/backbones/regnet/regnety/RegNetY-1.6GF_dds_8gpu.yaml',\n '3200x': 'fastreid/modeling/backbones/regnet/regnetx/RegNetX-3.2GF_dds_8gpu.yaml',\n '3200y': 'fastreid/modeling/backbones/regnet/regnety/RegNetY-3.2GF_dds_8gpu.yaml',\n '4000x': 'fastreid/modeling/backbones/regnet/regnety/RegNetX-4.0GF_dds_8gpu.yaml',\n '4000y': 'fastreid/modeling/backbones/regnet/regnety/RegNetY-4.0GF_dds_8gpu.yaml',\n 
'6400x': 'fastreid/modeling/backbones/regnet/regnetx/RegNetX-6.4GF_dds_8gpu.yaml',\n '6400y': 'fastreid/modeling/backbones/regnet/regnety/RegNetY-6.4GF_dds_8gpu.yaml',\n }[depth]\n\n regnet_cfg.merge_from_file(cfg_files)\n model = RegNet(last_stride, bn_norm)\n\n if pretrain:\n # Load pretrain path if specifically\n if pretrain_path:\n try:\n state_dict = torch.load(pretrain_path, map_location=torch.device('cpu'))\n logger.info(f\"Loading pretrained model from {pretrain_path}\")\n except FileNotFoundError as e:\n logger.info(f'{pretrain_path} is not found! Please check this path.')\n raise e\n except KeyError as e:\n logger.info(\"State dict keys error! Please check the state dict.\")\n raise e\n else:\n key = depth\n state_dict = init_pretrained_weights(key)\n\n incompatible = model.load_state_dict(state_dict, strict=False)\n if incompatible.missing_keys:\n logger.info(\n get_missing_parameters_message(incompatible.missing_keys)\n )\n if incompatible.unexpected_keys:\n logger.info(\n get_unexpected_parameters_message(incompatible.unexpected_keys)\n )\n return model\n"
] |
[
[
"numpy.log",
"torch.empty",
"numpy.power",
"numpy.unique",
"numpy.arange",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.device",
"torch.nn.ReLU",
"numpy.divide"
]
] |
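generate_regnet in the file above derives all per-block widths from just four parameters (w_a, w_0, w_m, d) by snapping a linear width ramp onto a geometric series and quantizing to multiples of q. The arithmetic is self-contained enough to check in isolation; the parameter values below are illustrative, not taken from any of the bundled configs:

import numpy as np

def generate_regnet(w_a, w_0, w_m, d, q=8):
    """Per-block widths from RegNet parameters (same math as the file above)."""
    assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0
    ws_cont = np.arange(d) * w_a + w_0                   # linear width ramp
    ks = np.round(np.log(ws_cont / w_0) / np.log(w_m))   # nearest stage index
    ws = w_0 * np.power(w_m, ks)                         # snap onto geometric series
    ws = np.round(np.divide(ws, q)) * q                  # quantize to multiples of q
    return ws.astype(int).tolist(), len(np.unique(ws))

ws, num_stages = generate_regnet(w_a=36.44, w_0=48, w_m=2.49, d=16)
print(num_stages, ws)  # a handful of stages; widths constant within each stage

get_stages_from_blocks then compresses these per-block widths into per-stage (width, depth) pairs by detecting where consecutive widths differ.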
alivcor/aes
|
[
"f6bb9a6da771db6e786fe00e25d1217786e2b21b"
] |
[
"DeepScore/testing.py"
] |
[
"import EventIssuer\nimport progressbar\nimport numpy\nimport re\nfrom scipy import spatial\n\nbar = progressbar.ProgressBar()\n\ndef loadCompleteGloveModel(logfilename):\n global bar\n model = {}\n EventIssuer.issueMessage(\"Loading GLoVE Word Vectors. This will take a while.\", logfilename)\n EventIssuer.issueSleep(\"Turning to sleep mode.\", logfilename)\n f = open(\"/glove_vectors/glove.42B.300d.txt\", 'r').readlines()\n for i in bar(range(len(f))):\n line = f[i]\n splitLine = line.split()\n word = splitLine[0]\n embedding = [float(val) for val in splitLine[1:]]\n model[word] = embedding\n EventIssuer.issueSuccess(\"Loaded GLoVE Word Vectors\", logfilename)\n EventIssuer.issueMessage(len(model) + \" words loaded.\", logfilename)\n return model\n\ndef getWordVec(word, logfilename):\n word = word.strip().lower()\n # EventIssuer.issueMessage(\"Word2Vec - Lookup : \" + word, logfilename)\n with open(\"/glove_vectors/glove.42B.300d.txt\", 'r') as f:\n for line in f:\n if word[0] == line[0]:\n if word in line:\n splitLine = line.split()\n if word == splitLine[0]:\n embedding = [float(val) for val in splitLine[1:]]\n # EventIssuer.issueSuccess(\"Word2Vec - Found WordVec ! \" + splitLine[0], logfilename)\n return numpy.array(embedding)\n EventIssuer.issueWarning(\"Word2Vec - Primary loopkup failed. Trying advanced lookup : \" + word, logfilename)\n word = re.sub('[^a-z]+', ' ', word)\n words = word.split()\n EventIssuer.issueMessage(\"Word2Vec - Advanced Lookup identifies the presence of these words in the clump : \" + str(words), logfilename)\n EventIssuer.issueSharpAlert(\"Word2Vec - Returning a list of embedding vectors instead of just one: \" + str(words), logfilename)\n wvecs = []\n for word in words:\n iffound = False\n # EventIssuer.issueMessage(\"Word2Vec - Lookup : \" + word, logfilename)\n with open(\"/glove_vectors/glove.42B.300d.txt\", 'r') as f:\n for line in f:\n if word[0] == line[0]:\n if word in line:\n splitLine = line.split()\n if word == splitLine[0]:\n embedding = [float(val) for val in splitLine[1:]]\n # EventIssuer.issueSuccess(\"Word2Vec - Found WordVec ! 
\" + splitLine[0], logfilename)\n iffound = True\n wvecs.append(numpy.array(embedding))\n if not iffound:\n EventIssuer.issueError(\"Word2Vec - Not found : \" + word, logfilename)\n EventIssuer.issueError(\"Word2Vec - returning None\", logfilename)\n wvecs.append(numpy.array([-0.0071971419, 0.038034291, 0.067505044, 0.090004129, -0.1862136, 0.0477487641, 0.6711488, -0.250773268, 0.075999694, 0.219727019, -0.148549504, -0.085121506, 0.0734719341, -0.07891803, 0.028762588, 0.095495766, 0.1527964985, -0.019069604, -0.000159386, -0.014289429, -0.020020568, -0.0305131702, -0.009426722, -0.047993677, -0.0303836513, 0.116149094, 0.003881902, 0.067539798, 0.065941054, 0.076046747, 0.1834477877, 0.006496807, 0.0050932692, 0.074967404, -0.00167740615, 0.0513104294, 0.033262528, 0.0552852064, -0.0307754428, 0.033744941, -0.0169357353, -0.073538496, 0.076556556, 0.0477559612, 0.030152536, -0.011619235, -0.076599137, 0.12030974, 0.120334391, -0.037826136, -0.000444319, -0.0265530467, 0.114825157, -0.0784206728, -0.000263432, 0.036910476, -0.08650113, -0.059461685, 0.048943902, 0.009627125, -0.0007149627, 0.0495817, 0.007380195, -0.070054098, 0.020593077, 0.014643805, -0.002186668, -0.08610112338, -0.017638128, 0.0062037199, 0.1401558776, -0.0838177376, -0.069796513, 0.0320815074, 0.048048583, 0.1172412428, -0.021675258, -0.1057215154, -0.016556235, 0.054823004, -0.0677215806, 0.303310216, -0.03984594, 0.095258791, -0.029040644, -0.10513797, -0.088345066, 0.003876219, -0.01790383, -0.003906333, 0.011204635, 0.1339004495, 0.013769692, 0.017227031, -0.045316618, 0.22964551, 0.58661061, 0.018802402, -0.087426098, -0.04114762, 0.05271822, -0.1029932545, -0.101448259, -0.0051680928, 0.024561747, 0.051917509, -0.06979360401, 0.029217429, -0.01423352, 0.0332625319, 0.051348392, -0.017232962, -0.087031422, -0.025480295, 0.17517301, -0.1112582922, -0.198503899, -0.080264611, -0.0794522566, -0.106146675059, 0.1042043764, 0.03419873, 0.02528134, 0.139665547, -0.054239325, 0.003317982, 0.01446093, 0.055904408, 0.050255285, 0.06597666, -0.0027891771, -0.057818754, 0.050512937, 0.057041066, -0.104284014, 0.048539121, 0.0032904533, 0.036437017, -0.0066634361, -0.393345841, -0.036858317, -0.085352637, -0.03096532, 0.0578057935, 0.050518039, 0.09720068, -0.074841093, 0.02024298, -0.208579942, -0.018347674, 0.0768829309, 0.12065572, 0.018917901, 0.036955241, -0.062720403, 0.120738583, 0.076060262, -0.017255595, 0.140659706, 0.088145097, -0.07723179, 0.033444682, 0.0279491986, 0.0655360962, -0.033268223, 0.073470189, 0.047171408, 0.0065063135, -0.0410798819, 0.11516255, 0.046581279, -0.011725516, 0.0240178502, 0.043832411, 0.069651211, -0.109000856, 0.0098593491, 0.0293464748, -0.05191695, 0.05985769819, 0.0525228829, -0.110554585, -0.04699427, 0.03103713, -0.039577866, 0.0780700909, -0.06267054, 0.02366544, -0.006737524, -0.031316164, -0.0893335936, 0.04499242, -0.143521181, 0.042565344, 0.024705433, 0.024808143, -0.0258511753, -0.068910291, -0.0437374949, 0.13817618, 0.0451180032, 0.050466407, 0.003336691, -0.036905905, -0.053389866, -0.0664220802, -0.0279904458, -0.032312093, -0.070309043, 0.0791351917, -0.035841839026, 0.053526364, 0.166534373, 0.042336844, 0.011465653, 0.03067487, -0.013652506, 0.0774865, 0.064194998, 0.018029728, 0.05811997, 0.005756151, 0.0738715066, -0.004128079, 0.641443, -0.141303355, 0.057028502, 0.0141146709, 0.0160510732, 0.007878947, -0.102036389, -0.004399322, -0.085377204, 0.001418835, -0.0204011879, -0.052716754, -0.0328608991, 0.009225341, 0.090273355, 0.11482565, 0.017034033, 
0.002665617, 0.147449313, -0.130160577, 0.024268086, 0.06980359, -0.084313298, 0.09074731, 0.008108039, -0.040096544, 0.0449544492, 0.003846883, 0.0984588095, -0.037729444, 0.018308231, 0.00237747, -0.017060348, 0.05470805, -0.0540356221, -0.07514963, -0.055423828, 0.069639594, 0.04987485, 0.01293406, -0.0998535, -0.002467158, 0.066136769, 0.0424389575, -0.039217423, -0.044882082, 0.0171389597, 0.00599397584, 0.141697267, -0.043119298, -0.052225656, -0.029637929, -0.09230648, -0.0460420834, -0.056552451, 0.057469622, -0.26924903, 0.116421065, 0.038252634, -0.0685027012, 0.095659097, -0.02042877532, -0.038294187, -0.089739263, 0.049288919, 0.0753936915, -0.006005079, 0.00105016, 0.121543578, 0.08678938, -0.024602735, 0.0278206119, 0.025695247, 0.039601028, 0.00177882, 0.075987566]))\n return wvecs\n\ndef getGloveVector(model, word):\n return model[word]\n\n\nrandom1 = getWordVec(\"half\", \"testing\")\nrandom2 = getWordVec(\"artery\", \"testing\")\nrandom3 = getWordVec(\"man\", \"testing\")\nrandom4 = getWordVec(\"mountain\", \"testing\")\nrandom5 = getWordVec(\"go\", \"testing\")\nrandom6 = getWordVec(\"sleeper\", \"testing\")\n\n\nstudent = getWordVec(\"student\", \"testing\")\nwfor = getWordVec(\"for\", \"testing\")\nwan = getWordVec(\"an\", \"testing\")\nanswer = getWordVec(\"answer\", \"testing\")\n\nsearched = getWordVec(\"searched\", \"testing\")\nlooked = getWordVec(\"looked\", \"testing\")\nsought = getWordVec(\"sought\", \"testing\")\n\n\nsent_vect = (student + wfor + wan + answer)/4\n\n\n\n"
] |
[
[
"numpy.array"
]
] |
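getWordVec above deliberately streams the 42B-token GloVe file line by line rather than loading the full model, matching the query against the first token of each line. A compact sketch of that streaming lookup, assuming the same "word v1 v2 ..." text format (glove.txt below is a tiny fake file written just so the demo is self-contained):

import numpy as np

def get_word_vec(path, word):
    """Stream a GloVe-format .txt file; return the vector for `word` or None."""
    word = word.strip().lower()
    with open(path, "r") as f:
        for line in f:
            parts = line.split()
            if parts and parts[0] == word:
                return np.array([float(v) for v in parts[1:]])
    return None

# Tiny fake GloVe file so the demo runs anywhere.
with open("glove.txt", "w") as f:
    f.write("student 0.1 0.2 0.3\nanswer 0.4 0.5 0.6\n")
print(get_word_vec("glove.txt", "student"))  # [0.1 0.2 0.3]

The fallback path in the file applies the same scan per sub-word after stripping non-letters, collecting one vector per recovered token.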
shagunsodhani/consistent-dynamics
|
[
"cc1527f2468cdcebea9a57387254278eb5547fe3",
"cc1527f2468cdcebea9a57387254278eb5547fe3",
"cc1527f2468cdcebea9a57387254278eb5547fe3"
] |
[
"codes/model/expert_policy/deterministic_mlp.py",
"codes/model/imagination_model/learning_to_query_with_imitation_learning.py",
"codes/envs/wrappers/general.py"
] |
[
"import numpy as np\nimport os\nimport torch\nimport torch.nn as nn\nfrom torch.distributions import Distribution\n\nfrom codes.model.expert_policy.utils import RunningMeanStd\n\nclass Dirac(Distribution):\n def __init__(self, value):\n super(Dirac, self).__init__()\n self.value = value\n\n @property\n def mean(self):\n return self.value\n\n def sample(self):\n return self.value\n\nclass DeterministicMLPPolicy(nn.Module):\n def __init__(self, observation_size, goal_size, output_size, hidden_size,\n num_layers, nonlinearity=nn.ReLU, noise_eps=0.,\n random_eps=0., max_u=1, clip_obs=200.):\n super(DeterministicMLPPolicy, self).__init__()\n self.observation_size = observation_size\n self.goal_size = goal_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.nonlinearity = nonlinearity\n self.noise_eps = noise_eps\n self.random_eps = random_eps\n self.max_u = max_u\n self.clip_obs = clip_obs\n\n layers = []\n input_size = observation_size + goal_size\n for i in range(num_layers):\n input_dim = input_size if i == 0 else hidden_size\n layers.append(nn.Linear(input_dim, hidden_size, bias=True))\n layers.append(nonlinearity())\n input_dim = input_size if num_layers == 0 else hidden_size\n layers.append(nn.Linear(input_dim, output_size, bias=True))\n self.layers = nn.Sequential(*layers)\n\n self.obs_rms = RunningMeanStd(shape=(observation_size,), clip_std=1e-2 ** 2, dtype=torch.float32)\n self.goal_rms = RunningMeanStd(shape=(goal_size,), clip_std=1e-2 ** 2, dtype=torch.float32)\n\n def forward(self, inputs):\n inputs = self._preprocess_inputs(inputs)\n action = self.layers(inputs)\n # Action post-processing\n noise = self.noise_eps * self.max_u * torch.randn(*action.shape)\n action = torch.clamp(action + noise, -self.max_u, self.max_u)\n # TODO: Handle random_eps for epsilon-greedy policies\n return Dirac(value=action)\n\n def _preprocess_inputs(self, inputs):\n # Normalize the inputs. 
Normalization happens in `ActorCritic`\n        # in baselines' implementation of DDPG+HER\n        observation = self.obs_rms(inputs['observation'])\n        goal = self.goal_rms(inputs['desired_goal'])\n        # QKFIX: We assume here that `relative_goals == False`, which is the\n        # case for the policy trained on FetchPush\n        observation = torch.clamp(observation, -self.clip_obs, self.clip_obs)\n        goal = torch.clamp(goal, -self.clip_obs, self.clip_obs)\n        return torch.cat((observation, goal), dim=1)\n\n    def load_weights(self, config_dict):\n        expert_policy_config = config_dict.model.expert_policy\n        name = '{0}__{1}'.format(config_dict.env.name, expert_policy_config.name)\n\n        # Load the Pytorch model (read the checkpoint in binary mode)\n        with open(os.path.join(expert_policy_config.save_dir, '{0}.pt'.format(name)), 'rb') as f:\n            self.load_state_dict(torch.load(f))\n\n    def sample_action(self, input):\n        return self.forward(input).mean.detach().numpy()\n\ndef get_model_using_config_dict(config_dict):\n    expert_policy_config = config_dict.model.expert_policy\n\n    model = DeterministicMLPPolicy(\n        observation_size=int(np.prod(config_dict.env.observation_space.observation.shape)),\n        goal_size=int(np.prod(config_dict.env.observation_space.goal.shape)),\n        output_size=int(np.prod(config_dict.env.action_space.shape)),\n        hidden_size=expert_policy_config.hidden_size,\n        num_layers=expert_policy_config.num_layers,\n        nonlinearity=nn.ReLU,\n        noise_eps=expert_policy_config.noise_eps,\n        random_eps=expert_policy_config.random_eps,\n        max_u=expert_policy_config.max_u,\n        clip_obs=expert_policy_config.clip_obs)\n\n    name = '{0}__{1}'.format(config_dict.env.name, expert_policy_config.name)\n    file_name = os.path.join(expert_policy_config.save_dir, '{0}.th.pt'.format(name))\n    model.load_state_dict(torch.load(file_name))\n    print(\"Model loaded successfully.\")\n    return model\n",
"import time\nfrom itertools import chain\n\nimport torch\nfrom addict import Dict\nfrom torch import nn\n\nfrom codes.model.base_model import BaseModel\nfrom codes.model.imagination_model.util import get_component, merge_first_and_second_dim, \\\n unmerge_first_and_second_dim, sample_zt_from_distribution, clamp_mu_logsigma\nfrom codes.utils.util import get_product_of_iterable, log_pdf\n\nclass Model(BaseModel):\n \"\"\"Learning to Query w/o Imitation Learning\n This model uses the observation-dependent path\"\"\"\n\n def __init__(self, config):\n super(Model, self).__init__(config=config)\n self.convolutional_encoder = get_component(\"convolutional_encoder\", config)\n self.state_transition_model = get_component(\"stochastic_state_transition_model\", config)\n self.convolutional_decoder = get_component(\"stochastic_convolutional_decoder\", config)\n self.prior_model = get_component(\"prior_model\", config)\n self.posterior_model = get_component(\"posterior_model\", config)\n self.use_consistency_model = False\n if(self.config.model.imagination_model.consistency_model.alpha!=0.0):\n self.use_consistency_model = True\n _consistency_model_name = self.config.model.imagination_model.consistency_model.name\n self.is_consistency_model_metric_based = False\n if _consistency_model_name in (\"euclidean\", \"cosine\"):\n self.is_consistency_model_metric_based = True\n self.consistency_model = get_component(\"consistency_model.{}\".format(_consistency_model_name), config)\n\n self.use_imitation_learning_model = False\n if self.config.model.imagination_model.imitation_learning_model.should_train:\n self.use_imitation_learning_model = True\n\n if(self.use_imitation_learning_model):\n self.imitation_learning_model = get_component(\"imitation_learning_model.{}\".\n format(\n self.config.model.imagination_model.imitation_learning_model.name),\n config)\n\n\n def encode_obs(self, obs):\n obs_shape = obs.shape\n per_image_shape = obs_shape[-3:]\n batch_size = obs_shape[0]\n trajectory_length = obs_shape[1]\n num_frames = obs_shape[2]\n h_t = self.convolutional_encoder(obs.view(-1, *per_image_shape)).view(batch_size, trajectory_length, num_frames,\n -1)\n h_t = torch.mean(h_t, dim=2)\n return h_t, trajectory_length\n\n def decode_obs(self, output, trajectory_length):\n reconstructed_obs = self.convolutional_decoder(output)\n per_image_shape = reconstructed_obs.shape[-3:]\n batch_size = int(reconstructed_obs.shape[0] / trajectory_length)\n return reconstructed_obs.view(batch_size, trajectory_length, *per_image_shape)\n\n def forward(self, x):\n # not that x is same as x_(t-1)\n\n sequence_length = self.config.dataset.sequence_length\n imagination_length = self.config.dataset.imagination_length\n\n h, _ = self.encode_obs(obs=x.obs)\n output_obs = x.next_obs\n output_obs_encoding, _ = self.encode_obs(obs=output_obs.unsqueeze(2))\n output_obs_encoding = output_obs_encoding\n action = x.action\n\n open_loop_data = Dict()\n\n # Preparing input for open_loop by using a seperate namespace called as input\n index_to_select_till = sequence_length + imagination_length\n\n open_loop_data.input = Dict()\n open_loop_data.input.unroll_length = index_to_select_till\n open_loop_data.input.output_obs_encoding = output_obs_encoding[:, :index_to_select_till, :]\n\n\n open_loop_data.input.output_obs = output_obs[:, :index_to_select_till, :]\n\n if (self.use_imitation_learning_model):\n open_loop_data.input.action = action[:, 0, :]\n else:\n open_loop_data.input.action = action[:, :index_to_select_till+1, :]\n\n 
open_loop_data.input.h_t = h[:, 0, :]\n\n open_loop_data, imitation_learning_data = self._vectorized_open_loop_prediction(open_loop_data)\n\n if(self.use_imitation_learning_model):\n\n imitation_learning_data.action = action\n imitation_learning_output = self._prepare_imitation_learning_result_to_yield(\n self._imitation_learning_prediction(imitation_learning_data))\n\n yield imitation_learning_output\n del imitation_learning_output\n\n\n to_yield = Dict()\n to_yield.loss = open_loop_data.output.loss\n to_yield.retain_graph = False\n to_yield.description = \"open_loop\"\n\n yield to_yield\n del to_yield\n\n close_loop_data = Dict()\n close_loop_data.input = Dict()\n\n # Note that this is not a bug, we are purposefully not making the prediction over the entire sequence length\n if(sequence_length >= 2*imagination_length):\n close_loop_data.input.sequence_length = 2 * imagination_length\n elif(sequence_length >= imagination_length):\n close_loop_data.input.sequence_length = imagination_length\n else:\n close_loop_data.input.sequence_length = 1\n close_loop_data.input.imagination_length = imagination_length\n index_to_start_with = max(0, sequence_length - close_loop_data.input.sequence_length)\n index_to_select_till = index_to_start_with + close_loop_data.input.sequence_length + close_loop_data.input.imagination_length\n\n close_loop_data.input.h_t = unmerge_first_and_second_dim(open_loop_data.h_t,\n first_dim=action.shape[0])[:,index_to_start_with:index_to_select_till,:]\n\n close_loop_data.input.action = action[:, index_to_start_with:index_to_select_till, :]\n\n close_loop_data.input.output_obs = output_obs[:, index_to_start_with:index_to_select_till, :]\n\n close_loop_output, discriminator_output = self._vectorized_closed_loop_prediction(close_loop_data)\n\n output = Dict()\n output.open_loop = open_loop_data.output\n output.close_loop = close_loop_output\n output.reporting_metrics.log_likelihood = close_loop_output.likelihood.item()\n output.reporting_metrics.consistency_loss = close_loop_output.consistency_loss.item()\n output.discriminator = discriminator_output\n\n alpha = self.config.model.imagination_model.consistency_model.alpha\n loss_tuple = (output.close_loop.loss + alpha * output.close_loop.consistency_loss,\n output.discriminator.loss)\n loss_tuple = tuple(filter(lambda _loss: _loss.requires_grad, loss_tuple))\n\n to_yield = Dict()\n to_yield.loss = loss_tuple[0]\n to_yield.imagination_log_likelihood = output.reporting_metrics.log_likelihood\n to_yield.consistency_loss = output.reporting_metrics.consistency_loss\n to_yield.retain_graph = False\n to_yield.description = \"close_loop\"\n\n if (len(loss_tuple) == 1):\n yield to_yield\n\n else:\n to_yield.retain_graph = True\n yield to_yield\n\n to_yield = Dict()\n to_yield.loss = loss_tuple[1]\n to_yield.discriminator_loss = loss_tuple[1].item()\n to_yield.retain_graph = False\n to_yield.description = \"discriminator\"\n yield to_yield\n\n def _imitation_learning_prediction(self, imitation_learning_data):\n true_output = merge_first_and_second_dim(imitation_learning_data.action[:,1:,:].contiguous())\n predicted_output = imitation_learning_data.prediction\n imitation_learning_output = Dict()\n loss = self.imitation_learning_model.loss(predicted_output, true_output)\n imitation_learning_output.loss = self.config.model.imagination_model.imitation_learning_model.alpha * loss\n\n imitation_learning_output.imitation_learning_loss = loss.item()\n return imitation_learning_output\n\n def _prepare_imitation_learning_result_to_yield(self, 
imitation_learning_output):\n imitation_learning_output.retain_graph = True\n imitation_learning_output.imagination_log_likelihood = 0.0\n imitation_learning_output.description = \"imitation_learning\"\n return imitation_learning_output\n\n def _vectorized_open_loop_prediction(self, open_loop_data):\n # This is a simple implementation of the open loop prediction. This function pulls some operations outside the\n # for-loop and vectorizes them. This is meant as the primary function for doing open-loop prediction.\n # Open loop\n\n unroll_length = open_loop_data.input.unroll_length\n output_obs_encoding = open_loop_data.input.output_obs_encoding\n output_obs = open_loop_data.input.output_obs\n action = open_loop_data.input.action\n if(self.use_imitation_learning_model):\n # First action\n a_t = action\n\n else:\n a_t = action[:, 0, :]\n\n # First state encoding\n h_t = open_loop_data.input.h_t\n\n self.state_transition_model.set_state(h_t)\n\n # Note that this datastructure is used as a container for variables to track. It helps to avoid writing multiple\n # statements.\n temp_data = Dict()\n\n vars_to_track = [\"h_t\", \"z_t\", \"posterior_mu\", \"posterior_sigma\"]\n\n if(self.use_imitation_learning_model):\n vars_to_track.append(\"a_t\")\n\n for name in vars_to_track:\n key = name + \"_list\"\n temp_data[key] = []\n\n for t in range(0, unroll_length):\n current_output_obs_encoding = output_obs_encoding[:, t, :]\n\n posterior = self.sample_zt_from_posterior(h=h_t, a=a_t, o=current_output_obs_encoding)\n z_t = posterior.z_t\n inp = torch.cat((z_t, a_t), dim=1)\n h_t = self.state_transition_model(inp.unsqueeze(1)).squeeze(1)\n if(self.use_imitation_learning_model):\n a_t = self.imitation_learning_model(torch.cat((h_t, z_t), dim=1))\n else:\n a_t = action[:, t+1, :]\n posterior_mu = posterior.mu\n posterior_sigma = posterior.sigma\n\n for name in vars_to_track:\n key = name + \"_list\"\n temp_data[key].append(eval(name).unsqueeze(1))\n\n for name in vars_to_track:\n key = name + \"_list\"\n temp_data[name] = merge_first_and_second_dim(torch.cat(temp_data[key], dim=1))\n\n if not self.use_imitation_learning_model:\n temp_data.a_t = merge_first_and_second_dim(action[:, :unroll_length, :].contiguous())\n\n temp_data.prior = self.sample_zt_from_prior(\n h=temp_data.h_t,\n a=temp_data.a_t)\n\n likelihood_mu, likelihood_sigma = self.convolutional_decoder(\n torch.cat((temp_data.h_t,\n temp_data.z_t), dim=1))\n\n elbo_prior = log_pdf(temp_data.z_t, temp_data.prior.mu, temp_data.prior.sigma)\n elbo_q_likelihood = log_pdf(temp_data.z_t, temp_data.posterior_mu,\n temp_data.posterior_sigma)\n elbo_likelihood = log_pdf(merge_first_and_second_dim(output_obs.contiguous()),\n likelihood_mu, likelihood_sigma)\n elbo = sum([torch.mean(x) for x in (\n elbo_likelihood, elbo_prior, -elbo_q_likelihood)])\n open_loop_data.output = Dict()\n open_loop_data.output.loss = -elbo\n open_loop_data.output.log_likelihood = torch.mean(elbo_likelihood)\n open_loop_data.h_t = temp_data.h_t.detach()\n\n imitation_learning_data = Dict()\n imitation_learning_data.prediction = temp_data.a_t\n\n return open_loop_data, imitation_learning_data\n\n def _vectorized_closed_loop_prediction(self, close_loop_data):\n # This is a simple implementation of the close loop prediction. This function pulls some operations outside the\n # for-loop and vectorizes them. 
This is meant as the primary function for doing close-loop prediction.\n # Closed Loop\n\n sequence_length = close_loop_data.input.sequence_length\n imagination_length = close_loop_data.input.imagination_length\n output_obs = close_loop_data.input.output_obs\n action = close_loop_data.input.action.contiguous()\n a_t = merge_first_and_second_dim(action[:, :sequence_length, :].contiguous())\n true_h_t = close_loop_data.input.h_t\n h_t = true_h_t[:, :sequence_length, :]\n h_t = merge_first_and_second_dim(h_t.contiguous())\n\n self.state_transition_model.set_state(h_t)\n elbo_likelihood = []\n consistency_loss = Dict()\n consistency_loss.discriminator = []\n consistency_loss.close_loop = []\n h_t_from_close_loop = None\n\n\n for t in range(0, imagination_length):\n\n prior = self.sample_zt_from_prior(h=h_t, a=a_t)\n z_t = prior.z_t\n inp = torch.cat((z_t, a_t), dim=1)\n h_t = self.state_transition_model(inp.unsqueeze(1)).squeeze(1)\n if(self.use_imitation_learning_model):\n a_t = self.imitation_learning_model(torch.cat((h_t, z_t), dim=1))\n\n else:\n a_t = merge_first_and_second_dim(action[:, t+1:t + sequence_length+1, :].contiguous())\n\n h_t_from_open_loop = true_h_t[:, t + 1:t + sequence_length + 1, :]\n h_t_from_close_loop = h_t\n\n if(self.use_consistency_model):\n\n if (self.is_consistency_model_metric_based):\n h_t_from_open_loop = merge_first_and_second_dim(h_t_from_open_loop.contiguous())\n\n else:\n h_t_from_close_loop = unmerge_first_and_second_dim(h_t_from_close_loop,\n first_dim=-1,\n second_dim=sequence_length)\n\n loss_close_loop, loss_discriminator = self.consistency_model((h_t_from_open_loop, h_t_from_close_loop))\n consistency_loss.close_loop.append(loss_close_loop)\n consistency_loss.discriminator.append(loss_discriminator)\n\n likelihood_mu, likelihood_sigma = self.convolutional_decoder(\n torch.cat((h_t, z_t), dim=1))\n\n elbo_likelihood.append(\n log_pdf(merge_first_and_second_dim(output_obs[:, t:t + sequence_length, :].contiguous()),\n likelihood_mu,\n likelihood_sigma))\n\n elbo_likelihood = list(map(lambda x: torch.mean(x).unsqueeze(0), elbo_likelihood))\n elbo_likelihood = torch.mean(torch.cat(elbo_likelihood))\n\n for key in consistency_loss:\n if consistency_loss[key]:\n # Checking if the list is non-empty\n consistency_loss[key] = torch.mean(torch.cat(consistency_loss[key]))\n else:\n consistency_loss[key] = torch.tensor(0.0).to(device=elbo_likelihood.device)\n\n\n close_loop_output = Dict()\n close_loop_output.loss = -elbo_likelihood\n close_loop_output.likelihood = elbo_likelihood\n close_loop_output.consistency_loss = consistency_loss.close_loop\n discriminator_output = Dict()\n discriminator_output.loss = consistency_loss.discriminator\n\n return close_loop_output, discriminator_output\n\n\n def sample_zt_from_prior(self, h, a):\n mu, logsigma = self.prior_model(torch.cat((h, a), dim=1))\n mu, logsigma = clamp_mu_logsigma(mu, logsigma)\n return sample_zt_from_distribution(mu, logsigma)\n\n def sample_zt_from_posterior(self, h, a, o):\n mu, logsigma = self.posterior_model(torch.cat((h, a, o), dim=1))\n mu, logsigma = clamp_mu_logsigma(mu, logsigma)\n return sample_zt_from_distribution(mu, logsigma)\n\n def get_optimizers(self):\n '''Method to return the list of optimizers for the model'''\n optimizers = []\n model_params = []\n if(self.use_imitation_learning_model):\n imitation_learning_model_params = list(self.get_imitation_learning_model_params())\n model_params.append(imitation_learning_model_params)\n open_loop_params = list(self.get_open_loop_params())\n 
model_params.append(open_loop_params)\n close_loop_params = list(self.get_close_loop_params())\n model_params.append(close_loop_params)\n if(self.use_consistency_model):\n consistency_model_params = list(self.get_consistency_model_params())\n model_params.append(consistency_model_params)\n optimizers = tuple(map(self._register_params_to_optimizer, filter(lambda x: x, model_params)))\n if (optimizers):\n return optimizers\n return None\n\n def get_open_loop_params(self):\n # Method to get params which are to be updated with the open loop\n open_loop_models = (self.convolutional_encoder,\n self.state_transition_model,\n self.convolutional_decoder,\n self.prior_model,\n self.posterior_model)\n open_loop_params = tuple(map(lambda model: model.get_model_params(), open_loop_models))\n return chain(*open_loop_params)\n\n def get_close_loop_params(self):\n # Method to get params which are to be updated with the close loop\n close_loop_models = [self.state_transition_model,]\n if(self.use_imitation_learning_model):\n close_loop_models.append(self.imitation_learning_model)\n close_loop_params = tuple(map(lambda model: model.get_model_params(), close_loop_models))\n return chain(*close_loop_params)\n\n def get_consistency_model_params(self):\n # Method to get params which are to be updated with the consistency model\n consistency_models = (self.consistency_model,)\n consistency_model_params = tuple(map(lambda model: model.get_model_params(), consistency_models))\n return chain(*consistency_model_params)\n\n def get_imitation_learning_model_params(self):\n # Method to get params which are to be updated with the imitation learning model\n imitation_learning_models = (self.imitation_learning_model,)\n imitation_learning_model_params = tuple(map(lambda model: model.get_model_params(), imitation_learning_models))\n return chain(*imitation_learning_model_params)\n\n",
"import numpy as np\nimport gym\nfrom gym import spaces\nfrom collections import deque\n\nclass FramesStack(gym.Wrapper):\n def __init__(self, env, num_stack=4):\n super(FramesStack, self).__init__(env)\n self.num_stack = num_stack\n self.obs_shape = self.env.observation_space.shape\n self._buffer = deque([], maxlen=self.num_stack)\n self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8,\n shape=(self.num_stack * self.obs_shape[0],) + self.obs_shape[1:])\n\n def _get_observation(self):\n assert len(self._buffer) == self.num_stack\n return LazyFrames(list(self._buffer))\n\n def reset(self, **kwargs):\n observation, reward, done, info = self.env.reset(**kwargs)\n null_observation = np.zeros(self.obs_shape, dtype=np.uint8)\n for _ in range(self.num_stack - 1):\n self._buffer.append(null_observation)\n self._buffer.append(observation)\n # reward = None\n # done = None\n # info = {\n # \"reward_run\": None,\n # \"reward_ctrl\": None,\n # \"state\": None\n # }\n return (self._get_observation(), reward, done, info)\n\n def step(self, action, **kwargs):\n observation, reward, done, info = self.env.step(action, **kwargs)\n self._buffer.append(observation)\n return (self._get_observation(), reward, done, info)\n\nclass RollAxisObservationWrapper(gym.ObservationWrapper):\n def __init__(self, env):\n super(RollAxisObservationWrapper, self).__init__(env)\n obs_shape = self.env.observation_space.shape\n self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8,\n shape=(obs_shape[-1],) + obs_shape[:-1])\n\n def observation(self, observation):\n return np.rollaxis(observation, axis=2)\n\n def reset(self):\n observation, reward, done, info = self.env.reset()\n return self.observation(observation), reward, done, info\n\nclass LazyFrames(object):\n def __init__(self, frames):\n self._frames = frames\n self._out = None\n\n @property\n def out(self):\n if self._out is None:\n self._out = np.concatenate(list(map(lambda frame: np.expand_dims(frame, axis=0), self._frames)), axis=0)\n self._frames = None\n return self._out\n \n def __array__(self, dtype=None):\n out = self.out\n if dtype is not None:\n out = out.astype(dtype)\n return out\n\n def __len__(self):\n if self._frames is None:\n return len(self.out)\n else:\n return len(self._frames)\n"
] |
[
[
"torch.nn.Sequential",
"torch.load",
"torch.cat",
"torch.randn",
"torch.nn.Linear",
"numpy.prod",
"torch.clamp"
],
[
"torch.tensor",
"torch.mean",
"torch.cat"
],
[
"numpy.rollaxis",
"numpy.expand_dims",
"numpy.zeros"
]
] |
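FramesStack in general.py keeps the last num_stack observations in a bounded deque, pads with null frames on reset, and defers the actual np.concatenate to LazyFrames. The core trick is independent of gym and can be exercised directly; the shapes below are arbitrary:

from collections import deque

import numpy as np

num_stack, obs_shape = 4, (2, 2)
buffer = deque([], maxlen=num_stack)

# Reset: pad with null observations, then append the first real frame.
for _ in range(num_stack - 1):
    buffer.append(np.zeros(obs_shape, dtype=np.uint8))
buffer.append(np.full(obs_shape, 255, dtype=np.uint8))

# LazyFrames-style materialization: only stack when the array is requested.
stacked = np.concatenate([np.expand_dims(f, axis=0) for f in buffer], axis=0)
print(stacked.shape, stacked[0].max(), stacked[-1].max())  # (4, 2, 2) 0 255

LazyFrames exists purely to defer this concatenation, so that buffers can hold shared frame references rather than num_stack copies per step.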
HandsomeBrotherShuaiLi/FaceTask
|
[
"19bd202e92f0a394b4c8f2860ba7e088f2f5b573"
] |
[
"libs/EfficientDet/augmentor/color.py"
] |
[
"import numpy as np\nfrom PIL import Image, ImageEnhance, ImageOps\n\n\ndef autocontrast(image, prob=0.5):\n random_prob = np.random.uniform()\n if random_prob > prob:\n return image\n image = Image.fromarray(image)\n image = ImageOps.autocontrast(image)\n image = np.array(image)\n return image\n\n\ndef equalize(image, prob=0.5):\n random_prob = np.random.uniform()\n if random_prob > prob:\n return image\n image = Image.fromarray(image)\n image = ImageOps.equalize(image)\n image = np.array(image)\n return image\n\n\ndef solarize(image, prob=0.5, threshold=128.):\n random_prob = np.random.uniform()\n if random_prob > prob:\n return image\n image = Image.fromarray(image)\n image = ImageOps.solarize(image, threshold=threshold)\n image = np.array(image)\n return image\n\n\ndef sharpness(image, prob=0.5, min=0, max=2, factor=None):\n random_prob = np.random.uniform()\n if random_prob > prob:\n return image\n if factor is None:\n # 0 模糊一点, 1 原图, 2 清晰一点\n factor = np.random.uniform(min, max)\n image = Image.fromarray(image)\n enhancer = ImageEnhance.Sharpness(image)\n image = enhancer.enhance(factor=factor)\n return np.array(image)\n\n\ndef color(image, prob=0.5, min=0., max=1., factor=None):\n random_prob = np.random.uniform()\n if random_prob > prob:\n return image\n if factor is None:\n # factor=0 返回黑白色, factor=1 返回原图\n factor = np.random.uniform(min, max)\n image = Image.fromarray(image)\n enhancer = ImageEnhance.Color(image)\n image = enhancer.enhance(factor=factor)\n return np.array(image)\n\n\ndef contrast(image, prob=0.5, min=0.2, max=1., factor=None):\n random_prob = np.random.uniform()\n if random_prob > prob:\n return image\n if factor is None:\n # factor=0 返回灰色, factor=1 返回原图\n factor = np.random.uniform(min, max)\n image = Image.fromarray(image)\n enhancer = ImageEnhance.Contrast(image)\n image = enhancer.enhance(factor=factor)\n return np.array(image)\n\n\ndef brightness(image, prob=0.5, min=0.8, max=1., factor=None):\n random_prob = np.random.uniform()\n if random_prob > prob:\n return image\n if factor is None:\n # factor=0 返回全黑色, factor=1 返回原图\n factor = np.random.uniform(min, max)\n image = Image.fromarray(image)\n enhancer = ImageEnhance.Brightness(image)\n image = enhancer.enhance(factor=factor)\n return np.array(image)\n\n\nclass VisualEffect:\n \"\"\"\n Struct holding parameters and applying image color transformation.\n\n Args\n solarize_threshold:\n color_factor: A factor for adjusting color.\n contrast_factor: A factor for adjusting contrast.\n brightness_factor: A factor for adjusting brightness.\n sharpness_factor: A factor for adjusting sharpness.\n \"\"\"\n\n def __init__(\n self,\n color_factor=None,\n contrast_factor=None,\n brightness_factor=None,\n sharpness_factor=None,\n color_prob=0.5,\n contrast_prob=0.5,\n brightness_prob=0.5,\n sharpness_prob=0.5,\n autocontrast_prob=0.5,\n equalize_prob=0.5,\n solarize_prob=0.1,\n solarize_threshold=128.,\n\n ):\n self.color_factor = color_factor\n self.contrast_factor = contrast_factor\n self.brightness_factor = brightness_factor\n self.sharpness_factor = sharpness_factor\n self.color_prob = color_prob\n self.contrast_prob = contrast_prob\n self.brightness_prob = brightness_prob\n self.sharpness_prob = sharpness_prob\n self.autocontrast_prob = autocontrast_prob\n self.equalize_prob = equalize_prob\n self.solarize_prob = solarize_prob\n self.solarize_threshold = solarize_threshold\n\n def __call__(self, image):\n \"\"\"\n Apply a visual effect on the image.\n\n Args\n image: Image to adjust\n \"\"\"\n random_enhance_id = 
np.random.randint(0, 4)\n if random_enhance_id == 0:\n image = color(image, prob=self.color_prob, factor=self.color_factor)\n elif random_enhance_id == 1:\n image = contrast(image, prob=self.contrast_prob, factor=self.contrast_factor)\n elif random_enhance_id == 2:\n image = brightness(image, prob=self.brightness_prob, factor=self.brightness_factor)\n else:\n image = sharpness(image, prob=self.sharpness_prob, factor=self.sharpness_factor)\n\n random_ops_id = np.random.randint(0, 3)\n if random_ops_id == 0:\n image = autocontrast(image, prob=self.autocontrast_prob)\n elif random_ops_id == 1:\n image = equalize(image, prob=self.equalize_prob)\n else:\n image = solarize(image, prob=self.solarize_prob, threshold=self.solarize_threshold)\n return image\n"
] |
[
[
"numpy.random.uniform",
"numpy.array",
"numpy.random.randint"
]
] |
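The `VisualEffect` augmenter above applies one random `ImageEnhance` op and one random `ImageOps` op per call, each gated by its own probability. A minimal usage sketch; the `visual_effect` module name is an assumption, not part of the listing:

```python
import numpy as np

from visual_effect import VisualEffect  # assumed module name for the file above

# A dummy HWC uint8 RGB image; the functions above round-trip through PIL,
# so the input must be an array that Image.fromarray accepts.
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

augmenter = VisualEffect(color_prob=0.5, solarize_prob=0.1)
augmented = augmenter(image)  # one random enhance op, then one random ImageOps op
assert augmented.shape == image.shape  # all ops preserve image size
```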
IceCrew-Source/BachelorDIM-Lectures-Algorithms-2020
|
[
"95d2761883feebada25f62c20bdfe405a1353f61"
] |
[
"sessions/session_2/test_S2.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 16 14:02:38 2020\n\n@author: viardcrl\n\"\"\"\n\nimport pytest as pt\nimport S1_algotools as s1\nimport numpy as np\n\n#------------------------------------------------------------------------#\n#------------------------------ EXERCISE 1 ------------------------------#\n#------------------------------------------------------------------------#\n\ndef test_average_above_zero_0():\n assert s1.average_above_zero([2,2]) == 2\n\ndef test_average_above_zero_1():\n with pt.raises(ZeroDivisionError):\n s1.average_above_zero([0])\n \ndef test_average_above_zero_2():\n with pt.raises(Exception):\n assert s1.average_above_zero([-1, 2]) == 2\n \ndef test_average_above_zero_3():\n with pt.raises(TypeError):\n s1.average_above_zero(['n'])\n \ndef test_average_above_zero_4():\n with pt.raises(TypeError):\n s1.average_above_zero([None])\n \n#------------------------------------------------------------------------#\n#------------------------------ EXERCISE 2 ------------------------------#\n#------------------------------------------------------------------------#\n \ndef test_max_value_0():\n assert s1.max_value([1,5,3,4,9,5]) == 4\n \ndef test_max_value_1():\n assert s1.max_value([1.4,5.3,3.2,4.5,9.9,5.2]) == 4\n \ndef test_max_value_2():\n with pt.raises(TypeError):\n assert s1.max_value([1,5,'n'])\n \ndef test_max_value_3():\n with pt.raises(TypeError):\n assert s1.max_value([2,None])\n \ndef test_max_value_4():\n assert s1.max_value([0,0,0,0,0]) == 0\n \n#------------------------------------------------------------------------#\n#------------------------------ EXERCISE 3 ------------------------------#\n#------------------------------------------------------------------------#\n \ndef test_reverse_table_0():\n assert s1.reverse_table([1,5,3,4,9,5]) == [5,9,4,3,5,1]\n \ndef test_reverse_table_1():\n assert s1.reverse_table([0,0,0,0,0]) == [0,0,0,0,0]\n \ndef test_reverse_table_2():\n with pt.raises(TypeError):\n assert s1.reverse_table([1,5,'n'])\n \ndef test_reverse_table_3():\n with pt.raises(TypeError):\n assert s1.reverse_table([2,None])\n \ndef test_reverse_table_4():\n assert s1.reverse_table([1.1,2,9,4.2]) == [4.2,9,2,1.1]\n \n#------------------------------------------------------------------------#\n#------------------------------ EXERCISE 4 ------------------------------#\n#------------------------------------------------------------------------#\n \n@pt.fixture\ndef zero_numpy():\n zero_numpy = np.zeros((10,10), dtype=float)\n return zero_numpy\n\n@pt.fixture\ndef zero_numpy_with_one(zero_numpy):\n zero_numpy[2:5, 2:5] = np.ones((3,3), dtype=float)\n return zero_numpy\n\ndef test_roi_bbox(zero_numpy_with_one, zero_numpy):\n assert s1.roi_bbox(zero_numpy) == ([2,2],[4,4])"
] |
[
[
"numpy.zeros",
"numpy.ones"
]
] |
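The test expectations above pin down the contract of `S1_algotools.average_above_zero` fairly tightly. A minimal sketch consistent with those tests; the real implementation is not part of this listing, so this is an assumed reconstruction:

```python
def average_above_zero(table):
    """Average of the strictly positive values in table, per the tests above."""
    total, count = 0.0, 0
    for value in table:
        if value < 0:  # comparing str or None to int raises TypeError, as tests 3 and 4 expect
            raise ValueError("negative value encountered")  # any Exception satisfies test 2
        if value > 0:
            total += value
            count += 1
    return total / count  # no positive values -> ZeroDivisionError, as test 1 expects
```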
The0nix/QuartzNet
|
[
"55613fde1c7d0fca2e81854d5c5c80a771126873"
] |
[
"src/core/model.py"
] |
[
"from typing import Union, Collection\n\nimport torch.nn as nn\n\n\nclass QuartzBlock(nn.Module):\n \"\"\"\n Basic block of QuartzNet consisting of Separable Convolution, BatchNorm and ReLU repeating R times\n :\n :param C_in: number of input channels\n :param C: C from paper (Channels) -- number of output channels\n :param K: K from paper (Kernel) -- size of kernels\n :param R: R from paper (Repeats) -- number of repetitions of block constituents\n \"\"\"\n def __init__(self, C_in: int, C: int, K: int, R: int) -> None:\n super().__init__()\n self.R = R\n self.blocks = nn.ModuleList([\n nn.ModuleList([\n # TODO: Is this really \"time-channel separable conv\"?\n nn.Conv1d(C_in if i == 0 else C, C, # C_in inputs for first, C for others\n kernel_size=K,\n groups=C_in if i == 0 else C, # same\n padding=K // 2),\n nn.Conv1d(C, C, kernel_size=1),\n nn.BatchNorm1d(C),\n nn.ReLU(),\n ])\n for i in range(R)\n ])\n self.res_conv = nn.Sequential( # convolution for residual\n nn.Conv1d(C_in, C, kernel_size=1),\n nn.BatchNorm1d(C),\n )\n\n def forward(self, x):\n x_initial = x\n for i, block in enumerate(self.blocks):\n for j, layer in enumerate(block):\n if not (i == len(self.blocks) - 1 and j == len(block) - 1): # If not last ReLU\n x = layer(x)\n else:\n # Pass residual\n x = x + self.res_conv(x_initial)\n x = layer(x)\n return x\n\n\nclass QuartzNet(nn.Module):\n \"\"\"\n QuartzNet ASR model combining QuartzBlocks and CTC\n :param C_in: number of input channels\n :param Ss: iterable of 5 values designating repetitions of each B_i block or integer if all repetitions are the same\n :param Cs: Output channels in blocks\n :param Ks: Kernel sizes in blocks\n :param Rs: Number of repetitions inside of each block\n :param n_labels: number of output labels\n \"\"\"\n def __init__(self, C_in, n_labels: int, Cs: Collection, Ks: Collection, Rs: Collection,\n Ss: Union[Collection, int]) -> None:\n super().__init__()\n assert isinstance(Ss, int) or len(Ss) == 5, \"Ss must be an int or collection of length 5\"\n assert len(Cs) == 5, \"Cs must be a collection of length 5\"\n assert len(Ks) == 5, \"Cs must be a collection of length 5\"\n assert len(Rs) == 5, \"Cs must be a collection of length 5\"\n if isinstance(Ss, int):\n Ss = [Ss] * 5\n self.n_labels = n_labels\n\n self.C1 = nn.Sequential(\n nn.Conv1d(C_in, 256, groups=C_in, kernel_size=33, padding=33 // 2, stride=2),\n nn.Conv1d(256, 256, kernel_size=1),\n nn.BatchNorm1d(256),\n nn.ReLU()\n )\n\n self.Bs = nn.Sequential(\n *[\n QuartzBlock(C_in_ if i == 0 else C, C, K, R) # C_in if first out of S repetitions else C\n for C_in_, C, K, R, S in zip([256] + Cs[:-1], Cs, Ks, Rs, Ss)\n for i in range(S) # Repeat each QuartzBlock S times\n ]\n )\n\n self.C2 = nn.Sequential(\n nn.Conv1d(512, 512, groups=512, kernel_size=87, dilation=2, padding=87 - 1),\n nn.Conv1d(512, 512, kernel_size=1),\n nn.BatchNorm1d(512),\n nn.ReLU()\n )\n self.C3 = nn.Sequential(\n nn.Conv1d(512, 1024, kernel_size=1),\n nn.BatchNorm1d(1024),\n nn.ReLU()\n )\n self.C4 = nn.Sequential(\n nn.Conv1d(1024, n_labels, kernel_size=1),\n )\n\n def forward(self, x):\n x = self.C1(x)\n x = self.Bs(x)\n x = self.C2(x)\n x = self.C3(x)\n x = self.C4(x)\n return x.log_softmax(dim=1)\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.ReLU",
"torch.nn.Conv1d"
]
] |
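A sketch instantiating the `QuartzNet` class above. The hyperparameters are illustrative (loosely QuartzNet-5x5-like) rather than this repo's actual configuration; note that the hard-coded `C2` layer requires the last entry of `Cs` to be 512, and `C1` requires `256 % C_in == 0`:

```python
import torch

model = QuartzNet(
    C_in=64,            # e.g. 64 mel bins; C1 needs 256 % C_in == 0
    n_labels=29,        # e.g. 28 characters + CTC blank (assumed)
    Cs=[256, 256, 512, 512, 512],   # last entry must be 512 for the fixed C2 layer
    Ks=[33, 39, 51, 63, 75],        # odd kernels so K // 2 padding preserves length
    Rs=[5, 5, 5, 5, 5],
    Ss=1,               # each B_i block appears once
)
mels = torch.randn(8, 64, 256)   # (batch, mel bins, time)
log_probs = model(mels)          # (batch, n_labels, time // 2), log-softmax over labels
print(log_probs.shape)
```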
jackonelli/dnn-mode-connectivity
|
[
"922a6a81af45e06cee4aec7e2031a55e8c660c34"
] |
[
"plane.py"
] |
[
"\"\"\"Plane\"\"\"\nimport os\nimport argparse\nimport numpy as np\nimport tabulate\nimport torch\nimport torch.nn.functional as F\n\nimport data\nimport models\nimport curves\nimport utils\n\nparser = argparse.ArgumentParser(\n description=\"Computes values for plane visualization\")\nparser.add_argument(\"--dir\",\n type=str,\n default=\"/tmp/plane\",\n metavar=\"DIR\",\n help=\"training directory (default: /tmp/plane)\")\n\nparser.add_argument(\"--grid_points\",\n type=int,\n default=21,\n metavar=\"N\",\n help=\"number of points in the grid (default: 21)\")\nparser.add_argument(\"--margin_left\",\n type=float,\n default=0.2,\n metavar=\"M\",\n help=\"left margin (default: 0.2)\")\nparser.add_argument(\"--margin_right\",\n type=float,\n default=0.2,\n metavar=\"M\",\n help=\"right margin (default: 0.2)\")\nparser.add_argument(\"--margin_bottom\",\n type=float,\n default=0.2,\n metavar=\"M\",\n help=\"bottom margin (default: 0.)\")\nparser.add_argument(\"--margin_top\",\n type=float,\n default=0.2,\n metavar=\"M\",\n help=\"top margin (default: 0.2)\")\n\nparser.add_argument(\"--curve_points\",\n type=int,\n default=61,\n metavar=\"N\",\n help=\"number of points on the curve (default: 61)\")\n\nparser.add_argument(\"--dataset\",\n type=str,\n default=\"CIFAR10\",\n metavar=\"DATASET\",\n help=\"dataset name (default: CIFAR10)\")\nparser.add_argument(\n \"--use_test\",\n action=\"store_true\",\n help=\"switches between validation and test set (default: validation)\")\nparser.add_argument(\"--transform\",\n type=str,\n default=\"VGG\",\n metavar=\"TRANSFORM\",\n help=\"transform name (default: VGG)\")\nparser.add_argument(\"--data_path\",\n type=str,\n default=None,\n metavar=\"PATH\",\n help=\"path to datasets location (default: None)\")\nparser.add_argument(\"--batch_size\",\n type=int,\n default=128,\n metavar=\"N\",\n help=\"input batch size (default: 128)\")\nparser.add_argument(\"--num_workers\",\n type=int,\n default=4,\n metavar=\"N\",\n help=\"number of workers (default: 4)\")\n\nparser.add_argument(\"--model\",\n type=str,\n default=None,\n metavar=\"MODEL\",\n help=\"model name (default: None)\")\nparser.add_argument(\"--curve\",\n type=str,\n default=None,\n metavar=\"CURVE\",\n help=\"curve type to use (default: None)\")\nparser.add_argument(\"--num_bends\",\n type=int,\n default=3,\n metavar=\"N\",\n help=\"number of curve bends (default: 3)\")\n\nparser.add_argument(\"--ckpt\",\n type=str,\n default=None,\n metavar=\"CKPT\",\n help=\"checkpoint to eval (default: None)\")\n\nparser.add_argument(\"--wd\",\n type=float,\n default=1e-4,\n metavar=\"WD\",\n help=\"weight decay (default: 1e-4)\")\n\nargs = parser.parse_args()\n\nos.makedirs(args.dir, exist_ok=True)\n\ntorch.backends.cudnn.benchmark = True\n\nloaders, num_classes = data.loaders(args.dataset,\n args.data_path,\n args.batch_size,\n args.num_workers,\n args.transform,\n args.use_test,\n shuffle_train=False)\n\narchitecture = getattr(models, args.model)\ncurve = getattr(curves, args.curve)\n\ncurve_model = curves.CurveNet(\n num_classes,\n curve,\n architecture.curve,\n args.num_bends,\n architecture_kwargs=architecture.kwargs,\n)\ncurve_model.cuda()\n\ncheckpoint = torch.load(args.ckpt)\ncurve_model.load_state_dict(checkpoint[\"model_state\"])\n\ncriterion = F.cross_entropy\nregularizer = utils.l2_regularizer(args.wd)\n\n\ndef get_xy(point, origin, vector_x, vector_y):\n return np.array(\n [np.dot(point - origin, vector_x),\n np.dot(point - origin, vector_y)])\n\n\nw = list()\ncurve_parameters = 
list(curve_model.net.parameters())\nfor i in range(args.num_bends):\n w.append(\n np.concatenate([\n p.data.cpu().numpy().ravel()\n for p in curve_parameters[i::args.num_bends]\n ]))\n\nprint(\"Weight space dimensionality: %d\" % w[0].shape[0])\n\nu = w[2] - w[0]\ndx = np.linalg.norm(u)\nu /= dx\n\nv = w[1] - w[0]\nv -= np.dot(u, v) * u\ndy = np.linalg.norm(v)\nv /= dy\n\nbend_coordinates = np.stack(get_xy(p, w[0], u, v) for p in w)\n\nts = np.linspace(0.0, 1.0, args.curve_points)\ncurve_coordinates = []\nfor t in np.linspace(0.0, 1.0, args.curve_points):\n weights = curve_model.weights(torch.Tensor([t]).cuda())\n curve_coordinates.append(get_xy(weights, w[0], u, v))\ncurve_coordinates = np.stack(curve_coordinates)\n\nG = args.grid_points\nalphas = np.linspace(0.0 - args.margin_left, 1.0 + args.margin_right, G)\nbetas = np.linspace(0.0 - args.margin_bottom, 1.0 + args.margin_top, G)\n\ntr_loss = np.zeros((G, G))\ntr_nll = np.zeros((G, G))\ntr_acc = np.zeros((G, G))\ntr_err = np.zeros((G, G))\n\nte_loss = np.zeros((G, G))\nte_nll = np.zeros((G, G))\nte_acc = np.zeros((G, G))\nte_err = np.zeros((G, G))\n\ngrid = np.zeros((G, G, 2))\n\nbase_model = architecture.base(num_classes, **architecture.kwargs)\nbase_model.cuda()\n\ncolumns = [\n \"X\", \"Y\", \"Train loss\", \"Train nll\", \"Train error (%)\", \"Test nll\",\n \"Test error (%)\"\n]\n\nfor i, alpha in enumerate(alphas):\n for j, beta in enumerate(betas):\n p = w[0] + alpha * dx * u + beta * dy * v\n\n offset = 0\n for parameter in base_model.parameters():\n size = np.prod(parameter.size())\n value = p[offset:offset + size].reshape(parameter.size())\n parameter.data.copy_(torch.from_numpy(value))\n offset += size\n\n utils.update_bn(loaders[\"train\"], base_model)\n\n tr_res = utils.test(loaders[\"train\"], base_model, criterion,\n regularizer)\n te_res = utils.test(loaders[\"test\"], base_model, criterion,\n regularizer)\n\n tr_loss_v, tr_nll_v, tr_acc_v = tr_res[\"loss\"], tr_res[\"nll\"], tr_res[\n \"accuracy\"]\n te_loss_v, te_nll_v, te_acc_v = te_res[\"loss\"], te_res[\"nll\"], te_res[\n \"accuracy\"]\n\n c = get_xy(p, w[0], u, v)\n grid[i, j] = [alpha * dx, beta * dy]\n\n tr_loss[i, j] = tr_loss_v\n tr_nll[i, j] = tr_nll_v\n tr_acc[i, j] = tr_acc_v\n tr_err[i, j] = 100.0 - tr_acc[i, j]\n\n te_loss[i, j] = te_loss_v\n te_nll[i, j] = te_nll_v\n te_acc[i, j] = te_acc_v\n te_err[i, j] = 100.0 - te_acc[i, j]\n\n values = [\n grid[i, j, 0], grid[i, j, 1], tr_loss[i, j], tr_nll[i, j],\n tr_err[i, j], te_nll[i, j], te_err[i, j]\n ]\n table = tabulate.tabulate([values],\n columns,\n tablefmt=\"simple\",\n floatfmt=\"10.4f\")\n if j == 0:\n table = table.split(\"\\n\")\n table = \"\\n\".join([table[1]] + table)\n else:\n table = table.split(\"\\n\")[2]\n print(table)\n\nnp.savez(os.path.join(args.dir, \"plane.npz\"),\n ts=ts,\n bend_coordinates=bend_coordinates,\n curve_coordinates=curve_coordinates,\n alphas=alphas,\n betas=betas,\n grid=grid,\n tr_loss=tr_loss,\n tr_acc=tr_acc,\n tr_nll=tr_nll,\n tr_err=tr_err,\n te_loss=te_loss,\n te_acc=te_acc,\n te_nll=te_nll,\n te_err=te_err)\n"
] |
[
[
"numpy.dot",
"numpy.linspace",
"torch.load",
"torch.Tensor",
"numpy.linalg.norm",
"numpy.stack",
"torch.from_numpy",
"numpy.zeros"
]
] |
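One portability note on the script above: `np.stack(get_xy(p, w[0], u, v) for p in w)` passes a generator, which newer NumPy releases reject (`np.stack` expects a sequence such as a list). A self-contained sketch of the same plane projection with the list materialized; the random vectors stand in for the three flattened checkpoints:

```python
import numpy as np

def get_xy(point, origin, vector_x, vector_y):
    # Coordinates of `point` in the plane spanned by the orthonormal basis (u, v).
    return np.array([np.dot(point - origin, vector_x),
                     np.dot(point - origin, vector_y)])

rng = np.random.RandomState(0)
w = [rng.randn(1000) for _ in range(3)]    # stand-ins for the flattened bend weights

u = w[2] - w[0]
u /= np.linalg.norm(u)
v = w[1] - w[0]
v -= np.dot(u, v) * u                      # Gram-Schmidt: make v orthogonal to u
v /= np.linalg.norm(v)

bend_coordinates = np.stack([get_xy(p, w[0], u, v) for p in w])  # list, not generator
print(bend_coordinates.shape)              # (3, 2)
```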
makrinekaralas/time-series-analysis
|
[
"aea78d240710ff7949b49380e42fc1ebba2b99f5"
] |
[
"tsa/.ipynb_checkpoints/auto_arima-checkpoint.py"
] |
[
"__author__ = \"Maka Karalashvili\"\n__copyright__ = \"BMW Group\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Maka Karalashvili\"\n__email__ = \"maka.karalashvili@bmw.de\"\n__status__ = \"Development\"\n\nfrom tsa import Logger\n\nfrom tsa import UVariateTimeSeriesClass\nfrom tsa import print_attributes\n\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pmdarima as pm\nfrom time import time\n\n\nclass AutoARIMAForecaster(UVariateTimeSeriesClass):\n \"\"\"Univariate time series child class using pmdarima.auto_arima for forecasting\n\n Attributes\n ----------\n ref. to https://pypi.org/project/pmdarima/\n https://www.alkaline-ml.com/pmdarima/modules/generated/pmdarima.arima.AutoARIMA.html#pmdarima.arima.AutoARIMA\n _start_p: int\n The starting value for p\n _start_q: int\n The starting value for q\n _test: str\n Test for determining the value of d\n _max_p: int\n The maximal value for p: all values between _start_p and this one will be tried out\n _max_q: int\n The maximal value for q: all values between _start_q and this one will be tried out\n _d: int\n The maximum value of d, or the maximum number of non-seasonal differences. If None, this value will be determined.\n _seasonal: bool\n Seasonal component yes/no\n _D:\n The order of the seasonal differencing. If None, the value will automatically be selected based on\n the results of the seasonal_test.\n _start_P: int\n The starting value for P\n _start_Q: int\n The starting value for Q\n _max_P: int\n The maximum value for P\n _max_Q: int\n The maximum value for Q\n _seasonal_periods (m in original package): int\n The period for seasonal differencing, m refers to the number of periods in each season.\n For example, m is 4 for quarterly data, 12 for monthly data, or 1 for annual (non-seasonal) data.\n Default is 1. Note that if m == 1 (i.e., is non-seasonal),\n seasonal will be set to False.\n _aarima_trend: str or iterable, default=’c’, ref. http://www.alkaline-ml.com/pmdarima/1.0.0/modules/generated/pmdarima.arima.auto_arima.html\n Parameter controlling the deterministic trend polynomial A(t). Can be specified as a string where ‘c’\n indicates a constant (i.e. 
a degree zero component of the trend polynomial),\n ‘t’ indicates a linear trend with time, and ‘ct’ is both.\n Can also be specified as an iterable defining the polynomial as\n in numpy.poly1d, where [1,1,0,1] would denote a+bt+ct3.\n _random : bool, optional (default=False)\n Auto_arima provides the capability to perform a “random search” over a hyper-parameter space.\n If random is True, rather than perform an exhaustive search or stepwise search, only n_fits\n ARIMA models will be fit (stepwise must be False for this option to do anything).\n _n_fits : int, optional (default=10)\n If random is True and a “random search” is going to be performed, n_iter is the number of ARIMA models to be fit.\n _stepwise : bool, optional (default=True)\n Whether to use the stepwise algorithm outlined in Hyndman and Khandakar (2008) to identify the\n optimal model parameters.\n The stepwise algorithm can be significantly faster than fitting all (or a random subset of)\n hyper-parameter combinations and is less likely to over-fit the model.\n _information_criterion : str, optional (default=’aic’)\n The information criterion used to select the best ARIMA model.\n One of pmdarima.arima.auto_arima.VALID_CRITERIA, (‘aic’, ‘bic’, ‘hqic’, ‘oob’).\n _scoring : str, optional (default=’mse’)\n If performing validation (i.e., if out_of_sample_size > 0), the metric to use for scoring the\n out-of-sample data. One of {‘mse’, ‘mae’}\n _out_of_sample_size : int, optional (default=0)\n The ARIMA class can fit only a portion of the data if specified, in order to retain an “out of bag” sample score.\n This is the number of examples from the tail of the time series to hold out and use as validation examples.\n The model will not be fit on these samples, but the observations will be added into the model’s endog and exog\n arrays so that future forecast values originate from the end of the endogenous vector.\n _aarima_logger: Logger\n The logger for logging\n\n Methods\n ----------\n assertions()\n Assertion tests, must be overrided\n set_params()\n Sets new parameter values\n get_params_dict()\n Gets parameter values as a dictionary\n ts_fit()\n Fits the auto_arima model to time series\n ts_diagnose()\n Diagnoses the fitted model\n plot_residuals()\n Generates residual plots\n ts_test()\n Evaluates fitted model on the test data, if this one has been generated\n ts_forecast()\n Forecasts time series and plots the results\n plot_forecasts()\n Plots forecasted time-series\n \"\"\"\n\n def __init__(self,\n start_p=1,\n start_q=1,\n max_p=3,\n max_q=3,\n d=None,\n D=None,\n start_P=1,\n start_Q=1,\n max_P=3,\n max_Q=3,\n random = False,\n n_fits=10,\n stepwise=True,\n information_criterion='aic',\n scoring='mse',\n out_of_sample_size=0,\n **kwds):\n \"\"\"Initializes the object AutoARIMAForecaster\"\"\"\n self._aarima_logger = Logger(\"AutoARIMA\")\n self._aarima_seasonal = False\n self._aarima_trend = 'c'\n self._start_p = start_p\n self._start_q = start_q\n self._max_p = max_p\n self._max_q = max_q\n self._d = d\n self._D = D\n self._start_P = start_P\n self._start_Q = start_Q\n self._max_P = max_P\n self._max_Q = max_Q\n self._random = random\n self._n_fits = n_fits\n self._stepwise = stepwise\n self._information_criterion = information_criterion\n self._scoring = scoring\n self._out_of_sample_size = out_of_sample_size\n \n try:\n super(AutoARIMAForecaster, self).__init__(**kwds)\n except TypeError:\n self._aarima_logger.exception(\"Arguments missing...\")\n\n\n AutoARIMAForecaster._init_trend(self)\n 
AutoARIMAForecaster._init_seasonal(self)\n\n AutoARIMAForecaster.assertions(self)\n \n self._id = 'Auto_ARIMA'\n\n def _init_trend(self):\n if self._trend is None or self._trend == 'constant':\n self._aarima_trend = 'c'\n elif self._trend == 'linear':\n self._aarima_trend = 't'\n elif self._trend == 'constant linear':\n self._aarima_trend = 'ct'\n elif self._trend in ['additive', 'add']:\n # self._aarima_logger.warning(\"The trend \" + str(self._trend) + \" not supported by AutoARIMA! \"\n # \"Assuming first order trend\")\n self._aarima_trend = 'a+bt'\n elif self._trend in ['multiplicative', 'mul']:\n # self._aarima_logger.warning(\"The trend \" + str(self._trend) + \" not supported by AutoARIMA! \"\n # \"Assuming first order trend\")\n self._aarima_trend = 'a+bt'\n \n def _init_seasonal(self):\n if self._seasonal is None:\n self._aarima_seasonal = False\n if isinstance(self._seasonal, bool):\n self._aarima_seasonal = self._seasonal\n else:\n self._aarima_seasonal = False\n \n \n def __copy__(self):\n \"\"\"Copies the object\"\"\"\n result = super(AutoARIMAForecaster, self).__copy__()\n\n result._start_p = self._start_p\n result.start_q = self._start_q\n result._test = self._test\n result._max_p = self._max_p\n result._max_q = self._max_q\n result._d = self._d\n result._aarima_trend = self._aarima_trend\n result._aarima_seasonal = self._aarima_seasonal\n result._D = self._D\n result._start_P = self._start_P\n result._start_Q = self._start_Q\n result._max_P = self._max_P\n result._max_Q = self._max_Q\n result._random = self._random\n result._n_fits = self._n_fits\n result._stepwise = self._stepwise\n result._information_criterion = self._information_criterion\n result._scoring = self._scoring\n result._out_of_sample_size = self._out_of_sample_size\n\n result._aarima_logger = self._aarima_logger\n return result\n\n def assertions(self):\n try:\n assert self.hyper_params is None\n except AssertionError:\n self._aarima_logger.exception(\"Hyper parameters are not allowed for Auto ARIMA! 
\"\n \"Please specify parameters\")\n sys.exit(\"STOP\")\n\n try:\n assert self._aarima_trend is not None\n except AssertionError:\n self._aarima_logger.exception(\"Assertion Error, trend cannot be None!\")\n sys.exit(\"STOP\")\n try:\n assert isinstance(self._aarima_seasonal, bool)\n except AssertionError:\n self._aarima_logger.exception(\"Assertion Error, seasonal must be boolean True/False\")\n sys.exit(\"STOP\")\n\n def set_params(self, p_dict=None, **kwargs):\n \"\"\"Sets new parameter values\"\"\"\n params_dict = kwargs\n if p_dict is not None:\n params_dict = p_dict\n #\n for k, v in params_dict.items():\n if k == 'ts_df':\n self.ts_df = v\n elif k == 'freq':\n self.freq = v\n elif k == 'n_test':\n self.n_test = v\n elif k == 'n_val':\n self.n_val = v\n elif k == 'time_format':\n self.time_format = v\n elif k == 'start_p':\n self._start_p = v\n elif k == 'max_p':\n self._max_p = v\n elif k == 'start_q':\n self._start_q = v\n elif k == 'max_q':\n self._max_q = v\n elif k == 'd':\n self._d = v\n elif k == 'trend':\n self._aarima_trend = v\n elif k == 'seasonal':\n self._aarima_seasonal = v\n elif k == 'seasonal_periods':\n self._seasonal_periods = v\n elif k == 'start_P':\n self._start_P = v\n elif k == 'max_P':\n self._max_P = v\n elif k == 'start_Q':\n self._start_Q = v\n elif k == 'max_Q':\n self._max_Q = v\n elif k == 'D':\n self._D = v\n elif k == 'random':\n self._random = v\n elif k == 'n_fits':\n self._n_fits = v\n elif k == 'stepwise':\n self._stepwise = v\n elif k == 'information_criterion':\n self._information_criterion = v\n elif k == 'scoring':\n self._scoring = v\n elif k == 'out_of_sample_size':\n self._out_of_sample_size = v\n self.assertions()\n\n return self\n\n def get_params_dict(self):\n \"\"\"Gets parameter values as dictionary\"\"\"\n return {'start_p': self._start_p,\n 'start_q': self._start_q,\n 'test': self._test,\n 'max_p': self._max_p,\n 'max_q': self._max_q,\n 'd': self._d,\n 'trend': self._aarima_trend,\n 'seasonal': self._aarima_seasonal,\n 'seasonal_periods': self._seasonal_periods,\n 'D': self._D,\n 'start_P': self._start_P,\n 'start_Q': self._start_Q,\n 'max_P': self._max_P,\n 'max_Q': self._max_Q,\n 'random': self._random,\n 'n_fits': self._n_fits,\n 'stepwise': self._stepwise,\n 'information_criterion': self._information_criterion,\n 'scoring': self._scoring,\n 'out_of_sample_size': self. 
_out_of_sample_size\n }\n\n def ts_fit(self, suppress=False):\n \"\"\"Fit Auto ARIMA to the time series data.\n\n Parameters:\n ----------\n suppress: bool\n Suppress or not some of the output messages\n \"\"\"\n self._prepare_fit()\n self.ts_split()\n self._init_trend()\n self._init_seasonal()\n\n ts_df = self._train_dt.copy()\n\n \"\"\"\n Fit\n \"\"\"\n self._aarima_logger.info(\"Trying to fit the Auto ARIMA model....\")\n # tic\n start = time()\n try:\n if not suppress:\n self._aarima_logger.info(\"...via using parameters\\n\")\n print_attributes(self)\n\n self.model_fit = pm.auto_arima(ts_df,\n start_p=self._start_p,\n start_q=self._start_q,\n test=self._test,\n max_p=self._max_p,\n m=self._seasonal_periods,\n d=self._d,\n seasonal=self._aarima_seasonal,\n D=self._D,\n start_P=self._start_P,\n max_P=self._max_P,\n trend=self._aarima_trend,\n trace=True,\n error_action='ignore',\n suppress_warnings=True,\n stepwise=self._stepwise,\n random=self._random,\n n_fits=self._n_fits,\n scoring=self._scoring,\n out_of_sample_size=self._out_of_sample_size,\n information_criterion=self._information_criterion)\n except (Exception, ValueError):\n self._aarima_logger.exception(\"Exception occurred in the fit...\")\n self._aarima_logger.warning(\"Will try to reset some parameters...\")\n try:\n self.model_fit = pm.auto_arima(ts_df,\n start_p=self._start_p,\n start_q=self._start_q,\n test=self._test,\n max_p=self._max_p,\n m=1,\n d=0,\n seasonal=self._aarima_seasonal,\n D=0,\n start_P=self._start_P,\n max_P=self._max_P,\n trend=self._aarima_trend,\n trace=True,\n error_action='ignore',\n suppress_warnings=True,\n stepwise=self._stepwise,\n random=self._random,\n n_fits=self._n_fits,\n scoring=self._scoring,\n out_of_sample_size=self._out_of_sample_size,\n information_criterion=self._information_criterion)\n except (Exception, ValueError):\n self._aarima_logger.exception(\"Exception occurred\")\n self._aarima_logger.error(\"Please try other parameters!\")\n self.model_fit = None\n\n else:\n # toc\n self._aarima_logger.info(\"Time elapsed: {} sec.\".format(time() - start))\n #\n self._aarima_logger.info(\"Model successfully fitted to the data!\")\n self._aarima_logger.info(\"The chosen model AIC: \" + str(self.model_fit.aic()))\n\n # Fitted values\n self._aarima_logger.info(\"Computing fitted values and residuals...\")\n self.fittedvalues = pd.Series(self.model_fit.predict_in_sample(start=0, end=(len(ts_df) - 1)),\n index=ts_df.index)\n # Residuals\n super(AutoARIMAForecaster, self)._residuals()\n \n self._aarima_logger.info(\"Done.\")\n return self\n\n def ts_diagnose(self):\n \"\"\"Diagnose the model\"\"\"\n try:\n assert self.model_fit is not None\n except AssertionError:\n self._aarima_logger.exception(\"Model has to be fitted first! 
Please call ts_fit(...)\")\n sys.exit(\"STOP\")\n\n self.model_fit.plot_diagnostics(figsize=(9, 3.5))\n self.plot_residuals()\n\n def plot_residuals(self):\n \"\"\"Plot the residuals\"\"\"\n fig, axis = super(AutoARIMAForecaster, self)._plot_residuals(y=np.asarray(self._train_dt['y']),\n yhat=np.asarray(self.fittedvalues),\n _id=\" Auto ARIMA\")\n\n plt.gcf().autofmt_xdate()\n plt.grid(True)\n plt.show()\n\n def ts_test(self, show_plot=True):\n \"\"\"Test the fitted model if test data available\"\"\"\n if super(AutoARIMAForecaster, self)._check_ts_test() < 0:\n return\n\n n_forecast = len(self._test_dt)\n\n self._aarima_logger.info(\"Evaluating the fitted ARIMA model on the test data...\")\n future, confint = self.model_fit.predict(n_periods=n_forecast, return_conf_int=True)\n self.forecast = pd.Series(future, index=self._test_dt.index)\n self.lower_conf_int = pd.Series(confint[:, 0], index=self._test_dt.index)\n self.upper_conf_int = pd.Series(confint[:, 1], index=self._test_dt.index)\n\n self.residuals_forecast = pd.Series(np.asarray(self._test_dt['y']) - np.asarray(self.forecast),\n index=self._test_dt.index)\n self.measure_rmse()\n self._aarima_logger.info(\"RMSE on test data: {}\".format(self.rmse))\n\n # plot\n if show_plot:\n self.plot_forecast()\n\n def ts_forecast(self, n_forecast, suppress=False):\n \"\"\"Forecast time series over time frame in the future specified via n_forecast\"\"\"\n #\n n_forecast = super(AutoARIMAForecaster, self)._check_ts_forecast(n_forecast)\n #\n self._aarima_logger.info(\"Fitting using all data....\")\n self._mode = 'forecast'\n self.ts_fit(suppress=suppress)\n\n self._aarima_logger.info(\"Forecasting next \" + str(n_forecast) + str(self.ts_df.index.freq))\n #\n future, confint = self.model_fit.predict(n_periods=n_forecast, return_conf_int=True)\n idx_future = self._gen_idx_future(n_forecast=n_forecast)\n self.forecast = pd.Series(future, index=idx_future)\n if self.lower_conf_int is None and self.upper_conf_int is None:\n self.lower_conf_int = pd.Series(confint[:, 0], index=idx_future)\n self.upper_conf_int = pd.Series(confint[:, 1], index=idx_future)\n else:\n self.lower_conf_int = pd.concat([self.lower_conf_int, pd.Series(confint[:, 0], index=idx_future)], axis=0)\n self.upper_conf_int = pd.concat([self.upper_conf_int, pd.Series(confint[:, 1], index=idx_future)], axis=0)\n\n self.residuals_forecast = None\n # self.plot_forecast()\n return self\n\n def plot_forecast(self):\n \"\"\"Plot forecasted values\"\"\"\n fig, axis = super(AutoARIMAForecaster, self)._plot_forecast(y=np.asarray(self._train_dt['y']),\n yhat=np.asarray(self.fittedvalues),\n forecast=self.forecast, _id='Auto ARIMA')\n plt.gcf().autofmt_xdate()\n plt.grid(True)\n plt.show()\n"
] |
[
[
"pandas.Series",
"numpy.asarray",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.show"
]
] |
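Note that the class above reads `self._test` (in `__copy__`, `get_params_dict` and `ts_fit`) without ever assigning it in `__init__`, so `ts_fit` would raise an `AttributeError` as listed. A standalone sketch of the `pm.auto_arima` call that `ts_fit` wraps, with an explicit `test` argument ('kpss' is pmdarima's default for selecting d and an assumption here); the synthetic series is purely illustrative:

```python
import numpy as np
import pandas as pd
import pmdarima as pm

y = pd.Series(10 + np.sin(np.linspace(0, 20, 120)) + np.random.normal(0, 0.1, 120))

fit = pm.auto_arima(y,
                    start_p=1, start_q=1, max_p=3, max_q=3,
                    d=None, test='kpss',          # let auto_arima pick d via the KPSS test
                    seasonal=False,
                    stepwise=True,
                    information_criterion='aic',
                    error_action='ignore', suppress_warnings=True)

future, confint = fit.predict(n_periods=12, return_conf_int=True)
```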
yukihiko/human-pose-estimation.pytorch
|
[
"187f60ac2088f91b9a48d9e5f4c2b1053a9f32bf"
] |
[
"lib/core/loss.py"
] |
[
"# ------------------------------------------------------------------------------\r\n# Copyright (c) Microsoft\r\n# Licensed under the MIT License.\r\n# Written by Bin Xiao (Bin.Xiao@microsoft.com)\r\n# ------------------------------------------------------------------------------\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport scipy.ndimage.filters as fi\r\n\r\n\r\nclass JointsMSELoss(nn.Module):\r\n def __init__(self, use_target_weight, heatmap_size):\r\n super(JointsMSELoss, self).__init__()\r\n self.criterion = nn.MSELoss(size_average=True)\r\n self.use_target_weight = use_target_weight\r\n self.col = float(heatmap_size)\r\n self.scale = 224./float(self.col)\r\n self.gaussian = 1.0\r\n\r\n def min_max(self, x, axis=None):\r\n min = x.min(axis=axis, keepdims=True)\r\n max = x.max(axis=axis, keepdims=True)\r\n result = (x-min)/(max-min)\r\n return torch.Tensor(result)\r\n\r\n def checkMatrix(self, xi, yi):\r\n f = False\r\n if xi >= 0 and xi <= self.col - 1 and yi >= 0 and yi <= self.col - 1:\r\n f = True\r\n return xi, yi, f\r\n\r\n def forward(self, offset, heatmap, target, target_weight, meta, isValid=False, useOffset=False):\r\n batch_size = heatmap.size(0)\r\n num_joints = heatmap.size(1)\r\n \r\n joints = meta['joints']\r\n joints_vis = meta['joints_vis']\r\n joints = joints[:, :, :2].float().cuda()\r\n joints_vis = joints_vis[:, :, :2].float().cuda()\r\n x = Variable(torch.zeros(joints.size()).float(), requires_grad=True).cuda()\r\n\r\n '''\r\n heatmaps_pred = heatmap.reshape((batch_size, num_joints, -1)).split(1, 1)\r\n heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)\r\n loss = 0\r\n\r\n for idx in range(num_joints):\r\n heatmap_pred = heatmaps_pred[idx].squeeze()\r\n heatmap_gt = heatmaps_gt[idx].squeeze()\r\n if self.use_target_weight:\r\n loss += 0.5 * self.criterion(heatmap_pred.mul(target_weight[:, idx]), heatmap_gt.mul(target_weight[:, idx]))\r\n else:\r\n loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)\r\n\r\n d1 = loss / num_joints\r\n '''\r\n reshaped = heatmap.view(-1, num_joints, int(self.col*self.col))\r\n _, argmax = reshaped.max(-1)\r\n yCoords = argmax/self.col\r\n xCoords = argmax - yCoords*self.col\r\n\r\n s = heatmap.size()\r\n tt = torch.zeros(s).float()\r\n ti = joints/self.scale\r\n\r\n for i in range(batch_size):\r\n for j in range(num_joints):\r\n #if h[i, j, yCoords[i, j], xCoords[i, j]] > 0.5:\r\n x[i, j, 0] = (offset[i, j, yCoords[i, j], xCoords[i, j]] + xCoords[i, j].float()) * self.scale\r\n x[i, j, 1] = (offset[i, j + num_joints, yCoords[i, j], xCoords[i, j]] + yCoords[i, j].float()) * self.scale\r\n\r\n if int(target_weight[i, j, 0]) >= 0.5:\r\n xi, yi, f = self.checkMatrix(int(ti[i, j, 0]), int(ti[i, j, 1]))\r\n \r\n if f == True:\r\n # 正規分布に近似したサンプルを得る\r\n # 平均は 100 、標準偏差を 1 \r\n tt[i, j, yi, xi] = 1\r\n tt[i, j] = self.min_max(fi.gaussian_filter(tt[i, j], self.gaussian))\r\n else:\r\n target_weight[i, j, 0] = 0\r\n #target_weight[i, j, 1] = 0\r\n \r\n diff1 = heatmap - target\r\n '''\r\n cnt = 0\r\n for i in range(batch_size):\r\n for j in range(num_joints):\r\n if int(target_weight[i, j, 0]) == 0:\r\n diff1[i, j] = diff1[i, j]*0\r\n else:\r\n cnt = cnt + 1\r\n diff1 = diff1.view(-1)\r\n d1 = diff1.dot(diff1) / cnt\r\n '''\r\n diff1 = diff1.view(-1)\r\n d1 = diff1.dot(diff1) / (batch_size*num_joints)\r\n\r\n if useOffset == False:\r\n return d1, x, tt, 
target_weight\r\n\r\n diff2 = (x - joints)\r\n '''\r\n diff2 = diff2*joints_vis/112.\r\n N2 = (joints_vis.sum()).data[0]/2.0\r\n diff2 = diff2.view(-1)\r\n d2 = 0.5 * torch.sqrt(diff2.dot(diff2))/N2\r\n '''\r\n diff2 = diff2.view(-1)\r\n d2 = 0.5 * torch.sqrt(diff2.dot(diff2))/(batch_size*num_joints)\r\n return d1 + d2, x, tt, target_weight\r\n"
] |
[
[
"scipy.ndimage.filters.gaussian_filter",
"torch.nn.MSELoss",
"torch.Tensor",
"torch.zeros"
]
] |
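A small sketch of the ground-truth heatmap construction buried in `forward()` above: drop a 1 at the joint's integer heatmap coordinate, blur it with `gaussian_filter`, then min-max normalise exactly as `min_max` does. The heatmap size and joint location below are illustrative assumptions:

```python
import numpy as np
import scipy.ndimage.filters as fi
import torch

col, sigma = 56, 1.0                     # heatmap_size and self.gaussian (illustrative)
xi, yi = 20, 30                          # joint location in heatmap coordinates (assumed)

tt = np.zeros((col, col), dtype=np.float32)
tt[yi, xi] = 1.0                         # unit impulse at the joint
tt = fi.gaussian_filter(tt, sigma)       # approximate a 2-D normal around the joint
tt = torch.Tensor((tt - tt.min()) / (tt.max() - tt.min()))  # min_max: peak rescaled to 1
print(float(tt[yi, xi]))                 # 1.0
```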
jercas/TextBoxes_plusplus_tf
|
[
"035a5d202202662e2b1230e05a41d20a044b9145",
"035a5d202202662e2b1230e05a41d20a044b9145"
] |
[
"datasets/synthtext2TFrecords_self.py",
"datasets/TFrecords2Dataset.py"
] |
[
"import time\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport tensorflow.contrib.slim as slim\r\nimport util\r\n\r\n\r\ndef int64_feature(value):\r\n \"\"\"Wrapper for inserting int64 features into Example proto.\r\n \"\"\"\r\n if not isinstance(value, list):\r\n value = [value]\r\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\r\n\r\n\r\ndef float_feature(value):\r\n \"\"\"Wrapper for inserting float features into Example proto.\r\n \"\"\"\r\n if not isinstance(value, list):\r\n value = [value]\r\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\r\n\r\n\r\ndef bytes_feature(value):\r\n \"\"\"Wrapper for inserting bytes features into Example proto.\r\n \"\"\"\r\n if not isinstance(value, list):\r\n value = [value]\r\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\r\n\r\n\r\ndef image_to_tfexample(image_data, image_format, height, width, class_id):\r\n return tf.train.Example(features=tf.train.Features(feature={\r\n 'image/encoded': bytes_feature(image_data),\r\n 'image/format': bytes_feature(image_format),\r\n 'image/class/label': int64_feature(class_id),\r\n 'image/height': int64_feature(height),\r\n 'image/width': int64_feature(width),\r\n }))\r\n\r\n\r\ndef convert_to_example(image_data, filename, labels, ignored, labels_text, bboxes, oriented_bboxes, shape):\r\n \"\"\"Build an Example proto for an image example.\r\n Args:\r\n image_data: string, JPEG encoding of RGB image\r\n labels: list of integers, identifier for the ground truth\r\n labels_text: list of strings, human-readable labels\r\n oriented_bboxes: list of bounding oriented boxes each box is a list of floats in [0, 1]\r\n specifying [x1, y1, x2, y2, x3, y3, x4, y4]\r\n bboxes: list of bbox in rectangle, [xmin, ymin, xmax, ymax] \r\n Returns:\r\n Example proto\r\n \"\"\"\r\n \r\n image_format = b'JPEG'\r\n oriented_bboxes = np.asarray(oriented_bboxes)\r\n bboxes = np.asarray(bboxes)\r\n example = tf.train.Example(features=tf.train.Features(feature={\r\n 'image/shape': int64_feature(list(shape)),\r\n 'image/object/bbox/xmin': float_feature(list(bboxes[:, 0])),\r\n 'image/object/bbox/ymin': float_feature(list(bboxes[:, 1])),\r\n 'image/object/bbox/xmax': float_feature(list(bboxes[:, 2])),\r\n 'image/object/bbox/ymax': float_feature(list(bboxes[:, 3])),\r\n 'image/object/bbox/x1': float_feature(list(oriented_bboxes[:, 0])),\r\n 'image/object/bbox/y1': float_feature(list(oriented_bboxes[:, 1])),\r\n 'image/object/bbox/x2': float_feature(list(oriented_bboxes[:, 2])),\r\n 'image/object/bbox/y2': float_feature(list(oriented_bboxes[:, 3])),\r\n 'image/object/bbox/x3': float_feature(list(oriented_bboxes[:, 4])),\r\n 'image/object/bbox/y3': float_feature(list(oriented_bboxes[:, 5])),\r\n 'image/object/bbox/x4': float_feature(list(oriented_bboxes[:, 6])),\r\n 'image/object/bbox/y4': float_feature(list(oriented_bboxes[:, 7])),\r\n 'image/object/bbox/label': int64_feature(labels),\r\n 'image/object/bbox/label_text': bytes_feature(labels_text),\r\n 'image/object/bbox/ignored': int64_feature(ignored),\r\n 'image/format': bytes_feature(image_format),\r\n 'image/filename': bytes_feature(filename),\r\n 'image/encoded': bytes_feature(image_data)}))\r\n return example\r\n\r\n\r\n\r\ndef get_split(split_name, dataset_dir, file_pattern, num_samples, reader=None):\r\n dataset_dir = util.io.get_absolute_path(dataset_dir)\r\n \r\n if util.str.contains(file_pattern, '%'):\r\n file_pattern = util.io.join_path(dataset_dir, file_pattern % split_name)\r\n else:\r\n file_pattern = 
util.io.join_path(dataset_dir, file_pattern)\r\n # Allowing None in the signature so that dataset_factory can use the default.\r\n if reader is None:\r\n reader = tf.TFRecordReader\r\n keys_to_features = {\r\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\r\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),\r\n 'image/filename': tf.FixedLenFeature((), tf.string, default_value=''),\r\n 'image/shape': tf.FixedLenFeature([3], tf.int64),\r\n 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/x1': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/x2': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/x3': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/x4': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/y1': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/y2': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/y3': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/y4': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/ignored': tf.VarLenFeature(dtype=tf.int64),\r\n 'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),\r\n }\r\n items_to_handlers = {\r\n 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),\r\n 'shape': slim.tfexample_decoder.Tensor('image/shape'),\r\n 'filename': slim.tfexample_decoder.Tensor('image/filename'),\r\n 'object/bbox': slim.tfexample_decoder.BoundingBox(\r\n ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),\r\n 'object/oriented_bbox/x1': slim.tfexample_decoder.Tensor('image/object/bbox/x1'),\r\n 'object/oriented_bbox/x2': slim.tfexample_decoder.Tensor('image/object/bbox/x2'),\r\n 'object/oriented_bbox/x3': slim.tfexample_decoder.Tensor('image/object/bbox/x3'),\r\n 'object/oriented_bbox/x4': slim.tfexample_decoder.Tensor('image/object/bbox/x4'),\r\n 'object/oriented_bbox/y1': slim.tfexample_decoder.Tensor('image/object/bbox/y1'),\r\n 'object/oriented_bbox/y2': slim.tfexample_decoder.Tensor('image/object/bbox/y2'),\r\n 'object/oriented_bbox/y3': slim.tfexample_decoder.Tensor('image/object/bbox/y3'),\r\n 'object/oriented_bbox/y4': slim.tfexample_decoder.Tensor('image/object/bbox/y4'),\r\n 'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),\r\n 'object/ignored': slim.tfexample_decoder.Tensor('image/object/bbox/ignored')\r\n }\r\n decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)\r\n\r\n labels_to_names = {0:'background', 1:'text'}\r\n items_to_descriptions = {\r\n 'image': 'A color image of varying height and width.',\r\n 'shape': 'Shape of the image',\r\n 'object/bbox': 'A list of bounding boxes, one per each object.',\r\n 'object/label': 'A list of labels, one per each object.',\r\n }\r\n\r\n return slim.dataset.Dataset(\r\n data_sources=file_pattern,\r\n reader=reader,\r\n decoder=decoder,\r\n num_samples=num_samples,\r\n items_to_descriptions=items_to_descriptions,\r\n num_classes=2,\r\n labels_to_names=labels_to_names)\r\n\r\n\r\nclass SynthTextDataFetcher():\r\n def __init__(self, mat_path, root_path):\r\n self.mat_path = mat_path\r\n self.root_path = root_path\r\n self._load_mat()\r\n \r\n # @util.dec.print_calling \r\n def _load_mat(self):\r\n data = util.io.load_mat(self.mat_path)\r\n self.image_paths = 
data['imnames'][0]\r\n self.image_bbox = data['wordBB'][0]\r\n self.txts = data['txt'][0]\r\n self.num_images = len(self.image_paths)\r\n\r\n def get_image_path(self, idx):\r\n image_path = util.io.join_path(self.root_path, self.image_paths[idx][0])\r\n return image_path\r\n\r\n def get_num_words(self, idx):\r\n try:\r\n return np.shape(self.image_bbox[idx])[2]\r\n except: # error caused by dataset\r\n return 1\r\n\r\n\r\n def get_word_bbox(self, img_idx, word_idx):\r\n boxes = self.image_bbox[img_idx]\r\n if len(np.shape(boxes)) ==2: # error caused by dataset\r\n boxes = np.reshape(boxes, (2, 4, 1))\r\n \r\n xys = boxes[:,:, word_idx]\r\n assert(np.shape(xys) ==(2, 4))\r\n return np.float32(xys)\r\n \r\n def normalize_bbox(self, xys, width, height):\r\n xs = xys[0, :]\r\n ys = xys[1, :]\r\n \r\n min_x = min(xs)\r\n min_y = min(ys)\r\n max_x = max(xs)\r\n max_y = max(ys)\r\n \r\n # bound them in the valid range\r\n min_x = max(0, min_x)\r\n min_y = max(0, min_y)\r\n max_x = min(width, max_x)\r\n max_y = min(height, max_y)\r\n \r\n # check the w, h and area of the rect\r\n w = max_x - min_x\r\n h = max_y - min_y\r\n is_valid = True\r\n \r\n if w < 10 or h < 10:\r\n is_valid = False\r\n \r\n if w * h < 100:\r\n is_valid = False\r\n \r\n xys[0, :] = xys[0, :] / width\r\n xys[1, :] = xys[1, :] / height\r\n \r\n return is_valid, min_x / width, min_y /height, max_x / width, max_y / height, xys\r\n \r\n def get_txt(self, image_idx, word_idx):\r\n txts = self.txts[image_idx]\r\n clean_txts = []\r\n for txt in txts:\r\n clean_txts += txt.split()\r\n return str(clean_txts[word_idx])\r\n \r\n \r\n def fetch_record(self, image_idx):\r\n image_path = self.get_image_path(image_idx)\r\n if not (util.io.exists(image_path)):\r\n return None\r\n img = util.img.imread(image_path)\r\n h, w = img.shape[0:-1]\r\n num_words = self.get_num_words(image_idx)\r\n rect_bboxes = []\r\n full_bboxes = []\r\n txts = []\r\n for word_idx in range(num_words):\r\n xys = self.get_word_bbox(image_idx, word_idx) \r\n is_valid, min_x, min_y, max_x, max_y, xys = self.normalize_bbox(xys, width = w, height = h)\r\n if not is_valid:\r\n continue\r\n rect_bboxes.append([min_x, min_y, max_x, max_y])\r\n xys = np.reshape(np.transpose(xys), -1)\r\n full_bboxes.append(xys)\r\n txt = self.get_txt(image_idx, word_idx)\r\n txts.append(txt)\r\n if len(rect_bboxes) == 0:\r\n return None\r\n \r\n return image_path, img, txts, rect_bboxes, full_bboxes\r\n \r\n \r\n\r\ndef cvt_to_tfrecords(output_path , data_path, gt_path, records_per_file = 30000):\r\n\r\n fetcher = SynthTextDataFetcher(root_path = data_path, mat_path = gt_path)\r\n fid = 0\r\n image_idx = -1\r\n while image_idx < fetcher.num_images:\r\n with tf.python_io.TFRecordWriter(output_path%(fid)) as tfrecord_writer:\r\n record_count = 0\r\n while record_count != records_per_file:\r\n image_idx += 1\r\n if image_idx >= fetcher.num_images:\r\n break\r\n print(\"loading image %d/%d\"%(image_idx + 1, fetcher.num_images))\r\n record = fetcher.fetch_record(image_idx)\r\n if record is None:\r\n print('\\nimage %d does not exist'%(image_idx + 1))\r\n continue\r\n\r\n image_path, image, txts, rect_bboxes, oriented_bboxes = record\r\n labels = len(rect_bboxes) * [1]\r\n ignored = len(rect_bboxes) * [0]\r\n image_data = tf.gfile.FastGFile(image_path, 'r').read()\r\n \r\n shape = image.shape\r\n image_name = str(util.io.get_filename(image_path).split('.')[0])\r\n example = convert_to_example(image_data, image_name, labels, ignored, txts, rect_bboxes, oriented_bboxes, shape)\r\n 
tfrecord_writer.write(example.SerializeToString())\r\n record_count += 1\r\n\r\n fid += 1\r\n \r\nif __name__ == \"__main__\":\r\n mat_path = util.io.get_absolute_path('/share/SynthText/gt.mat')\r\n root_path = util.io.get_absolute_path('/share/SynthText/')\r\n output_dir = util.io.get_absolute_path('/home/zsz/datasets/synth-tf/')\r\n util.io.mkdir(output_dir)\r\n cvt_to_tfrecords(output_path = util.io.join_path(output_dir, 'SynthText_%d.tfrecord'),\r\n data_path = root_path,\r\n gt_path = mat_path)",
"import os\r\nimport glob\r\nimport tensorflow as tf\r\nimport tensorflow.contrib.slim as slim\r\n\r\n\"\"\"\r\nTransform the tfrecord to tf.slim data provider format\r\n\"\"\"\r\n\r\nITEMS_TO_DESCRIPTIONS = {\r\n 'image': 'A color image of varying height and width.',\r\n 'shape': 'Shape of the image',\r\n 'object/bbox': 'A list of bounding boxes, one per each object.',\r\n 'object/label': 'A list of labels, one per each object.',\r\n}\r\n\r\nSPLITS_TO_SIZES = {\r\n #'train': 2518 # for ppt datasets\r\n #'train': 858750 # for synth text datasets\r\n 'train': 1000 # for icdar 2015 datasets\r\n}\r\n\r\nNUM_CLASSES = 2\r\n\r\ndef get_datasets(data_dir, file_pattern = '*.tfrecord'):\r\n file_patterns = os.path.join(data_dir, file_pattern)\r\n print('file_path: {}'.format(file_patterns))\r\n file_path_list = glob.glob(file_patterns)\r\n num_samples = 0\r\n #num_samples = 288688 #only for ppt datasets\r\n #num_samples = 858750 #only for synth datasets\r\n\r\n for file_path in file_path_list:\r\n for _ in tf.python_io.tf_record_iterator(file_path):\r\n num_samples += 1\r\n print('num_samples:', num_samples)\r\n\r\n reader = tf.TFRecordReader\r\n\r\n keys_to_features = {\r\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\r\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),\r\n 'image/filename': tf.FixedLenFeature((), tf.string, default_value=''),\r\n 'image/shape': tf.FixedLenFeature([3], tf.int64),\r\n 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/x1': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/x2': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/x3': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/x4': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/y1': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/y2': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/y3': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/y4': tf.VarLenFeature(dtype=tf.float32),\r\n 'image/object/bbox/ignored': tf.VarLenFeature(dtype=tf.int64),\r\n 'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),\r\n }\r\n\r\n items_to_handlers = {\r\n 'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),\r\n 'shape': slim.tfexample_decoder.Tensor('image/shape'),\r\n 'filename': slim.tfexample_decoder.Tensor('image/filename'),\r\n 'object/bbox': slim.tfexample_decoder.BoundingBox(\r\n ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),\r\n 'object/oriented_bbox/x1': slim.tfexample_decoder.Tensor('image/object/bbox/x1'),\r\n 'object/oriented_bbox/x2': slim.tfexample_decoder.Tensor('image/object/bbox/x2'),\r\n 'object/oriented_bbox/x3': slim.tfexample_decoder.Tensor('image/object/bbox/x3'),\r\n 'object/oriented_bbox/x4': slim.tfexample_decoder.Tensor('image/object/bbox/x4'),\r\n 'object/oriented_bbox/y1': slim.tfexample_decoder.Tensor('image/object/bbox/y1'),\r\n 'object/oriented_bbox/y2': slim.tfexample_decoder.Tensor('image/object/bbox/y2'),\r\n 'object/oriented_bbox/y3': slim.tfexample_decoder.Tensor('image/object/bbox/y3'),\r\n 'object/oriented_bbox/y4': slim.tfexample_decoder.Tensor('image/object/bbox/y4'),\r\n 'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),\r\n 'object/ignored': 
slim.tfexample_decoder.Tensor('image/object/bbox/ignored')\r\n }\r\n\r\n decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)\r\n\r\n labels_to_names = {0:'background', 1:'text'}\r\n\r\n return slim.dataset.Dataset(\r\n data_sources=file_patterns,\r\n reader=reader,\r\n decoder=decoder,\r\n num_samples=num_samples,\r\n items_to_descriptions=ITEMS_TO_DESCRIPTIONS,\r\n num_classes=NUM_CLASSES,\r\n labels_to_names=labels_to_names)"
] |
[
[
"tensorflow.contrib.slim.tfexample_decoder.TFExampleDecoder",
"tensorflow.FixedLenFeature",
"numpy.asarray",
"numpy.reshape",
"tensorflow.contrib.slim.dataset.Dataset",
"numpy.transpose",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.contrib.slim.tfexample_decoder.BoundingBox",
"numpy.shape",
"tensorflow.contrib.slim.tfexample_decoder.Tensor",
"numpy.float32",
"tensorflow.VarLenFeature",
"tensorflow.train.FloatList",
"tensorflow.train.BytesList",
"tensorflow.contrib.slim.tfexample_decoder.Image",
"tensorflow.gfile.FastGFile",
"tensorflow.train.Int64List"
],
[
"tensorflow.contrib.slim.tfexample_decoder.TFExampleDecoder",
"tensorflow.FixedLenFeature",
"tensorflow.contrib.slim.dataset.Dataset",
"tensorflow.contrib.slim.tfexample_decoder.BoundingBox",
"tensorflow.contrib.slim.tfexample_decoder.Tensor",
"tensorflow.VarLenFeature",
"tensorflow.python_io.tf_record_iterator",
"tensorflow.contrib.slim.tfexample_decoder.Image"
]
] |
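A sketch of consuming the `slim.dataset.Dataset` returned by `get_datasets()` above with a `DatasetDataProvider`, as was idiomatic for TF 1.x / `tf.contrib.slim`; the data directory is a placeholder, not a path from the repo:

```python
import tensorflow as tf
import tensorflow.contrib.slim as slim

from datasets.TFrecords2Dataset import get_datasets  # module path from the listing above

dataset = get_datasets('/data/icdar2015-tf', file_pattern='*.tfrecord')  # placeholder dir
provider = slim.dataset_data_provider.DatasetDataProvider(
    dataset, num_readers=2, shuffle=True)
image, shape, bboxes, labels = provider.get(
    ['image', 'shape', 'object/bbox', 'object/label'])

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # feed the reader queues
    img_val, bb_val = sess.run([image, bboxes])
    coord.request_stop()
    coord.join(threads)
```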
rddunphy/MusicClassification
|
[
"efa58e474d2ff68cc24a07dd19b61e4dbac93765"
] |
[
"results/lstm/hpcp_lstm COMP 15s bs=10 ts=2000/12bin_lstm_classifier_comp.py"
] |
[
"#!/usr/bin/env python3\n\nimport csv\nimport os\nimport pickle\nfrom datetime import datetime\n\nimport numpy as np\nimport tensorflow as tf\n\nHPCP_PATH = \"corpus/bcrm_hpcp_12bin\" # Source directory\nCHKPT_INTERVAL = 50 # How often to create checkpoints\nINPUT_DIMENSION = 12 # Number of HPCP bins\nSEQUENCE_LENGTH = 2570 # 10322 for 60 second samples\nBATCH_SIZE = 10\nLABELS = ['cor', 'viv', 'bac', 'hay', 'moz', 'bee',\n 'bra', 'tch', 'mah', 'str', 'sho', 'mes']\nN_EPOCHS = 1000\nTRAIN_SAMPLES = 2000 # 500 for 60 second samples\nN_HIDDEN = 24\n\n\ndef _one_hot(sample):\n return [1 if sample[2:5] == x else 0 for x in LABELS]\n\n\ndef _load_data(path, limit=None):\n samples = os.listdir(path)\n np.random.shuffle(samples)\n d = []\n o = []\n if not limit:\n limit = len(samples)\n for i in range(limit):\n sample = samples[i]\n file = os.path.join(path, sample)\n d.append(pickle.load(open(file, 'rb')))\n o.append(_one_hot(sample))\n return d, o\n\n\nif __name__ == '__main__':\n n_batches = int(TRAIN_SAMPLES/BATCH_SIZE)\n\n run_id = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n data_path = os.path.join(\"tf\", \"lstm_data_\" + run_id)\n log_path = os.path.join(data_path, \"log\")\n acc_csv_path = os.path.join(data_path, \"acc.csv\")\n\n if not os.path.isdir(data_path):\n os.mkdir(data_path)\n\n with open(acc_csv_path, 'w') as csv_file:\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n writer.writerow([\"Epoch\", \"Training accuracy\",\n \"Validation accuracy\"])\n\n valid_path = os.path.join(HPCP_PATH, \"valid\")\n valid_input, valid_output = _load_data(valid_path)\n\n data = tf.placeholder(\n tf.float32, [None, SEQUENCE_LENGTH, INPUT_DIMENSION])\n target = tf.placeholder(tf.float32, [None, len(LABELS)])\n\n cell = tf.nn.rnn_cell.LSTMCell(N_HIDDEN, state_is_tuple=True)\n\n val, state = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32)\n val = tf.transpose(val, [1, 0, 2])\n last = tf.gather(val, int(val.get_shape()[0]) - 1)\n\n W = tf.Variable(tf.truncated_normal(\n [N_HIDDEN, int(target.get_shape()[1])]))\n b = tf.Variable(tf.constant(0.1, shape=[target.get_shape()[1]]))\n\n prediction = tf.nn.softmax(tf.matmul(last, W) + b)\n\n clipped = tf.clip_by_value(prediction, 1e-10, 1.0)\n cross_entropy = -tf.reduce_sum(target * tf.log(clipped))\n\n optimizer = tf.train.AdamOptimizer()\n minimize = optimizer.minimize(cross_entropy)\n\n mistakes = tf.not_equal(tf.argmax(target, 1),\n tf.argmax(prediction, 1))\n error = tf.reduce_mean(tf.cast(mistakes, tf.float32))\n accuracy = tf.subtract(1.0, error)\n\n conf_matrix = tf.confusion_matrix(\n tf.argmax(target, 1), tf.argmax(prediction, 1))\n\n tf.summary.scalar('accuracy', accuracy)\n\n sess = tf.Session()\n\n merged = tf.summary.merge_all()\n train_log_path = os.path.join(log_path, \"train\")\n train_writer = tf.summary.FileWriter(train_log_path, sess.graph)\n test_log_path = os.path.join(log_path, \"validation\")\n test_writer = tf.summary.FileWriter(test_log_path)\n\n saver = tf.train.Saver()\n\n tf.global_variables_initializer().run(session=sess)\n\n train_path = os.path.join(HPCP_PATH, \"train\")\n for e in range(N_EPOCHS):\n train_input, train_output = _load_data(\n train_path, limit=TRAIN_SAMPLES)\n ptr = 0\n for j in range(n_batches):\n in_ = train_input[ptr:ptr+BATCH_SIZE]\n out = train_output[ptr:ptr+BATCH_SIZE]\n ptr += BATCH_SIZE\n sess.run(minimize, {data: in_, target: out})\n\n train_sum, train_acc = sess.run(\n [merged, accuracy],\n {data: train_input, target: train_output})\n 
train_writer.add_summary(train_sum, e)\n\n # Calculate and write validation accuracy\n test_sum, test_acc = sess.run(\n [merged, accuracy],\n {data: valid_input, target: valid_output})\n test_writer.add_summary(test_sum, e)\n print((\"Epoch {:3d}: training accuracy {:3.1f}%, \"\n \"validation accuracy {:3.1f}%\").format(\n e + 1, 100 * train_acc, 100 * test_acc))\n\n # Save accuracy to CSV\n with open(acc_csv_path, 'a') as csv_file:\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n csv_row = [e+1, 100 * train_acc, 100 * test_acc]\n writer.writerow(csv_row)\n\n if (e+1) % CHKPT_INTERVAL == 0 or e == N_EPOCHS - 1:\n # Create checkpoint and print confusion matrix\n chkpt_dir = \"chkpt_{}\".format(e+1)\n save_path = os.path.join(data_path, chkpt_dir)\n saver.save(sess, save_path)\n print(sess.run(conf_matrix,\n {data: valid_input, target: valid_output}))\n\n # Print final test accuracy and confusion matrix\n test_input, test_output = _load_data(HPCP_PATH + \"/test\")\n test_acc = sess.run(accuracy,\n {data: test_input, target: test_output})\n print(\"Final test accuracy {:3.1f}%\".format(100 * test_acc))\n print(sess.run(conf_matrix,\n {data: test_input, target: test_output}))\n\n sess.close()\n"
] |
[
[
"tensorflow.clip_by_value",
"tensorflow.nn.dynamic_rnn",
"tensorflow.matmul",
"tensorflow.transpose",
"tensorflow.summary.FileWriter",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.cast",
"tensorflow.placeholder",
"numpy.random.shuffle",
"tensorflow.subtract",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.log",
"tensorflow.train.AdamOptimizer",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.argmax",
"tensorflow.summary.scalar"
]
] |
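A sketch of restoring one of the checkpoints the training loop above writes every `CHKPT_INTERVAL` epochs and scoring a single sample. It assumes the graph-construction half of the script has already run, so `data`, `prediction`, `saver`, and the module constants exist in scope; the run id in the path is a placeholder:

```python
import numpy as np
import tensorflow as tf

sample = np.random.rand(1, SEQUENCE_LENGTH, INPUT_DIMENSION)  # one HPCP sequence

with tf.Session() as sess:
    saver.restore(sess, "tf/lstm_data_<run_id>/chkpt_1000")   # placeholder run id
    probs = sess.run(prediction, {data: sample})
    print(LABELS[int(np.argmax(probs))])                      # predicted composer label
```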
kirilkoroves/torchvision-0.3.0
|
[
"39f46d141f6a7ac2b094545c33936ad4500d3c7d",
"34d640e5180cc5ab378f84af6ed596cb0c810e6c"
] |
[
"torchvision/models/googlenet.py",
"torchvision/datasets/celeba.py"
] |
[
"import warnings\nfrom collections import namedtuple\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .utils import load_state_dict_from_url\n\n__all__ = ['GoogLeNet', 'googlenet']\n\nmodel_urls = {\n # GoogLeNet ported from TensorFlow\n 'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',\n}\n\n_GoogLeNetOuputs = namedtuple('GoogLeNetOuputs', ['logits', 'aux_logits2', 'aux_logits1'])\n\n\ndef googlenet(pretrained=False, progress=True, **kwargs):\n r\"\"\"GoogLeNet (Inception v1) model architecture from\n `\"Going Deeper with Convolutions\" <http://arxiv.org/abs/1409.4842>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n aux_logits (bool): If True, adds two auxiliary branches that can improve training.\n Default: *False* when pretrained is True otherwise *True*\n transform_input (bool): If True, preprocesses the input according to the method with which it\n was trained on ImageNet. Default: *False*\n \"\"\"\n if pretrained:\n if 'transform_input' not in kwargs:\n kwargs['transform_input'] = True\n if 'aux_logits' not in kwargs:\n kwargs['aux_logits'] = False\n if kwargs['aux_logits']:\n warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '\n 'so make sure to train them')\n original_aux_logits = kwargs['aux_logits']\n kwargs['aux_logits'] = True\n kwargs['init_weights'] = False\n model = GoogLeNet(**kwargs)\n state_dict = load_state_dict_from_url(model_urls['googlenet'],\n progress=progress)\n model.load_state_dict(state_dict)\n if not original_aux_logits:\n model.aux_logits = False\n del model.aux1, model.aux2\n return model\n\n return GoogLeNet(**kwargs)\n\n\nclass GoogLeNet(nn.Module):\n\n def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, init_weights=True):\n super(GoogLeNet, self).__init__()\n self.aux_logits = aux_logits\n self.transform_input = transform_input\n\n self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)\n self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n self.conv2 = BasicConv2d(64, 64, kernel_size=1)\n self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)\n self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n\n self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)\n self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)\n self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)\n\n self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)\n self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)\n self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)\n self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)\n self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)\n self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)\n\n self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)\n self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)\n\n if aux_logits:\n self.aux1 = InceptionAux(512, num_classes)\n self.aux2 = InceptionAux(528, num_classes)\n\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.dropout = nn.Dropout(0.2)\n self.fc = nn.Linear(1024, num_classes)\n\n if init_weights:\n self._initialize_weights()\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n import scipy.stats as stats\n X = stats.truncnorm(-2, 2, scale=0.01)\n values = torch.as_tensor(X.rvs(m.weight.numel()), 
dtype=m.weight.dtype)\n values = values.view(m.weight.size())\n with torch.no_grad():\n m.weight.copy_(values)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n if self.transform_input:\n x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n x = torch.cat((x_ch0, x_ch1, x_ch2), 1)\n\n # N x 3 x 224 x 224\n x = self.conv1(x)\n # N x 64 x 112 x 112\n x = self.maxpool1(x)\n # N x 64 x 56 x 56\n x = self.conv2(x)\n # N x 64 x 56 x 56\n x = self.conv3(x)\n # N x 192 x 56 x 56\n x = self.maxpool2(x)\n\n # N x 192 x 28 x 28\n x = self.inception3a(x)\n # N x 256 x 28 x 28\n x = self.inception3b(x)\n # N x 480 x 28 x 28\n x = self.maxpool3(x)\n # N x 480 x 14 x 14\n x = self.inception4a(x)\n # N x 512 x 14 x 14\n if self.training and self.aux_logits:\n aux1 = self.aux1(x)\n\n x = self.inception4b(x)\n # N x 512 x 14 x 14\n x = self.inception4c(x)\n # N x 512 x 14 x 14\n x = self.inception4d(x)\n # N x 528 x 14 x 14\n if self.training and self.aux_logits:\n aux2 = self.aux2(x)\n\n x = self.inception4e(x)\n # N x 832 x 14 x 14\n x = self.maxpool4(x)\n # N x 832 x 7 x 7\n x = self.inception5a(x)\n # N x 832 x 7 x 7\n x = self.inception5b(x)\n # N x 1024 x 7 x 7\n\n x = self.avgpool(x)\n # N x 1024 x 1 x 1\n x = x.view(x.size(0), -1)\n # N x 1024\n x = self.dropout(x)\n x = self.fc(x)\n # N x 1000 (num_classes)\n if self.training and self.aux_logits:\n return _GoogLeNetOuputs(x, aux2, aux1)\n return x\n\n\nclass Inception(nn.Module):\n\n def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):\n super(Inception, self).__init__()\n\n self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)\n\n self.branch2 = nn.Sequential(\n BasicConv2d(in_channels, ch3x3red, kernel_size=1),\n BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)\n )\n\n self.branch3 = nn.Sequential(\n BasicConv2d(in_channels, ch5x5red, kernel_size=1),\n BasicConv2d(ch5x5red, ch5x5, kernel_size=3, padding=1)\n )\n\n self.branch4 = nn.Sequential(\n nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),\n BasicConv2d(in_channels, pool_proj, kernel_size=1)\n )\n\n def forward(self, x):\n branch1 = self.branch1(x)\n branch2 = self.branch2(x)\n branch3 = self.branch3(x)\n branch4 = self.branch4(x)\n\n outputs = [branch1, branch2, branch3, branch4]\n return torch.cat(outputs, 1)\n\n\nclass InceptionAux(nn.Module):\n\n def __init__(self, in_channels, num_classes):\n super(InceptionAux, self).__init__()\n self.conv = BasicConv2d(in_channels, 128, kernel_size=1)\n\n self.fc1 = nn.Linear(2048, 1024)\n self.fc2 = nn.Linear(1024, num_classes)\n\n def forward(self, x):\n # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14\n x = F.adaptive_avg_pool2d(x, (4, 4))\n # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4\n x = self.conv(x)\n # N x 128 x 4 x 4\n x = x.view(x.size(0), -1)\n # N x 2048\n x = F.relu(self.fc1(x), inplace=True)\n # N x 2048\n x = F.dropout(x, 0.7, training=self.training)\n # N x 2048\n x = self.fc2(x)\n # N x 1024\n\n return x\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.conv(x)\n x = 
self.bn(x)\n return F.relu(x, inplace=True)\n",
"import torch\nimport os\nimport PIL\nfrom .vision import VisionDataset\nfrom .utils import download_file_from_google_drive, check_integrity\n\n\nclass CelebA(VisionDataset):\n \"\"\"`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset.\n\n Args:\n root (string): Root directory where images are downloaded to.\n split (string): One of {'train', 'valid', 'test'}.\n Accordingly dataset is selected.\n target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``,\n or ``landmarks``. Can also be a list to output a tuple with all specified target types.\n The targets represent:\n ``attr`` (np.array shape=(40,) dtype=int): binary (0, 1) labels for attributes\n ``identity`` (int): label for each person (data points with the same identity are the same person)\n ``bbox`` (np.array shape=(4,) dtype=int): bounding box (x, y, width, height)\n ``landmarks`` (np.array shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x,\n righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y)\n Defaults to ``attr``.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.ToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n \"\"\"\n\n base_folder = \"celeba\"\n # There currently does not appear to be a easy way to extract 7z in python (without introducing additional\n # dependencies). The \"in-the-wild\" (not aligned+cropped) images are only in 7z, so they are not available\n # right now.\n file_list = [\n # File ID MD5 Hash Filename\n (\"0B7EVK8r0v71pZjFTYXZWM3FlRnM\", \"00d2c5bc6d35e252742224ab0c1e8fcb\", \"img_align_celeba.zip\"),\n # (\"0B7EVK8r0v71pbWNEUjJKdDQ3dGc\", \"b6cd7e93bc7a96c2dc33f819aa3ac651\", \"img_align_celeba_png.7z\"),\n # (\"0B7EVK8r0v71peklHb0pGdDl6R28\", \"b6cd7e93bc7a96c2dc33f819aa3ac651\", \"img_celeba.7z\"),\n (\"0B7EVK8r0v71pblRyaVFSWGxPY0U\", \"75e246fa4810816ffd6ee81facbd244c\", \"list_attr_celeba.txt\"),\n (\"1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS\", \"32bd1bd63d3c78cd57e08160ec5ed1e2\", \"identity_CelebA.txt\"),\n (\"0B7EVK8r0v71pbThiMVRxWXZ4dU0\", \"00566efa6fedff7a56946cd1c10f1c16\", \"list_bbox_celeba.txt\"),\n (\"0B7EVK8r0v71pd0FJY3Blby1HUTQ\", \"cc24ecafdb5b50baae59b03474781f8c\", \"list_landmarks_align_celeba.txt\"),\n # (\"0B7EVK8r0v71pTzJIdlJWdHczRlU\", \"063ee6ddb681f96bc9ca28c6febb9d1a\", \"list_landmarks_celeba.txt\"),\n (\"0B7EVK8r0v71pY0NSMzRuSXJEVkk\", \"d32c9cbf5e040fd4025c592c306e6668\", \"list_eval_partition.txt\"),\n ]\n\n def __init__(self, root,\n split=\"train\",\n target_type=\"attr\",\n transform=None, target_transform=None,\n download=False):\n import pandas\n super(CelebA, self).__init__(root)\n self.split = split\n if isinstance(target_type, list):\n self.target_type = target_type\n else:\n self.target_type = [target_type]\n self.transform = transform\n self.target_transform = target_transform\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError('Dataset not found or corrupted.' 
+\n ' You can use download=True to download it')\n\n self.transform = transform\n self.target_transform = target_transform\n\n if split.lower() == \"train\":\n split = 0\n elif split.lower() == \"valid\":\n split = 1\n elif split.lower() == \"test\":\n split = 2\n else:\n raise ValueError('Wrong split entered! Please use split=\"train\" '\n 'or split=\"valid\" or split=\"test\"')\n\n with open(os.path.join(self.root, self.base_folder, \"list_eval_partition.txt\"), \"r\") as f:\n splits = pandas.read_csv(f, delim_whitespace=True, header=None, index_col=0)\n\n with open(os.path.join(self.root, self.base_folder, \"identity_CelebA.txt\"), \"r\") as f:\n self.identity = pandas.read_csv(f, delim_whitespace=True, header=None, index_col=0)\n\n with open(os.path.join(self.root, self.base_folder, \"list_bbox_celeba.txt\"), \"r\") as f:\n self.bbox = pandas.read_csv(f, delim_whitespace=True, header=1, index_col=0)\n\n with open(os.path.join(self.root, self.base_folder, \"list_landmarks_align_celeba.txt\"), \"r\") as f:\n self.landmarks_align = pandas.read_csv(f, delim_whitespace=True, header=1)\n\n with open(os.path.join(self.root, self.base_folder, \"list_attr_celeba.txt\"), \"r\") as f:\n self.attr = pandas.read_csv(f, delim_whitespace=True, header=1)\n\n mask = (splits[1] == split)\n self.filename = splits[mask].index.values\n self.identity = torch.as_tensor(self.identity[mask].values)\n self.bbox = torch.as_tensor(self.bbox[mask].values)\n self.landmarks_align = torch.as_tensor(self.landmarks_align[mask].values)\n self.attr = torch.as_tensor(self.attr[mask].values)\n self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1}\n\n def _check_integrity(self):\n for (_, md5, filename) in self.file_list:\n fpath = os.path.join(self.root, self.base_folder, filename)\n _, ext = os.path.splitext(filename)\n # Allow original archive to be deleted (zip and 7z)\n # Only need the extracted images\n if ext not in [\".zip\", \".7z\"] and not check_integrity(fpath, md5):\n return False\n\n # Should check a hash of the images\n return os.path.isdir(os.path.join(self.root, self.base_folder, \"img_align_celeba\"))\n\n def download(self):\n import zipfile\n\n if self._check_integrity():\n print('Files already downloaded and verified')\n return\n\n for (file_id, md5, filename) in self.file_list:\n download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5)\n\n with zipfile.ZipFile(os.path.join(self.root, self.base_folder, \"img_align_celeba.zip\"), \"r\") as f:\n f.extractall(os.path.join(self.root, self.base_folder))\n\n def __getitem__(self, index):\n X = PIL.Image.open(os.path.join(self.root, self.base_folder, \"img_align_celeba\", self.filename[index]))\n\n target = []\n for t in self.target_type:\n if t == \"attr\":\n target.append(self.attr[index, :])\n elif t == \"identity\":\n target.append(self.identity[index, 0])\n elif t == \"bbox\":\n target.append(self.bbox[index, :])\n elif t == \"landmarks\":\n target.append(self.landmarks_align[index, :])\n else:\n raise ValueError(\"Target type \\\"{}\\\" is not recognized.\".format(t))\n target = tuple(target) if len(target) > 1 else target[0]\n\n if self.transform is not None:\n X = self.transform(X)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return X, target\n\n def __len__(self):\n return len(self.attr)\n\n def extra_repr(self):\n lines = [\"Target type: {target_type}\", \"Split: {split}\"]\n return '\\n'.join(lines).format(**self.__dict__)\n"
] |
[
[
"torch.nn.Dropout",
"torch.cat",
"torch.nn.functional.dropout",
"torch.nn.init.constant_",
"scipy.stats.truncnorm",
"torch.nn.Conv2d",
"torch.unsqueeze",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.functional.relu",
"torch.nn.AdaptiveAvgPool2d",
"torch.no_grad",
"torch.nn.BatchNorm2d"
],
[
"pandas.read_csv",
"torch.as_tensor"
]
] |
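A minimal usage sketch for the googlenet() factory stored in this row. It assumes a torchvision release where the pretrained= keyword is still accepted (it was deprecated in later versions); the input tensor shape is illustrative, matching the N x 3 x 224 x 224 comments in the source.

import torch
from torchvision.models import googlenet

# Load ImageNet weights; per the factory above, aux heads are created only to
# load the state dict and are then stripped when aux_logits is left False.
model = googlenet(pretrained=True)
model.eval()  # in eval mode forward() returns plain logits, not the namedtuple
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)  # N x 1000
print(logits.shape)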
generalui/Qcodes-Package
|
[
"f167b33f9ab562f68783e582561ba915a81efd2b",
"f167b33f9ab562f68783e582561ba915a81efd2b"
] |
[
"qcodes/instrument_drivers/tektronix/AWG520.py",
"qcodes/data/hdf5_format.py"
] |
[
"# Tektronix_AWG520.py class, to perform the communication between the Wrapper and the device\n# Pieter de Groot <pieterdegroot@gmail.com>, 2008\n# Martijn Schaafsma <qtlab@mcschaafsma.nl>, 2008\n# Vishal Ranjan, 2012\n# Ron schutjens, 2012\n# Adriaan Rol, 2016 Ported to QCodes\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\n\nimport time\nimport logging\nimport numpy as np\nimport struct\nfrom qcodes import VisaInstrument, validators as vals\n\n\nclass Tektronix_AWG520(VisaInstrument):\n '''\n This is the python driver for the Tektronix AWG520\n Arbitrary Waveform Generator\n\n .. todo::\n\n 1) Get All\n 2) Remove test_send??\n 3) Add docstrings\n\n .. todo::\n\n use inheritance for common use with 520, currently contains\n a lot of repetition\n '''\n\n def __init__(self, name, address, reset=False, clock=1e9, numpoints=1000,\n **kw):\n '''\n Initializes the AWG520.\n\n Args:\n name (string) : name of the instrument\n address (string) : GPIB address (Note: 520 cannot be controlled\n via ethernet)\n reset (bool) : resets to default values, default=false\n numpoints (int) : sets the number of datapoints\n\n Output:\n None\n '''\n super().__init__(name, address, **kw)\n\n self._address = address\n self._values = {}\n self._values['files'] = {}\n self._clock = clock\n self._numpoints = numpoints\n self._fname = ''\n\n self.add_function('reset', call_cmd='*RST')\n self.add_parameter('state',\n get_cmd=self.get_state)\n\n # Add parameters\n self.add_parameter('trigger_mode',\n get_cmd='AWGC:RMOD?',\n set_cmd='AWGC:RMOD ' + '{}',\n vals=vals.Enum('CONT', 'TRIG', 'ENH', 'GAT'))\n self.add_parameter('trigger_impedance',\n unit='Ohm',\n label='Trigger impedance (Ohm)',\n get_cmd='TRIG:IMP?',\n set_cmd='TRIG:IMP '+'{}',\n vals=vals.Enum(50, 1000),\n get_parser=float)\n self.add_parameter('trigger_level',\n unit='V',\n label='Trigger level (V)',\n get_cmd='TRIG:LEV?',\n set_cmd='TRIG:LEV '+'{:.3f}',\n vals=vals.Numbers(-5, 5),\n get_parser=float)\n\n self.add_parameter('clock_freq',\n label='Clock frequency (Hz)',\n get_cmd='SOUR:FREQ?',\n set_cmd='SOUR:FREQ '+'{}',\n vals=vals.Numbers(1e6, 1e9),\n get_parser=float)\n # Todo check if max freq is 1.2 GHz for the AWG 520 aswell\n self.add_parameter('numpoints',\n label='Number of datapoints per wave',\n get_cmd=self._do_get_numpoints,\n set_cmd=self._do_set_numpoints,\n vals=vals.Ints(100, int(1e9)))\n\n for ch in [1, 2]:\n amp_cmd = 'SOUR{}:VOLT:LEV:IMM:AMPL'.format(ch)\n offset_cmd = 'SOUR{}:VOLT:LEV:IMM:OFFS'.format(ch)\n\n self.add_parameter(\n 'ch{}_filename'.format(ch), set_cmd=self._gen_ch_set_func(\n self._do_set_filename, ch), vals=vals.Anything())\n self.add_parameter('ch{}_amp'.format(ch),\n label='Amplitude channel {} (V)'.format(ch),\n unit='V',\n get_cmd=amp_cmd + '?',\n set_cmd=amp_cmd + ' {:.6f}',\n vals=vals.Numbers(0.02, 2.0),\n get_parser=float)\n\n 
self.add_parameter('ch{}_offset'.format(ch),\n label='Offset channel {} (V)'.format(ch),\n unit='V',\n get_cmd=offset_cmd + '?',\n set_cmd=offset_cmd + ' {:.3f}',\n vals=vals.Numbers(-1.0, 1.0),\n get_parser=float)\n self.add_parameter('ch{}_status'.format(ch),\n get_cmd='OUTP{}?'.format(ch),\n set_cmd='OUTP{}'.format(ch) + ' {}',\n vals=vals.Enum('ON', 'OFF'),\n get_parser=float)\n\n for j in [1, 2]:\n # TODO: check that 520 does not have marker delay feature\n # m_del_cmd = 'SOUR{}:MARK{}:DEL'.format(ch, j)\n m_high_cmd = 'SOUR{}:MARK{}:VOLT:LEV:IMM:HIGH'.format(ch, j)\n m_low_cmd = 'SOUR{}:MARK{}:VOLT:LEV:IMM:LOW'.format(ch, j)\n\n self.add_parameter(\n 'ch{}_m{}_high'.format(ch, j),\n label='Channel {} Marker {} high level (V)'.format(ch, j),\n get_cmd=m_high_cmd + '?',\n set_cmd=m_high_cmd + ' {:.3f}',\n vals=vals.Numbers(-2., 2.),\n get_parser=float)\n self.add_parameter(\n 'ch{}_m{}_low'.format(ch, j),\n label='Channel {} Marker {} low level (V)'.format(ch, j),\n get_cmd=m_low_cmd + '?',\n set_cmd=m_low_cmd + ' {:.3f}',\n vals=vals.Numbers(-2., 2.),\n get_parser=float)\n\n # Add functions\n if reset:\n self.reset()\n else:\n self.get_all()\n self.connect_message()\n\n # Functions\n def _gen_ch_set_func(self, fun, ch):\n def set_func(val):\n return fun(ch, val)\n return set_func\n\n def _gen_ch_get_func(self, fun, ch):\n def get_func():\n return fun(ch)\n return get_func\n\n # get state AWG\n def get_state(self):\n state = self.visa_handle.ask('AWGC:RSTATE?')\n if state.startswith('0'):\n return 'Idle'\n elif state.startswith('1'):\n return 'Waiting for trigger'\n elif state.startswith('2'):\n return 'Running'\n else:\n logging.error(__name__ + ' : AWG in undefined state')\n return 'error'\n\n def start(self):\n self.visa_handle.write('AWGC:RUN')\n return\n\n def stop(self):\n self.visa_handle.write('AWGC:STOP')\n\n def get_folder_contents(self):\n return self.visa_handle.ask('mmem:cat?')\n\n def get_current_folder_name(self):\n return self.visa_handle.ask('mmem:cdir?')\n\n def set_current_folder_name(self, file_path):\n self.visa_handle.write('mmem:cdir \"%s\"' % file_path)\n\n def change_folder(self, dir):\n self.visa_handle.write('mmem:cdir \"%s\"' % dir)\n\n def goto_root(self):\n self.visa_handle.write('mmem:cdir')\n\n def make_directory(self, dir, root):\n '''\n makes a directory\n if root = True, new dir in main folder\n '''\n if root:\n self.goto_root()\n self.visa_handle.write('MMEMory:MDIRectory \"{}\"'.format(dir))\n else:\n self.visa_handle.write('MMEMory:MDIRectory \"{}\"'.format(dir))\n\n def get_all(self, update=True):\n # TODO: fix bug in snapshot where it tries to get setable only param\n # return self.snapshot(update=update)\n\n return self.snapshot(update=False)\n\n def clear_waveforms(self):\n '''\n Clears the waveform on both channels.\n\n Input:\n None\n\n Output:\n None\n '''\n logging.debug(__name__ + ' : Clear waveforms from channels')\n self.visa_handle.write('SOUR1:FUNC:USER \"\"')\n self.visa_handle.write('SOUR2:FUNC:USER \"\"')\n\n def force_trigger(self):\n '''\n forces a trigger event (used for wait_trigger option in sequences)\n\n Ron\n '''\n return self.visa_handle.write('TRIG:SEQ:IMM')\n\n def force_logicjump(self):\n '''\n forces a jumplogic event (used as a conditional event during waveform\n executions)\n\n note: jump_logic events&mode have to be set properly!\n\n Ron\n '''\n return self.visa_handle.write('AWGC:EVEN:SEQ:IMM')\n\n def set_jumpmode(self, mode):\n '''\n sets the jump mode for jump logic events, possibilities:\n LOGic,TABle,SOFTware\n 
give mode as string\n\n note: jump_logic events&mode have to be set properly!\n\n Ron\n '''\n return self.visa_handle.write('AWGC:ENH:SEQ:JMOD %s' % mode)\n\n def get_jumpmode(self, mode):\n '''\n get the jump mode for jump logic events\n\n Ron\n '''\n return self.visa_handle.ask('AWGC:ENH:SEQ:JMOD?')\n\n def _do_get_numpoints(self):\n '''\n Returns the number of datapoints in each wave\n\n Input:\n None\n\n Output:\n numpoints (int) : Number of datapoints in each wave\n '''\n return self._numpoints\n\n def _do_set_numpoints(self, numpts):\n '''\n Sets the number of datapoints in each wave.\n This acts on both channels.\n\n Input:\n numpts (int) : The number of datapoints in each wave\n\n Output:\n None\n '''\n logging.debug(__name__ + ' : Trying to set numpoints to %s' % numpts)\n if numpts != self._numpoints:\n logging.warning(__name__ + ' : changing numpoints. This will clear all waveforms!')\n\n response = 'yes' # raw_input('type \"yes\" to continue')\n if response == 'yes':\n logging.debug(__name__ + ' : Setting numpoints to %s' % numpts)\n self._numpoints = numpts\n self.clear_waveforms()\n else:\n print('aborted')\n\n\n\n def set_setup_filename(self, fname, force_reload=False):\n if self._fname == fname and not force_reload:\n print('File %s already loaded in AWG520' % fname)\n return\n else:\n self._fname = fname\n filename = \"\\%s/%s.seq\" % (fname, fname)\n self.set_sequence(filename=filename)\n print('Waiting for AWG to load file \"%s\"' % fname)\n sleeptime = 0.5\n # while state idle is not possible due to timeout error while loading\n t0 = time.time()\n while(time.time()-t0 < 360):\n try:\n if self.get_state() == 'Idle':\n break\n except:\n time.sleep(sleeptime)\n print('.')\n self.get_state()\n print('Loading file took %.2fs' % (time.time()-t0))\n return\n\n def _do_set_filename(self, name, channel):\n '''\n Specifies which file has to be set on which channel\n Make sure the file exists, and the numpoints and clock of the file\n matches the instrument settings.\n\n If file doesn't exist an error is raised, if the numpoints doesn't match\n the command is neglected\n\n Input:\n name (string) : filename of uploaded file\n channel (int) : 1 or 2, the number of the designated channel\n\n Output:\n None\n '''\n logging.debug(__name__ + ' : Try to set {} on channel {}'.format(\n name, channel))\n exists = False\n if name in self._values['files']:\n exists = True\n logging.debug(__name__ + ' : File exists in local memory')\n self._values['recent_channel_%s' % channel] = self._values[\n 'files'][name]\n self._values['recent_channel_%s' % channel]['filename'] = name\n else:\n logging.debug(__name__ + ' : File does not exist in memory, \\\n reading from instrument')\n lijst = self.visa_handle.ask('MMEM:CAT? \"MAIN\"')\n bool = False\n bestand = \"\"\n for i in range(len(lijst)):\n if (lijst[i] =='\"'):\n bool = True\n elif (lijst[i] == ','):\n bool = False\n if (bestand == name):\n exists = True\n bestand = \"\"\n elif bool:\n bestand = bestand + lijst[i]\n if exists:\n data = self.visa_handle.ask('MMEM:DATA? 
\"%s\"' %name)\n logging.debug(__name__ + ' : File exists on instrument, loading \\\n into local memory')\n # string is built up as follows: '#' <lenlen1> <len> 'MAGIC 1000\\r\\n' '#' <len waveform> 'CLOCK ' <clockvalue>\n len1 = int(data[1])\n len2 = int(data[2:2+len1])\n i = len1\n tekst = \"\"\n while (tekst !='#'):\n tekst = data[i]\n i = i+1\n len3 = int(data[i])\n len4 = int(data[i+1:i+1+len3])\n\n w = []\n m1 = []\n m2 = []\n\n for q in range(i+1+len3, i+1+len3+len4, 5):\n j = int(q)\n c, d = struct.unpack('<fB', data[j:5+j])\n w.append(c)\n m2.append(int(d/2))\n m1.append(d-2*int(d/2))\n\n clock = float(data[i+1+len3+len4+5:len(data)])\n\n self._values['files'][name] = {}\n self._values['files'][name]['w'] = w\n self._values['files'][name]['m1'] = m1\n self._values['files'][name]['m2'] = m2\n self._values['files'][name]['clock'] = clock\n self._values['files'][name]['numpoints'] = len(w)\n\n self._values['recent_channel_%s' %channel] = self._values['files'][name]\n self._values['recent_channel_%s' %channel]['filename'] = name\n else:\n logging.error(__name__ + ' : Invalid filename specified %s' %name)\n\n if (self._numpoints==self._values['files'][name]['numpoints']):\n logging.debug(__name__ + ' : Set file %s on channel %s' % (name, channel))\n self.visa_handle.write('SOUR%s:FUNC:USER \"%s\",\"MAIN\"' % (channel, name))\n else:\n self.visa_handle.write('SOUR%s:FUNC:USER \"%s\",\"MAIN\"' % (channel, name))\n logging.warning(__name__ + ' : Wrong length %s instead of %s'\n %(self._values['files'][name]['numpoints'], self._numpoints))\n\n\n # Ask for string with filenames\n def get_filenames(self):\n logging.debug(__name__ + ' : Read filenames from instrument')\n return self.visa_handle.ask('MMEM:CAT? \"MAIN\"')\n\n def return_self(self):\n return self\n # Send waveform to the device\n\n def send_waveform(self, w, m1, m2, filename, clock):\n '''\n Sends a complete waveform. All parameters need to be specified.\n choose a file extension 'wfm' (must end with .pat)\n See also: resend_waveform()\n\n Input:\n w (float[numpoints]) : waveform\n m1 (int[numpoints]) : marker1\n m2 (int[numpoints]) : marker2\n filename (string) : filename\n clock (int) : frequency (Hz)\n\n Output:\n None\n '''\n logging.debug(__name__ + ' : Sending waveform %s to instrument' % filename)\n\n # Check for errors\n dim = len(w)\n\n if (not((len(w) == len(m1)) and ((len(m1) == len(m2))))):\n return 'error'\n self._values['files'][filename] = {}\n self._values['files'][filename]['w'] = w\n self._values['files'][filename]['m1'] = m1\n self._values['files'][filename]['m2'] = m2\n self._values['files'][filename]['clock'] = clock\n self._values['files'][filename]['numpoints'] = len(w)\n\n m = m1 + np.multiply(m2, 2)\n ws = ''\n for i in range(0, len(w)):\n ws = ws + struct.pack('<fB', w[i], int(m[i]))\n s1 = 'MMEM:DATA \"%s\",' % filename\n s3 = 'MAGIC 1000\\n'\n s5 = ws\n s6 = 'CLOCK %.10e\\n' % clock\n\n s4 = '#' + str(len(str(len(s5)))) + str(len(s5))\n lenlen = str(len(str(len(s6) + len(s5) + len(s4) + len(s3))))\n s2 = '#' + lenlen + str(len(s6) + len(s5) + len(s4) + len(s3))\n\n mes = s1 + s2 + s3 + s4 + s5 + s6\n self.visa_handle.write(mes)\n\n def send_pattern(self, w, m1, m2, filename, clock):\n '''\n Sends a pattern file.\n similar to waveform except diff file extension\n number of points different. 
diff byte conversion\n See also: resend_waveform()\n\n Input:\n w (float[numpoints]) : waveform\n m1 (int[numpoints]) : marker1\n m2 (int[numpoints]) : marker2\n filename (string) : filename\n clock (int) : frequency (Hz)\n\n Output:\n None\n '''\n logging.debug(__name__ + ' : Sending pattern %s to instrument' % filename)\n\n # Check for errors\n dim = len(w)\n if (not((len(w)==len(m1)) and ((len(m1)==len(m2))))):\n return 'error'\n self._values['files'][filename]={}\n self._values['files'][filename]['w']=w\n self._values['files'][filename]['m1']=m1\n self._values['files'][filename]['m2']=m2\n self._values['files'][filename]['clock']=clock\n self._values['files'][filename]['numpoints']=len(w)\n\n m = m1 + np.multiply(m2, 2)\n ws = ''\n for i in range(0, len(w)):\n ws = ws + struct.pack('<fB', w[i], int(m[i]))\n\n s1 = 'MMEM:DATA \"%s\",' % filename\n s3 = 'MAGIC 2000\\n'\n s5 = ws\n s6 = 'CLOCK %.10e\\n' % clock\n\n s4 = '#' + str(len(str(len(s5)))) + str(len(s5))\n lenlen=str(len(str(len(s6) + len(s5) + len(s4) + len(s3))))\n s2 = '#' + lenlen + str(len(s6) + len(s5) + len(s4) + len(s3))\n\n mes = s1 + s2 + s3 + s4 + s5 + s6\n self.visa_handle.write(mes)\n\n\n def resend_waveform(self, channel, w=[], m1=[], m2=[], clock=[]):\n '''\n Resends the last sent waveform for the designated channel\n Overwrites only the parameters specified\n\n Input: (mandatory)\n channel (int) : 1 or 2, the number of the designated channel\n\n Input: (optional)\n w (float[numpoints]) : waveform\n m1 (int[numpoints]) : marker1\n m2 (int[numpoints]) : marker2\n clock (int) : frequency\n\n Output:\n None\n '''\n filename = self._values['recent_channel_%s' %channel]['filename']\n logging.debug(__name__ + ' : Resending %s to channel %s' % (filename, channel))\n\n\n if (w==[]):\n w = self._values['recent_channel_%s' %channel]['w']\n if (m1==[]):\n m1 = self._values['recent_channel_%s' %channel]['m1']\n if (m2==[]):\n m2 = self._values['recent_channel_%s' %channel]['m2']\n if (clock==[]):\n clock = self._values['recent_channel_%s' %channel]['clock']\n\n if not ( (len(w) == self._numpoints) and (len(m1) == self._numpoints) and (len(m2) == self._numpoints)):\n logging.error(__name__ + ' : one (or more) lengths of waveforms do not match with numpoints')\n\n self.send_waveform(w, m1, m2, filename, clock)\n self._do_set_filename(filename, channel)\n\n def delete_all_waveforms_from_list(self):\n '''\n for compatibility with awg, is not relevant for AWG520 since it\n has no waveform list\n '''\n pass\n\n def send_sequence(self, wfs, rep, wait, goto, logic_jump, filename):\n '''\n Sends a sequence file (for the moment only for ch1)\n\n Args:\n\n wfs: list of filenames\n\n Returns:\n\n None\n '''\n logging.debug(__name__ + ' : Sending sequence %s to instrument' % filename)\n N = str(len(rep))\n try:\n wfs.remove(N*[None])\n except ValueError:\n pass\n s1 = 'MMEM:DATA \"%s\",' % filename\n\n if len(np.shape(wfs)) ==1:\n s3 = 'MAGIC 3001\\n'\n s5 = ''\n for k in range(len(rep)):\n s5 = s5+ '\"%s\",%s,%s,%s,%s\\n'%(wfs[k],rep[k],wait[k],goto[k],logic_jump[k])\n\n else:\n s3 = 'MAGIC 3002\\n'\n s5 = ''\n for k in range(len(rep)):\n s5 = s5+ '\"%s\",\"%s\",%s,%s,%s,%s\\n'%(wfs[0][k],wfs[1][k],rep[k],wait[k],goto[k],logic_jump[k])\n\n s4 = 'LINES %s\\n'%N\n lenlen=str(len(str(len(s5) + len(s4) + len(s3))))\n s2 = '#' + lenlen + str(len(s5) + len(s4) + len(s3))\n\n\n mes = s1 + s2 + s3 + s4 + s5\n self.visa_handle.write(mes)\n\n def send_sequence2(self,wfs1,wfs2,rep,wait,goto,logic_jump,filename):\n '''\n Sends a sequence file\n\n 
Args:\n wfs1: list of filenames for ch1 (all must end with .pat)\n wfs2: list of filenames for ch2 (all must end with .pat)\n rep: list\n wait: list\n goto: list\n logic_jump: list\n filename: name of output file (must end with .seq)\n\n Returns:\n None\n '''\n logging.debug(__name__ + ' : Sending sequence %s to instrument' % filename)\n\n\n N = str(len(rep))\n s1 = 'MMEM:DATA \"%s\",' % filename\n s3 = 'MAGIC 3002\\n'\n s4 = 'LINES %s\\n'%N\n s5 = ''\n\n\n for k in range(len(rep)):\n s5 = s5+ '\"%s\",\"%s\",%s,%s,%s,%s\\n'%(wfs1[k],wfs2[k],rep[k],wait[k],goto[k],logic_jump[k])\n\n lenlen=str(len(str(len(s5) + len(s4) + len(s3))))\n s2 = '#' + lenlen + str(len(s5) + len(s4) + len(s3))\n\n\n mes = s1 + s2 + s3 + s4 + s5\n self.visa_handle.write(mes)\n\n def set_sequence(self,filename):\n '''\n loads a sequence file on all channels.\n Waveforms/patterns to be executed on respective channel\n must be defined inside the sequence file itself\n make sure to send all waveforms before setting a seq\n '''\n self.visa_handle.write('SOUR%s:FUNC:USER \"%s\",\"MAIN\"' % (1, filename))\n\n def load_and_set_sequence(self,wfs,rep,wait,goto,logic_jump,filename):\n '''\n Loads and sets the awg sequecne\n '''\n self.send_sequence(wfs,rep,wait,goto,logic_jump,filename)\n self.set_sequence(filename)\n\n",
"import numpy as np\nimport logging\nimport h5py\nimport os\nimport json\n\nfrom .data_array import DataArray\nfrom .format import Formatter\n\n\nclass HDF5Format(Formatter):\n \"\"\"\n HDF5 formatter for saving qcodes datasets.\n\n Capable of storing (write) and recovering (read) qcodes datasets.\n \"\"\"\n\n def close_file(self, data_set):\n \"\"\"\n Closes the hdf5 file open in the dataset.\n \"\"\"\n if hasattr(data_set, '_h5_base_group'):\n data_set._h5_base_group.close()\n # Removes reference to closed file\n del data_set._h5_base_group\n else:\n logging.warning(\n 'Cannot close file, data_set has no open hdf5 file')\n\n def _create_file(self, filepath):\n \"\"\"\n creates a hdf5 file (data_object) at a location specifed by\n filepath\n \"\"\"\n folder, _filename = os.path.split(filepath)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n file = h5py.File(filepath, 'a')\n return file\n\n def _open_file(self, data_set, location=None):\n if location is None:\n location = data_set.location\n filepath = self._filepath_from_location(location,\n io_manager=data_set.io)\n data_set._h5_base_group = h5py.File(filepath, 'r+')\n\n def read(self, data_set, location=None):\n \"\"\"\n Reads an hdf5 file specified by location into a data_set object.\n If no data_set is provided will creata an empty data_set to read into.\n If no location is provided will use the location specified in the\n dataset.\n \"\"\"\n self._open_file(data_set, location)\n\n for i, array_id in enumerate(\n data_set._h5_base_group['Data Arrays'].keys()):\n # Decoding string is needed because of h5py/issues/379\n name = array_id # will be overwritten if not in file\n dat_arr = data_set._h5_base_group['Data Arrays'][array_id]\n\n # write ensures these attributes always exist\n name = dat_arr.attrs['name'].decode()\n label = dat_arr.attrs['label'].decode()\n\n # get unit from units if no unit field, for backward compatibility\n if 'unit' in dat_arr.attrs:\n unit = dat_arr.attrs['unit'].decode()\n else:\n unit = dat_arr.attrs['units'].decode()\n\n is_setpoint = str_to_bool(dat_arr.attrs['is_setpoint'].decode())\n # if not is_setpoint:\n set_arrays = dat_arr.attrs['set_arrays']\n set_arrays = [s.decode() for s in set_arrays]\n # else:\n # set_arrays = ()\n vals = dat_arr.value[:, 0]\n if 'shape' in dat_arr.attrs.keys():\n vals = vals.reshape(dat_arr.attrs['shape'])\n if array_id not in data_set.arrays.keys(): # create new array\n d_array = DataArray(\n name=name, array_id=array_id, label=label, parameter=None,\n unit=unit,\n is_setpoint=is_setpoint, set_arrays=(),\n preset_data=vals)\n data_set.add_array(d_array)\n else: # update existing array with extracted values\n d_array = data_set.arrays[array_id]\n d_array.name = name\n d_array.label = label\n d_array.unit = unit\n d_array.is_setpoint = is_setpoint\n d_array.ndarray = vals\n d_array.shape = dat_arr.attrs['shape']\n # needed because I cannot add set_arrays at this point\n data_set.arrays[array_id]._sa_array_ids = set_arrays\n\n # Add copy/ref of setarrays (not array id only)\n # Note, this is not pretty but a result of how the dataset works\n for array_id, d_array in data_set.arrays.items():\n for sa_id in d_array._sa_array_ids:\n d_array.set_arrays += (data_set.arrays[sa_id], )\n data_set = self.read_metadata(data_set)\n return data_set\n\n def _filepath_from_location(self, location, io_manager):\n filename = os.path.split(location)[-1]\n filepath = io_manager.to_path(location +\n '/{}.hdf5'.format(filename))\n return filepath\n\n def _create_data_object(self, 
data_set, io_manager=None,\n location=None):\n # Create the file if it is not there yet\n if io_manager is None:\n io_manager = data_set.io\n if location is None:\n location = data_set.location\n filepath = self._filepath_from_location(location, io_manager)\n # note that this creates an hdf5 file in a folder with the same\n # name. This is useful for saving e.g. images in the same folder\n # I think this is a sane default (MAR).\n data_set._h5_base_group = self._create_file(filepath)\n return data_set._h5_base_group\n\n def write(self, data_set, io_manager=None, location=None,\n force_write=False, flush=True, write_metadata=True,\n only_complete=False):\n \"\"\"\n Writes a data_set to an hdf5 file.\n\n Args:\n data_set: qcodes data_set to write to hdf5 file\n io_manager: io_manger used for providing path\n location: location can be used to specify custom location\n force_write (bool): if True creates a new file to write to\n flush (bool) : whether to flush after writing, can be disabled\n for testing or performance reasons\n only_complete (bool): Not used by this formatter, but must be\n included in the call signature to avoid an \"unexpected\n keyword argument\" TypeError.\n\n N.B. It is recommended to close the file after writing, this can be\n done by calling ``HDF5Format.close_file(data_set)`` or\n ``data_set.finalize()`` if the data_set formatter is set to an\n hdf5 formatter. Note that this is not required if the dataset\n is created from a Loop as this includes a data_set.finalize()\n statement.\n\n The write function consists of two parts, writing DataArrays and\n writing metadata.\n\n - The main part of write consists of writing and resizing arrays,\n the resizing providing support for incremental writes.\n\n - write_metadata is called at the end of write and dumps a\n dictionary to an hdf5 file. 
If there already is metadata it will\n delete this and overwrite it with current metadata.\n\n \"\"\"\n if not hasattr(data_set, '_h5_base_group') or force_write:\n data_set._h5_base_group = self._create_data_object(\n data_set, io_manager, location)\n\n data_name = 'Data Arrays'\n\n if data_name not in data_set._h5_base_group.keys():\n arr_group = data_set._h5_base_group.create_group(data_name)\n else:\n arr_group = data_set._h5_base_group[data_name]\n\n for array_id in data_set.arrays.keys():\n if array_id not in arr_group.keys() or force_write:\n self._create_dataarray_dset(array=data_set.arrays[array_id],\n group=arr_group)\n dset = arr_group[array_id]\n # Resize the dataset and add the new values\n\n # dataset refers to the hdf5 dataset here\n datasetshape = dset.shape\n old_dlen = datasetshape[0]\n x = data_set.arrays[array_id]\n new_dlen = len(x[~np.isnan(x)])\n new_datasetshape = (new_dlen,\n datasetshape[1])\n dset.resize(new_datasetshape)\n new_data_shape = (new_dlen - old_dlen, datasetshape[1])\n dset[old_dlen:new_dlen] = x[old_dlen:new_dlen].reshape(\n new_data_shape)\n # allow resizing extracted data, here so it gets written for\n # incremental writes aswell\n dset.attrs['shape'] = x.shape\n if write_metadata:\n self.write_metadata(\n data_set, io_manager=io_manager, location=location)\n\n # flush ensures buffers are written to disk\n # (useful for ensuring openable by other files)\n if flush:\n data_set._h5_base_group.file.flush()\n\n def _create_dataarray_dset(self, array, group):\n '''\n input arguments\n array: Dataset data array\n group: group in the hdf5 file where the dset will be created\n\n creates a hdf5 datasaset that represents the data array.\n '''\n # Check for empty meta attributes, use array_id if name and/or label\n # is not specified\n if array.label is not None:\n label = array.label\n else:\n label = array.array_id\n\n if array.name is not None:\n name = array.name\n else:\n name = array.array_id\n\n # Create the hdf5 dataset\n dset = group.create_dataset(\n array.array_id, (0, 1),\n maxshape=(None, 1))\n dset.attrs['label'] = _encode_to_utf8(str(label))\n dset.attrs['name'] = _encode_to_utf8(str(name))\n dset.attrs['unit'] = _encode_to_utf8(str(array.unit or ''))\n dset.attrs['is_setpoint'] = _encode_to_utf8(str(array.is_setpoint))\n\n set_arrays = []\n # list will remain empty if array does not have set_array\n for i in range(len(array.set_arrays)):\n set_arrays += [_encode_to_utf8(\n str(array.set_arrays[i].array_id))]\n dset.attrs['set_arrays'] = set_arrays\n\n return dset\n\n def write_metadata(self, data_set, io_manager=None, location=None, read_first=True):\n \"\"\"\n Writes metadata of dataset to file using write_dict_to_hdf5 method\n\n Note that io and location are arguments that are only here because\n of backwards compatibility with the loop.\n This formatter uses io and location as specified for the main\n dataset.\n The read_first argument is ignored.\n \"\"\"\n if not hasattr(data_set, '_h5_base_group'):\n # added here because loop writes metadata before data itself\n data_set._h5_base_group = self._create_data_object(data_set)\n if 'metadata' in data_set._h5_base_group.keys():\n del data_set._h5_base_group['metadata']\n metadata_group = data_set._h5_base_group.create_group('metadata')\n self.write_dict_to_hdf5(data_set.metadata, metadata_group)\n\n def write_dict_to_hdf5(self, data_dict, entry_point):\n for key, item in data_dict.items():\n if isinstance(item, (str, bool, tuple, float, int)):\n entry_point.attrs[key] = item\n elif 
isinstance(item, np.ndarray):\n entry_point.create_dataset(key, data=item)\n elif item is None:\n # as h5py does not support saving None as attribute\n # I create special string, note that this can create\n # unexpected behaviour if someone saves a string with this name\n entry_point.attrs[key] = 'NoneType:__None__'\n elif isinstance(item, dict):\n entry_point.create_group(key)\n self.write_dict_to_hdf5(data_dict=item,\n entry_point=entry_point[key])\n elif isinstance(item, list):\n if len(item) > 0:\n elt_type = type(item[0])\n if all(isinstance(x, elt_type) for x in item):\n if isinstance(item[0], (int, float,\n np.int32, np.int64)):\n\n entry_point.create_dataset(key,\n data=np.array(item))\n entry_point[key].attrs['list_type'] = 'array'\n elif isinstance(item[0], str):\n dt = h5py.special_dtype(vlen=str)\n data = np.array(item)\n data = data.reshape((-1, 1))\n ds = entry_point.create_dataset(\n key, (len(data), 1), dtype=dt)\n ds[:] = data\n elif isinstance(item[0], dict):\n entry_point.create_group(key)\n group_attrs = entry_point[key].attrs\n group_attrs['list_type'] = 'dict'\n base_list_key = 'list_idx_{}'\n group_attrs['base_list_key'] = base_list_key\n group_attrs['list_length'] = len(item)\n for i, list_item in enumerate(item):\n list_item_grp = entry_point[key].create_group(\n base_list_key.format(i))\n self.write_dict_to_hdf5(\n data_dict=list_item,\n entry_point=list_item_grp)\n else:\n logging.warning(\n 'List of type \"{}\" for \"{}\":\"{}\" not '\n 'supported, storing as string'.format(\n elt_type, key, item))\n entry_point.attrs[key] = str(item)\n else:\n logging.warning(\n 'List of mixed type for \"{}\":\"{}\" not supported, '\n 'storing as string'.format(type(item), key, item))\n entry_point.attrs[key] = str(item)\n else:\n # as h5py does not support saving None as attribute\n entry_point.attrs[key] = 'NoneType:__emptylist__'\n\n else:\n logging.warning(\n 'Type \"{}\" for \"{}\":\"{}\" not supported, '\n 'storing as string'.format(type(item), key, item))\n entry_point.attrs[key] = str(item)\n\n def read_metadata(self, data_set):\n \"\"\"\n Reads in the metadata, this is also called at the end of a read\n statement so there should be no need to call this explicitly.\n \"\"\"\n # checks if there is an open file in the dataset as load_data does\n # reading of metadata before reading the complete dataset\n if not hasattr(self, '_h5_base_group'):\n self._open_file(data_set)\n if 'metadata' in data_set._h5_base_group.keys():\n metadata_group = data_set._h5_base_group['metadata']\n self.read_dict_from_hdf5(data_set.metadata, metadata_group)\n return data_set\n\n def read_dict_from_hdf5(self, data_dict, h5_group):\n if 'list_type' not in h5_group.attrs:\n for key, item in h5_group.items():\n if isinstance(item, h5py.Group):\n data_dict[key] = {}\n data_dict[key] = self.read_dict_from_hdf5(data_dict[key],\n item)\n else: # item either a group or a dataset\n if 'list_type' not in item.attrs:\n data_dict[key] = item.value\n else:\n data_dict[key] = list(item.value)\n for key, item in h5_group.attrs.items():\n if type(item) is str:\n # Extracts \"None\" as an exception as h5py does not support\n # storing None, nested if statement to avoid elementwise\n # comparison warning\n if item == 'NoneType:__None__':\n item = None\n elif item == 'NoneType:__emptylist__':\n item = []\n data_dict[key] = item\n elif h5_group.attrs['list_type'] == 'dict':\n # preallocate empty list\n list_to_be_filled = [None] * h5_group.attrs['list_length']\n base_list_key = h5_group.attrs['base_list_key']\n 
for i in range(h5_group.attrs['list_length']):\n list_to_be_filled[i] = {}\n self.read_dict_from_hdf5(\n data_dict=list_to_be_filled[i],\n h5_group=h5_group[base_list_key.format(i)])\n\n # The error is here: the list is extracted correctly but is not\n # added back to the data dict correctly\n data_dict = list_to_be_filled\n else:\n raise NotImplementedError('cannot read \"list_type\":\"{}\"'.format(\n h5_group.attrs['list_type']))\n return data_dict\n\n\ndef _encode_to_utf8(s):\n \"\"\"\n Required because h5py does not support python3 strings\n converts byte type to string\n \"\"\"\n return s.encode('utf-8')\n\n\ndef str_to_bool(s):\n if s == 'True':\n return True\n elif s == 'False':\n return False\n else:\n raise ValueError(\"Cannot convert {} to a bool\".format(s))\n\nfrom qcodes.utils.helpers import deep_update, NumpyJSONEncoder\n\n\nclass HDF5FormatMetadata(HDF5Format):\n\n metadata_file = 'snapshot.json'\n\n def write_metadata(self, data_set, io_manager=None, location=None, read_first=False):\n \"\"\"\n Write all metadata in this DataSet to storage.\n\n Args:\n data_set (DataSet): the data we're storing\n\n io_manager (io_manager): the base location to write to\n\n location (str): the file location within io_manager\n\n read_first (bool, optional): read previously saved metadata before\n writing? The current metadata will still be used if\n there are changes, but if the saved metadata has information\n not present in the current metadata, it will be retained.\n Default False.\n \"\"\"\n\n # this statement is here to make the linter happy\n if io_manager is None or location is None:\n raise Exception('please set io_manager and location arguments ')\n\n if read_first:\n # In case the saved file has more metadata than we have here,\n # read it in first. But any changes to the in-memory copy should\n # override the saved file data.\n memory_metadata = data_set.metadata\n data_set.metadata = {}\n self.read_metadata(data_set)\n deep_update(data_set.metadata, memory_metadata)\n\n fn = io_manager.join(location, self.metadata_file)\n with io_manager.open(fn, 'w', encoding='utf8') as snap_file:\n json.dump(data_set.metadata, snap_file, sort_keys=True,\n indent=4, ensure_ascii=False, cls=NumpyJSONEncoder)\n\n def read_metadata(self, data_set):\n io_manager = data_set.io\n location = data_set.location\n fn = io_manager.join(location, self.metadata_file)\n if io_manager.list(fn):\n with io_manager.open(fn, 'r') as snap_file:\n metadata = json.load(snap_file, encoding='utf8')\n data_set.metadata.update(metadata)\n"
] |
[
[
"numpy.shape",
"numpy.multiply"
],
[
"numpy.isnan",
"numpy.array"
]
] |
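A minimal round-trip sketch for the HDF5Format dict (de)serialization in this row. The file name and dictionary contents are illustrative, and it assumes an h5py version old enough that Dataset.value (used by the reader above) still exists.

import h5py
from qcodes.data.hdf5_format import HDF5Format

fmt = HDF5Format()
meta = {'sample': 'dev1', 'temps': [1.2, 1.3], 'sweep': {'start': 0, 'stop': 1}}
with h5py.File('meta_demo.hdf5', 'a') as f:
    grp = f.create_group('metadata')
    # nested dicts become groups, scalars become attrs,
    # homogeneous numeric lists become datasets tagged list_type='array'
    fmt.write_dict_to_hdf5(meta, grp)
    restored = fmt.read_dict_from_hdf5({}, grp)
print(restored)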
SwetaSengupta/lambdata-SwetaSengupta
|
[
"a3f679f2cabb706b0c770f0cf9aebe57df7dcc59"
] |
[
"my_lambdata/my_script.py"
] |
[
"#my script.py\r\n\r\nfrom pandas import DataFrame\r\n#from my_mod import enlarge #this works\r\nfrom my_lambdata.my_mod import enlarge\r\n\r\nprint(\"Hello\")\r\n\r\ndf = DataFrame({\"a\":[1,2,3],\"b\":[4,5,6]})\r\nprint(df.head())\r\nx = 11\r\nprint(enlarge(x))\r\n"
] |
[
[
"pandas.DataFrame"
]
] |
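The my_mod module imported by this script is not part of the row. A hypothetical stand-in that makes the script runnable on its own might look like the following; the multiply-by-100 behavior of enlarge() is an assumption, not taken from the source.

from pandas import DataFrame

def enlarge(n):
    # hypothetical stand-in for my_lambdata.my_mod.enlarge
    return n * 100

df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
print(df.head())
print(enlarge(11))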
grace-mengke-hu/TRIANGULUM
|
[
"2c4c7c0e67f5fcbbd2cb0934839bb2784b80d47b"
] |
[
"Training/CRFannot_6concepts.py"
] |
[
"#!../../anaconda2/bin/python\n\nimport re\nimport pickle\nimport reader\nimport RegExp\nimport spacy\nimport CRFfeature\n\nfrom itertools import chain\nimport nltk\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.preprocessing import LabelBinarizer\nimport sklearn\nimport pycrfsuite\nimport time\n\n#load spacy english model\nnlp = spacy.load(\"en_core_web_sm\")\n\n\n#load feature data with filename\ndata1 = pickle.load( open( \"4_10_findAnotData.p\", \"rb\" ) )\ndata2 = pickle.load( open( \"11_50_findAnotData.p\", \"rb\" ) )\ndata3 = pickle.load( open( \"51_100_findAnotData.p\", \"rb\" ) )\ndata4 = pickle.load( open( \"101_218_findAnotData.p\", \"rb\" ) )\ndata5 = pickle.load( open( \"218_1000_findAnotData.p\", \"rb\" ) )\n\nfeatData = data1+data2+data3+data4+data5\n\nsentsTokenDic = {}\nfor d in featData: #[featData[0]]:#test on one sample\n\tcorpusFilename = d['filename']\n\tparseList = corpusFilename.split('/')\n\tadjudicationFilename = '/'.join(parseList[0:-2])+'/Adjudication/'+parseList[-1]+'.knowtator.xml'\n\tsubreddit = parseList[-1].split('_')[-1].replace('.txt','')\n\t#get corpus data and annotation data\n\tanotDicList = reader.annotFileReader(adjudicationFilename)\n\tcorpusData = reader.corpusFileReader(corpusFilename)\n\n\t#NLP NER and Annotation\n\tdoc = nlp(corpusData.decode('UTF8'))\n\t#sentsTokenDic = {}\n\t#dataSentList = []\n\tfor x in doc:\n\t\n\t\t#search for the annotation for this token\n\t\txStart = x.idx\n\t\txEnd = x.idx+len(x.text)-1\n\t\tclassLabel = ''\n\t\tfor anot in anotDicList:\n\t\t\tif min(anot['end'],xEnd)-max(anot['start'],xStart) >0:\n\t\t\t\tclassLabel = anot['class']\n\n\t\tif str(x.sent) in sentsTokenDic.keys():#REMARK: here str() is to convert any UTF8 char in string\n\t\t\tsentsTokenDic[str(x.sent)].append((str(x).decode('utf-8', 'ignore'), x.ent_type_, classLabel.decode('utf-8') ))\n\t\telse:#new sentence\n\t\t\tsentsTokenDic[str(x.sent)] = []\n\t\t\n\n#print(sentsTokenDic)\t\nprint('number of sentences: ',len(sentsTokenDic))\n\n#Form training and testing data\nsentenceData = sentsTokenDic.values()\n#print(sentenceData[0])\nX_train = [CRFfeature.sent2features(s) for s in sentenceData[0:2500]]\ny_train = [CRFfeature.sent2labels(s) for s in sentenceData[0:2500]]\n\nX_test = [CRFfeature.sent2features(s) for s in sentenceData[2500:]]\ny_test = [CRFfeature.sent2labels(s) for s in sentenceData[2500:]]\n\n#create pycrfsuite.Trainer and load the training data to CRFsuite\ntime_start = time.time()\ntrainer = pycrfsuite.Trainer(verbose=False)\n\nfor xseq, yseq in zip(X_train, y_train):\n\ttrainer.append(xseq, yseq)\n\n#Set training parameters. 
We will use L-BFGS training algorithm (it is default) with Elastic Net (L1 + L2) regularization.\ntrainer.set_params({\n 'c1': 1.0, # coefficient for L1 penalty\n 'c2': 1e-3, # coefficient for L2 penalty\n 'max_iterations': 50, # stop earlier\n\n # include transitions that are possible, but not observed\n 'feature.possible_transitions': True\n})\nprint(trainer.params())\ntime_train_end = time.time()\nprint(\"TRAINING TIME:\", time_train_end-time_start)\n\n#Training\n#%%time\ntrainer.train('redditUserBasedBin.crfsuite')\nprint(trainer.logparser.last_iteration)\n\n##Testing\ntagger = pycrfsuite.Tagger()\ntagger.open('redditUserBasedBin.crfsuite')\ny_pred = [tagger.tag(xseq) for xseq in X_test]\ntime_test_end = time.time()\nprint(\"TESTING TAGGING:\",time_test_end-time_start)\n\nconcepts = ['VAPING_MJ','VAPING_NIC/TOBACCO','COMBUST_MJ','COMBUST_TOBACCO','BRAND','SMOKING_CESSATION']\n\n#evaluation function\ndef bio_classification_report(y_true, y_pred):\n\t\"\"\"\n\tClassification report for a list of BIO-encoded sequences.\n\tIt computes token-level metrics and discards \"O\" labels.\n \n\tNote that it requires scikit-learn 0.15+ (or a version from github master)\n\tto calculate averages properly!\n\t\"\"\"\n\tlb = LabelBinarizer() \n\ty_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))\n\ty_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))\n \n\ttagset = set(lb.classes_)-{''} \n#['CO-USE_COMBUST_MJ_COMBUST_TOBACCO', 'DUAL-USE_COMBUST_MJ_VAPING_MJ', 'DUAL-USE_COMBUST_MJ_VAPE_NIC/TOBACCO', 'DUAL-USE_COMBUST_MJ_COMBUST_TOBACCO', 'DUAL-USE_VAPING_MJ_VAPING_NIC/TOBACCO','DUAL-USE_COMBUST_TOBACCO_VAPING_NIC/TOBACCO','DUAL-USE_VAPING_MJ_COMBUST_TOBACCO','POLY-USE_COMBUST_TOBACCO_COMBUST_MJ_VAPING_MJ','POLY-USE_COMBUST_TOBACCO_COMBUST_MJ_VAPING_NIC/TOBACCO','POLY_USE_COMBUST_TOABBCO_VAPING_MJ_VAPING_NIC/TOBACCO','POLY_USE_COMBUST_MJ_VAPING_MJ_VAPING_NIC/TOBACCO','COMBUST_MJ','COMBUST_TOBACCO','VAPING_MJ','VAPING_NIC/TOBACCO','BRAND','VAPING','SMOKING_CESSATION'] \n#set(lb.classes_) - {''}\n\ttagset = sorted(tagset)#NOT NEED SORTING FUNCTION: key=lambda tag: tag.split('-', 1)[::-1])\n\tclass_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}\n \n\treturn classification_report(\n\t\ty_true_combined,\n\t\ty_pred_combined,\n\t\tlabels = [class_indices[cls] for cls in tagset],\n\t\ttarget_names = tagset,\n\t)\n\n#Evaluation\ny_test_group = []\ny_pred_group = []\nfor i in range(len(y_test)):\n\ts_test = []\n\tfor t in y_test[i]:\n\t\tif t in concepts:\n\t\t\ts_test.append('Group_Concept')\n\t\telse:\n\t\t\ts_test.append(t)\n\ty_test_group.append(s_test)\n\n\ts_pred = []\n\tfor t in y_pred[i]:\n\t\tif t in concepts:\n\t\t\ts_pred.append('Group_Concept')\n\t\telse:\n\t\t\ts_pred.append(t)\n\ty_pred_group.append(s_pred)\n\t\t\t\n#print(y_test)\n#print(y_pred)\nprint(bio_classification_report(y_test_group, y_pred_group))\n\n\n"
] |
[
[
"sklearn.metrics.classification_report",
"sklearn.preprocessing.LabelBinarizer"
]
] |
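A toy illustration of the evaluation pattern used by bio_classification_report above: flatten the per-sentence label lists, binarize them, and report per-class metrics while discarding the empty "O"-style label. The sentences and label values here are made up.

from itertools import chain
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelBinarizer

y_true = [['', 'VAPING_MJ', ''], ['BRAND', '', '']]
y_pred = [['', 'VAPING_MJ', ''], ['', '', 'BRAND']]
lb = LabelBinarizer()
y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
tagset = sorted(set(lb.classes_) - {''})          # drop the empty label
class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
print(classification_report(y_true_combined, y_pred_combined,
                            labels=[class_indices[cls] for cls in tagset],
                            target_names=tagset))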
ljjyxz123/Non-local_pytorch
|
[
"b7bfd2501083b49f70100588eab8d17d1069aa75"
] |
[
"Non-Local_pytorch_0.3.1/lib/non_local_gaussian.py"
] |
[
"import torch\r\nfrom torch import nn\r\nfrom torch.nn import functional as F\r\n\r\n\r\nclass _NonLocalBlockND(nn.Module):\r\n def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):\r\n super(_NonLocalBlockND, self).__init__()\r\n\r\n assert dimension in [1, 2, 3]\r\n\r\n self.dimension = dimension\r\n self.sub_sample = sub_sample\r\n\r\n self.in_channels = in_channels\r\n self.inter_channels = inter_channels\r\n\r\n if self.inter_channels is None:\r\n self.inter_channels = in_channels // 2\r\n if self.inter_channels == 0:\r\n self.inter_channels = 1\r\n\r\n if dimension == 3:\r\n conv_nd = nn.Conv3d\r\n max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))\r\n bn = nn.BatchNorm3d\r\n elif dimension == 2:\r\n conv_nd = nn.Conv2d\r\n max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))\r\n bn = nn.BatchNorm2d\r\n else:\r\n conv_nd = nn.Conv1d\r\n max_pool_layer = nn.MaxPool1d(kernel_size=(2))\r\n bn = nn.BatchNorm1d\r\n\r\n self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\r\n kernel_size=1, stride=1, padding=0)\r\n\r\n if bn_layer:\r\n self.W = nn.Sequential(\r\n conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\r\n kernel_size=1, stride=1, padding=0),\r\n bn(self.in_channels)\r\n )\r\n nn.init.constant(self.W[1].weight, 0)\r\n nn.init.constant(self.W[1].bias, 0)\r\n else:\r\n self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\r\n kernel_size=1, stride=1, padding=0)\r\n nn.init.constant(self.W.weight, 0)\r\n nn.init.constant(self.W.bias, 0)\r\n\r\n if sub_sample:\r\n self.g = nn.Sequential(self.g, max_pool_layer)\r\n self.phi = max_pool_layer\r\n\r\n def forward(self, x):\r\n '''\r\n :param x: (b, c, t, h, w)\r\n :return:\r\n '''\r\n\r\n batch_size = x.size(0)\r\n\r\n g_x = self.g(x).view(batch_size, self.inter_channels, -1)\r\n\r\n g_x = g_x.permute(0, 2, 1)\r\n\r\n theta_x = x.view(batch_size, self.in_channels, -1)\r\n theta_x = theta_x.permute(0, 2, 1)\r\n\r\n if self.sub_sample:\r\n phi_x = self.phi(x).view(batch_size, self.in_channels, -1)\r\n else:\r\n phi_x = x.view(batch_size, self.in_channels, -1)\r\n\r\n f = torch.matmul(theta_x, phi_x)\r\n f_div_C = F.softmax(f, dim=-1)\r\n\r\n y = torch.matmul(f_div_C, g_x)\r\n y = y.permute(0, 2, 1).contiguous()\r\n y = y.view(batch_size, self.inter_channels, *x.size()[2:])\r\n W_y = self.W(y)\r\n z = W_y + x\r\n\r\n return z\r\n\r\n\r\nclass NONLocalBlock1D(_NonLocalBlockND):\r\n def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):\r\n super(NONLocalBlock1D, self).__init__(in_channels,\r\n inter_channels=inter_channels,\r\n dimension=1, sub_sample=sub_sample,\r\n bn_layer=bn_layer)\r\n\r\n\r\nclass NONLocalBlock2D(_NonLocalBlockND):\r\n def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):\r\n super(NONLocalBlock2D, self).__init__(in_channels,\r\n inter_channels=inter_channels,\r\n dimension=2, sub_sample=sub_sample,\r\n bn_layer=bn_layer)\r\n\r\n\r\nclass NONLocalBlock3D(_NonLocalBlockND):\r\n def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):\r\n super(NONLocalBlock3D, self).__init__(in_channels,\r\n inter_channels=inter_channels,\r\n dimension=3, sub_sample=sub_sample,\r\n bn_layer=bn_layer)\r\n\r\n\r\nif __name__ == '__main__':\r\n from torch.autograd import Variable\r\n import torch\r\n\r\n for (sub_sample, bn_layer) in [(True, True), (False, False), (True, False), (False, True)]:\r\n img = Variable(torch.zeros(2, 3, 
20))\r\n net = NONLocalBlock1D(3, sub_sample=sub_sample, bn_layer=bn_layer)\r\n out = net(img)\r\n print(out.size())\r\n\r\n img = Variable(torch.zeros(2, 3, 20, 20))\r\n net = NONLocalBlock2D(3, sub_sample=sub_sample, bn_layer=bn_layer)\r\n out = net(img)\r\n print(out.size())\r\n\r\n img = Variable(torch.randn(2, 3, 8, 20, 20))\r\n net = NONLocalBlock3D(3, sub_sample=sub_sample, bn_layer=bn_layer)\r\n out = net(img)\r\n print(out.size())\r\n\r\n\r\n\r\n\r\n\r\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.functional.softmax",
"torch.zeros",
"torch.randn",
"torch.matmul",
"torch.nn.MaxPool3d",
"torch.nn.MaxPool2d",
"torch.nn.MaxPool1d",
"torch.nn.init.constant"
]
] |
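A hypothetical way to drop the NONLocalBlock2D defined above into a small CNN. The channel width and input size are illustrative, the import path assumes the lib package from this row is on sys.path, and an older PyTorch is assumed (the module targets 0.3.x-era APIs such as nn.init.constant).

import torch
from torch import nn
from lib.non_local_gaussian import NONLocalBlock2D

net = nn.Sequential(
    nn.Conv2d(3, 32, kernel_size=3, padding=1),
    nn.ReLU(),
    NONLocalBlock2D(32, sub_sample=True, bn_layer=True),  # residual: spatial shape preserved
    nn.Conv2d(32, 32, kernel_size=3, padding=1),
)
out = net(torch.randn(2, 3, 20, 20))
print(out.size())  # (2, 32, 20, 20)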
turowicz/triton-inference-server
|
[
"79fda8ee30343faaca05788b75d4568c954a6b2e",
"79fda8ee30343faaca05788b75d4568c954a6b2e"
] |
[
"qa/L0_custom_param/param_test.py",
"qa/L0_unknown_rank/unknown_rank_test.py"
] |
[
"#!/usr/bin/python\n\n# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of NVIDIA CORPORATION nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport argparse\nimport numpy as np\nimport os\nimport sys\nfrom builtins import range\nimport tritongrpcclient as grpcclient\nimport tritonhttpclient as httpclient\nfrom tritonclientutils.utils import np_to_triton_dtype\n\nFLAGS = None\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', '--verbose', action=\"store_true\", required=False, default=False,\n help='Enable verbose output')\n parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8000',\n help='Inference server URL. Default is localhost:8000.')\n parser.add_argument('-i', '--protocol', type=str, required=False, default='http',\n help='Protocol (\"http\"/\"grpc\") used to ' +\n 'communicate with inference service. 
Default is \"http\".')\n\n FLAGS = parser.parse_args()\n if (FLAGS.protocol != \"http\") and (FLAGS.protocol != \"grpc\"):\n print(\"unexpected protocol \\\"{}\\\", expects \\\"http\\\" or \\\"grpc\\\"\".format(FLAGS.protocol))\n exit(1)\n\n client_util = httpclient if FLAGS.protocol == \"http\" else grpcclient\n\n model_name = \"param\"\n\n # Create the inference context for the model.\n client = client_util.InferenceServerClient(FLAGS.url, FLAGS.verbose)\n\n # Input tensor can be any size int32 vector...\n input_data = np.zeros(shape=1, dtype=np.int32)\n\n inputs = [client_util.InferInput(\n \"INPUT\", input_data.shape, np_to_triton_dtype(input_data.dtype))]\n inputs[0].set_data_from_numpy(input_data)\n\n results = client.infer(model_name, inputs)\n\n print(results)\n\n params = results.as_numpy(\"OUTPUT\")\n if params is None:\n print(\"error: expected 'OUTPUT'\")\n sys.exit(1)\n\n if params.size != 5:\n print(\"error: expected 5 output strings, got {}\".format(params.size))\n sys.exit(1)\n\n # Element type returned is different between HTTP and GRPC client.\n # The former is str and the latter is bytes\n params = [p if type(p) == str else p.decode('utf8') for p in params]\n p0 = params[0]\n if not p0.startswith(\"INPUT=0\"):\n print(\"error: expected INPUT=0 string, got {}\".format(p0))\n sys.exit(1)\n\n p1 = params[1]\n if not p1.startswith(\"server_0=\"):\n print(\"error: expected server_0 parameter, got {}\".format(p1))\n sys.exit(1)\n\n p2 = params[2]\n if not p2.startswith(\"server_1=\"):\n print(\"error: expected server_1 parameter, got {}\".format(p2))\n sys.exit(1)\n if not p2.endswith(\"L0_custom_param/models\"):\n print(\"error: expected model-repository to end with L0_custom_backend/models, got {}\".format(p2));\n sys.exit(1)\n\n # configuration param values can be returned in any order.\n p3 = params[3]\n p4 = params[4]\n if p3.startswith(\"param1\"):\n p3, p4 = p4, p3\n\n if p3 != \"param0=value0\":\n print(\"error: expected param0=value0, got {}\".format(p3));\n sys.exit(1)\n\n if p4 != \"param1=value1\":\n print(\"error: expected param1=value1, got {}\".format(p4));\n sys.exit(1)\n",
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of NVIDIA CORPORATION nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport unittest\nimport numpy as np\nimport tritonhttpclient\nfrom tritonclientutils.utils import *\n\nclass UnknownRankTest(unittest.TestCase):\n # helper function to generate requests to the server\n def infer_unknown(self, model_name, tensor_shape):\n print(\"About to run the test\")\n input_data = np.random.random_sample(tensor_shape).astype(np.float32)\n client = tritonhttpclient.InferenceServerClient('localhost:8000')\n inputs = [tritonhttpclient.InferInput(\"INPUT\", input_data.shape, np_to_triton_dtype(input_data.dtype))]\n inputs[0].set_data_from_numpy(input_data)\n results = client.infer(model_name, inputs)\n self.assertTrue(np.array_equal(results.as_numpy('OUTPUT'), input_data))\n\n\n def test_success(self):\n model_name=\"unknown_rank_success\"\n tensor_shape=(1,)\n try:\n self.infer_unknown(model_name, tensor_shape)\n except InferenceServerException as ex:\n self.assertTrue(False, \"unexpected error {}\".format(ex))\n\n def test_wrong_output(self):\n tensor_shape=(1,)\n model_name=\"unknown_rank_wrong_output\"\n try:\n self.infer_unknown(model_name, tensor_shape)\n except InferenceServerException as ex:\n self.assertTrue(\"tensor \\'OUTPUT\\': the model expects 1 dimensions \" \\\n \"(shape [1]) but the model configuration specifies 2 dimensions \" \\\n \"(shape [1,1])\" in ex.message())\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.zeros"
],
[
"numpy.random.random_sample"
]
] |
KroniaPytorch/KroniaModels
|
[
"8ac6e66771ff24c64a6e4a63d4799b2aba1c95e4"
] |
[
"CottonDisease/CottonModel.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass CottonTypeModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3,16,3,1)\n self.conv2 = nn.Conv2d(16,32,3,1)\n self.conv3 = nn.Conv2d(32,64,3,1)\n self.conv4 = nn.Conv2d(64,64,3,1)\n self.conv5 = nn.Conv2d(64,64,3,1)\n self.fc1 = nn.Linear(1600,128)\n self.fc2 = nn.Linear(128,4)\n \n def forward(self,X):\n X = F.relu(self.conv1(X))\n X = F.max_pool2d(X,2,2)\n \n X = F.relu(self.conv2(X))\n X = F.max_pool2d(X,2,2)\n \n X = F.relu(self.conv3(X))\n X = F.max_pool2d(X,2,2)\n \n X = F.relu(self.conv4(X))\n X = F.max_pool2d(X,2,2)\n \n X = F.relu(self.conv5(X))\n X = F.max_pool2d(X,2,2)\n \n X = X.view(-1,1600)\n \n X = F.relu(self.fc1(X))\n X = self.fc2(X)\n return F.log_softmax(X,dim=1)"
] |
[
[
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.functional.max_pool2d",
"torch.nn.functional.log_softmax"
]
] |
Hummer12007/pytorch-metric-learning
|
[
"93e6421addc822d7565c64d7ff166d46be757acd"
] |
[
"src/pytorch_metric_learning/losses/angular_loss.py"
] |
[
"#! /usr/bin/env python3\n\nfrom .base_metric_loss_function import BaseMetricLossFunction\nimport numpy as np\nimport torch\nfrom ..utils import loss_and_miner_utils as lmu\n\nclass AngularLoss(BaseMetricLossFunction):\n \"\"\"\n Implementation of https://arxiv.org/abs/1708.01682\n Args:\n alpha: The angle (as described in the paper), specified in degrees.\n \"\"\"\n def __init__(self, alpha, **kwargs):\n super().__init__(**kwargs)\n self.alpha = torch.tensor(np.radians(alpha))\n self.add_to_recordable_attributes(list_of_names=[\"average_angle\"])\n \n def compute_loss(self, embeddings, labels, indices_tuple):\n anchors, positives, keep_mask = self.set_stats_get_pairs(embeddings, labels, indices_tuple)\n if anchors is None: return 0\n\n sq_tan_alpha = torch.tan(self.alpha) ** 2\n ap_dot = torch.sum(anchors * positives, dim=1, keepdim=True)\n ap_matmul_embeddings = torch.matmul((anchors + positives),(embeddings.unsqueeze(2)))\n ap_matmul_embeddings = ap_matmul_embeddings.squeeze(2).t()\n\n final_form = (4 * sq_tan_alpha * ap_matmul_embeddings) - (2 * (1 + sq_tan_alpha) * ap_dot)\n final_form = self.maybe_modify_loss(final_form)\n return torch.mean(lmu.logsumexp(final_form, keep_mask=keep_mask, add_one=True))\n\n def set_stats_get_pairs(self, embeddings, labels, indices_tuple):\n a1, p, a2, _ = lmu.convert_to_pairs(indices_tuple, labels)\n if len(a1) == 0 or len(a2) == 0:\n return [None]*3\n anchors, positives = embeddings[a1], embeddings[p]\n keep_mask = (labels[a1].unsqueeze(1) != labels.unsqueeze(0)).float()\n\n centers = (anchors + positives) / 2\n ap_dist = torch.nn.functional.pairwise_distance(anchors, positives, 2)\n nc_dist = torch.norm(centers - embeddings.unsqueeze(1), p=2, dim=2).t()\n angles = torch.atan(ap_dist.unsqueeze(1) / (2*nc_dist))\n average_angle = torch.sum(angles*keep_mask) / torch.sum(keep_mask)\n self.average_angle = np.degrees(average_angle.item())\n return anchors, positives, keep_mask\n\n def maybe_modify_loss(self, x):\n return x\n"
] |
[
[
"torch.nn.functional.pairwise_distance",
"numpy.radians",
"torch.sum",
"torch.tan"
]
] |
nosratullah/econophysics
|
[
"d084abfd0b2a15469a1da6d4f20629ec789e5059"
] |
[
"data_reading.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport datetime as dt\nimport pandas_datareader.data as web\n\nstyle.use('ggplot')\n\n'''\nstart = dt.datetime(2015,1,1)\nend = dt.datetime(2019,12,31)\ndf = web.DataReader('TSLA', 'yahoo', start, end)\ndf.to_csv('tesla.csv')\n'''\nname = 'AAPL'\ndf = pd.read_csv('data/{}.csv'.format(name), parse_dates=True, index_col=0)\ndf['100MA'] = df['Adj Close'].rolling(window = 100, min_periods = 0).mean()\n\nax1 = plt.subplot2grid((6,1), (0,0), rowspan = 5, colspan=1)\nax2 = plt.subplot2grid((6,1), (5,0), rowspan = 1, colspan=1, sharex= ax1)\n\n\nax1.plot(df.index, df['Adj Close'], linewidth=0.5, color='blue', label='Adj Close')\nax1.plot(df.index, df['100MA'], color='red', label='EMA')\nplt.legend(loc=2)\n\nax2.bar(df.index, df['Volume'], label='Volume')\nplt.legend(loc=2)\nplt.savefig('{}.png'.format(name))\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot2grid",
"matplotlib.style.use"
]
] |
flowmatters/openwater
|
[
"8c48fc1694f54c2735a7ac451fcce56df498e520"
] |
[
"openwater/array_params.py"
] |
[
"\nimport numpy as np\n\ndef get_parameter_locations(desc,current):\n result = []\n\n dim_sizes = {}\n current_parameter_idx = 0\n\n for p in desc['Parameters']:\n p_start = current_parameter_idx\n size = 1\n if ('Dimensions' in p) and len(p['Dimensions']):\n for d in p['Dimensions']:\n size *= dim_sizes[d]\n size = int(size)\n\n if p['Name'] in desc['Dimensions']:\n dim_size = max(current[current_parameter_idx,:])\n dim_sizes[p['Name']] = dim_size\n\n current_parameter_idx += size\n p_end = current_parameter_idx\n result.append((p_start,p_end))\n return result\n\ndef param_starts(desc,current):\n param_locs = get_parameter_locations(desc,np.array(current))\n param_starts = {}\n current_idx=0\n for ix,param in enumerate(desc['Parameters']):\n p_start = param_locs[ix][0]\n param_starts[param['Name']]=p_start\n return param_starts\n\n "
] |
[
[
"numpy.array"
]
] |
mancunian1792/FewSum
|
[
"c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e"
] |
[
"fewsum/modelling/generators/beamer.py"
] |
[
"import torch as T\nfrom torch import Tensor\nfrom mltoolkit.mlmo.utils.helpers.pytorch.data import adjust_tensor_to_beam_size\nfrom mltoolkit.mlutils.helpers.general import merge_dicts\nfrom mltoolkit.mlmo.utils.tools import DecState\nfrom mltoolkit.mlmo.utils.helpers.general import collect_arts\nfrom fewsum.utils.tools import BeamSearch\n\n\nclass Beamer(object):\n \"\"\"\n Wrapper class for PyTorch beam search that works with recurring values that\n need to be passed back to the generator/decoder.\n \"\"\"\n\n def __init__(self, decoding_func, start_id, end_id, device='cpu',\n validate_dec_out=True, len_norm=False, excl_ids=None,\n n_best=1, beam_size=1, **kwargs):\n \"\"\"\n :param decoding_func: a function that inputs:\n 1. prev token ids [batch_size * beam_size, 1]\n 2. recurring values (e.g. hidden states)\n and outputs DecState object with at least:\n 1. word log probs [batch_size * beam_size, 1,\n vocab_size]\n 2. hidden state [batch_size * beam_size,\n hidden_dim]\n :param start_id: id of the symbol that starts sequences.\n :param end_id: id of the symbol that ends sequences.\n :param device: self-explanatory.\n :param validate_dec_out: whether to throw an error if the decoding function\n outputs values above [-inf, 0].\n :param len_norm: whether sequence scores should be normalized\n by length. Such that the search would not favor shorter \n sequences.\n :param excl_ids: list of ids that should be excluded from scores.\n :param n_best: determines how many candidates in the search are necessary\n to find in order for search to be considered as finished.\n :param beam_size: the number of candidates that are considered at every\n step.\n \"\"\"\n super(Beamer, self).__init__()\n self.decoding_func = decoding_func\n self.device = device\n self.beam_size = beam_size\n self._beam_search_constr = lambda bs, min_lens: BeamSearch(\n beam_size=beam_size,\n batch_size=bs,\n start_id=start_id,\n end_id=end_id,\n device=device, len_norm=len_norm,\n min_lens=min_lens,\n excl_ids=excl_ids, n_best=n_best,\n **kwargs)\n self.val_dec_out = validate_dec_out\n\n def __call__(self, init_dec_state, max_steps, minimum=None, min_lens=None,\n **dec_kwargs):\n \"\"\"\n Decodes sequence token ids based on the initial state until the maximum\n number of steps is reached. \n\n :param init_dec_state: DecState object containing the initial state of\n the decoder.\n :param max_steps: the number of times the decoder is executed before\n collection of completed hypotheses starts.\n :param minimum: the minimum numbers of outputs (even if incomplete).\n :param min_lens: minimum lengths of decoded sequences.\n :param dec_kwargs: additional parameters that are passed on the decoding\n function directly. E.g. values to attend. 
Will try\n to adjust them to `beam_size`.\n :return: best_seqs: list of lists, where each contains word ids of the \n best (completed) hypotheses.\n best_coll_vals(opt): list of lists, elements are PyTorch\n elements.\n \"\"\"\n assert max_steps > 0\n if not isinstance(init_dec_state, DecState) or \\\n init_dec_state.rec_vals is None or\\\n not len(init_dec_state.rec_vals):\n raise TypeError(\"Please provide a valid decoder's initial state\"\n \" object.\")\n first_key = next(iter(init_dec_state.rec_vals.keys()))\n bs = len(init_dec_state.rec_vals[first_key])\n beam_search = self._beam_search_constr(bs, min_lens)\n\n # replicating recurring values and kwargs to adjust to the beam size\n init_dec_state = _adjust_init_state_to_beam(init_dec_state, self.beam_size)\n dec_kwargs = _adjust_kwargs_to_beam(dec_kwargs, beam_size=self.beam_size)\n rec_vals = init_dec_state.rec_vals\n coll_vals = {} # additional artifacts that are produced by the decoder\n\n for t in range(max_steps):\n if beam_search.done():\n break\n\n if t > 0:\n assert rec_vals is not None\n # shuffling recurring values based on back-pointers\n bp = beam_search.get_current_origin()\n for k, v in rec_vals.items():\n rec_vals[k] = _bp_shuffle(v, bp, beam_size=self.beam_size)\n\n # merging static params and the recurring ones\n new_kwargs = merge_dicts(rec_vals, dec_kwargs)\n\n prev_word_ids = beam_search.get_current_state()\n prev_word_ids = prev_word_ids.view(bs*self.beam_size, 1)\n out = self.decoding_func(prev_word_ids, **new_kwargs)\n\n assert isinstance(out, DecState)\n word_log_probs = out.word_scores\n rec_vals = out.rec_vals\n\n # collecting artifacts if produced by the decoder\n if out.coll_vals:\n collect_arts(coll_vals, out.coll_vals)\n\n if self.val_dec_out and not (word_log_probs <= 0. 
+ 1e-4).all():\n raise ValueError(\"Please adjust the decoding function as it \"\n \"should provide valid log-probabilities.\")\n\n word_log_probs = word_log_probs.view(bs, self.beam_size, -1)\n beam_search.advance(word_log_probs)\n\n if coll_vals:\n # concatenating artifacts produced by the decoder over time-steps\n coll_vals = {k: T.stack(v, dim=0) for k, v in coll_vals.items()}\n\n res = beam_search.get_finished_best(minimum=minimum, **coll_vals)\n\n if coll_vals:\n sel_seq = res[0]\n sel_coll_vals = res[1]\n else:\n sel_seq = res\n sel_coll_vals = None\n\n sel_seq = _dec_out_formatter(sel_seq)\n\n return sel_seq, sel_coll_vals\n\n\ndef _bp_shuffle(tensor, bp, beam_size):\n \"\"\"Shuffles 2D and 3D tensor according to back-pointers.\"\"\"\n # TODO: make generalized to any number of dimensions\n bs = tensor.size(0)//beam_size\n if len(tensor.shape) == 2:\n tensor = tensor.view(bs, beam_size, -1)\n tensor = tensor[T.arange(bs).reshape((-1, 1)), bp].view(bs * beam_size, -1)\n elif len(tensor.shape) == 3:\n dim1 = tensor.size(1)\n tensor = tensor.view(bs, beam_size, dim1, -1)\n tensor = tensor[T.arange(bs).reshape((-1, 1)), bp].view(bs * beam_size,\n dim1, -1)\n elif len(tensor.shape) == 4:\n dim1 = tensor.size(1)\n dim2 = tensor.size(2)\n tensor = tensor.view(bs, beam_size, dim1, dim2, -1)\n tensor = tensor[T.arange(bs).unsqueeze(1), bp].view(bs*beam_size, dim1,\n dim2, -1)\n else:\n raise ValueError(\"At the moment the decoder does not support 4D+\"\n \" tensors.\")\n return tensor\n\n\ndef _adjust_kwargs_to_beam(kwargs, beam_size):\n for k, v in kwargs.items():\n if isinstance(v, Tensor):\n kwargs[k] = adjust_tensor_to_beam_size(v, beam_size=beam_size)\n return kwargs\n\n\ndef _adjust_init_state_to_beam(init_dec_state, beam_size):\n \"\"\"\n Replicates recurring values in the initial decoder's state based on the\n beam size.\n \"\"\"\n for k, v in init_dec_state.rec_vals.items():\n init_dec_state.rec_vals[k] = adjust_tensor_to_beam_size(v, beam_size)\n return init_dec_state\n\n\ndef _dec_out_formatter(out):\n \"\"\"Converts PyTorch int sequences (word ids) to Python integers.\"\"\"\n res = []\n for seq in out:\n tmp = []\n for elem in seq:\n elem = elem.to('cpu').numpy()\n if len(elem.shape) == 0:\n elem = elem.item()\n tmp.append(elem)\n res.append(tmp)\n return res\n"
] |
[
[
"torch.stack",
"torch.arange"
]
] |
ewanlee/ICLR2020-OpenReviewData
|
[
"81560dad234e9485f35e473607c4101d0012f05a"
] |
[
"crawl_data.py"
] |
[
"import numpy as np\nimport h5py\nimport string\n\nAFTER_DECISION = False\n\n# Meta data of papers\nclass PaperMeta(object):\n def __init__(self, title, abstract, keyword, rating, url, withdrawn, desk_reject, decision):\n self.title = title # str\n self.abstract = abstract # str\n self.keyword = keyword # list[str]\n self.rating = rating # list[int]\n self.url = url\n self.withdrawn = withdrawn\n self.desk_reject = desk_reject \n self.decision = decision\n \n if len(self.rating) > 0:\n self.average_rating = np.mean(rating)\n else:\n self.average_rating = -1\n\n \nclass Keyword(object):\n def __init__(self, keyword, frequency, rating):\n self.keyword = keyword # list[str]\n self.frequency = frequency\n self.rating = rating # list[int] \n \n def average_rating(self):\n if len(self.rating) > 0:\n return np.mean(self.rating)\n else:\n return -1\n \n def update_frequency(self, frequency):\n self.frequency += frequency\n \n def update_rating(self, rating):\n self.rating = np.concatenate((self.rating, rating))\n \n \ndef write_meta(meta_list, filename):\n f = h5py.File(filename, 'w')\n for i, m in enumerate(meta_list):\n grp = f.create_group(str(i))\n grp['title'] = m.title\n grp['abstract'] = m.abstract\n grp['keyword'] = '#'.join(m.keyword)\n grp['rating'] = m.rating\n grp['url'] = m.url\n grp['withdrawn'] = m.withdrawn \n grp['desk_reject'] = m.desk_reject \n grp['decision'] = m.decision\n f.close()\n \n \ndef read_meta(filename):\n f = h5py.File(filename, 'r')\n meta_list = []\n for k in list(f.keys()):\n meta_list.append(PaperMeta(\n f[k]['title'].value, \n f[k]['abstract'].value, \n f[k]['keyword'].value.split('#'),\n f[k]['rating'].value,\n f[k]['url'].value,\n f[k]['withdrawn'].value, \n f[k]['desk_reject'].value, \n f[k]['decision'].value, \n ))\n return meta_list\n\n\ndef crawl_meta(meta_hdf5=None, write_meta_name='data.hdf5'):\n \n if meta_hdf5 is None:\n # Crawl the meta data from OpenReview\n # Set up a browser to crawl from dynamic web pages \n from selenium import webdriver\n from selenium.webdriver.chrome.options import Options\n \n # from pyvirtualdisplay import Display\n # display = Display(visible=0, size=(800, 800))\n # display.start()\n \n import time\n executable_path = '/usr/local/bin/chromedriver'\n options = Options()\n options.add_argument(\"--headless\")\n browser = webdriver.Chrome(options=options, executable_path=executable_path) \n \n # Load all URLs for all ICLR submissions\n urls = []\n with open('urls.txt') as f:\n urls = f.readlines()\n urls = [url.strip() for url in urls]\n \n meta_list = [] \n wait_time = 0.25\n max_try = 1000\n for i, url in enumerate(urls):\n try:\n browser.get(url)\n time.sleep(wait_time)\n key = browser.find_elements_by_class_name(\"note_content_field\")\n key = [k.text for k in key]\n withdrawn = 'Withdrawal Confirmation:' in key\n desk_reject = 'Desk Reject Comments:' in key\n value = browser.find_elements_by_class_name(\"note_content_value\")\n value = [v.text for v in value]\n\n # title\n title = string.capwords(browser.find_element_by_class_name(\"note_content_title\").text)\n # abstract\n valid = False\n tries = 0\n while not valid:\n if 'Abstract:' in key:\n valid = True\n else:\n time.sleep(wait_time)\n tries += 1\n key = browser.find_elements_by_class_name(\"note_content_field\")\n key = [k.text for k in key]\n withdrawn = 'Withdrawal Confirmation:' in key\n value = browser.find_elements_by_class_name(\"note_content_value\")\n value = [v.text for v in value] \n if tries >= max_try:\n print('Reached max try: {} ({})'.format(title, 
url))\n break\n abstract = ' '.join(value[key.index('Abstract:')].split('\\n'))\n # keyword\n if 'Keywords:' in key:\n keyword = value[key.index('Keywords:')].split(',')\n keyword = [k.strip(' ') for k in keyword]\n keyword = [''.join(string.capwords(k).split(' ')) for k in keyword if not k == '']\n for j in range(len(keyword)):\n if '-' in keyword[j]:\n keyword[j] = ''.join([string.capwords(kk) for kk in keyword[j].split('-')]) \n else:\n keyword = []\n # rating\n rating_idx = [i for i, x in enumerate(key) if x == \"Rating:\"]\n rating = []\n if len(rating_idx) > 0:\n for idx in rating_idx:\n rating.append(int(value[idx].split(\":\")[0]))\n # decision\n if 'Recommendation:' in key:\n decision = value[key.index('Recommendation:')]\n else:\n decision = 'N/A'\n \n withdrawn_or_desk_reject = withdrawn or desk_reject\n \n print('[{}] [Abs: {} chars, keywords: {}, ratings: {}{}] {}{}'.format(\n i+1, len(abstract), len(keyword), rating, \n '' if not AFTER_DECISION else ', decision: {}'.format(decision), \n title, '' if not withdrawn_or_desk_reject else ' ({})'.format(\n 'withdrawn' if withdrawn else 'desk reject'\n ))\n )\n meta_list.append(PaperMeta(title, abstract, keyword, rating, url, \n withdrawn, desk_reject, decision))\n except:\n print('Failed to load {}'.format(url))\n \n # Save the crawled data\n write_meta(meta_list, write_meta_name)\n else:\n # Load the meta data from local\n meta_list = read_meta(meta_hdf5)\n return meta_list\n\n# Get the meta data\nmeta_list = crawl_meta()\nnum_withdrawn = len([m for m in meta_list if m.withdrawn or m.desk_reject])\nprint('Number of submissions: {} (withdrawn/desk reject submissions: {})'.format(\n len(meta_list), num_withdrawn))\n"
] |
[
[
"numpy.concatenate",
"numpy.mean"
]
] |
Yuri-x/vkit
|
[
"c583149b860064549694517b67f3b71c50044332"
] |
[
"vkit/augmentation/geometric_distortion/mls.py"
] |
[
"from typing import Sequence, Tuple\n\nimport numpy as np\nimport attr\n\nfrom vkit.image.type import VImage\nfrom vkit.label.type import VPoint\nfrom .grid_rendering.interface import PointProjector\nfrom .grid_rendering.grid_creator import create_src_image_grid\nfrom .interface import GeometricDistortionImageGridBased, StateImageGridBased\n\n\n@attr.define\nclass SimilarityMlsConfig:\n src_handle_points: Sequence[VPoint]\n dst_handle_points: Sequence[VPoint]\n grid_size: int\n rescale_as_src: bool = False\n\n\nclass SimilarityMlsPointProjector(PointProjector):\n\n def __init__(self, src_handle_points: Sequence[VPoint], dst_handle_points: Sequence[VPoint]):\n self.src_handle_points = src_handle_points\n self.dst_handle_points = dst_handle_points\n\n self.src_xy_pair_to_dst_point = {\n (src_point.x, src_point.y): dst_point\n for src_point, dst_point in zip(src_handle_points, dst_handle_points)\n }\n\n self.src_handle_np_points = np.asarray(\n [(point.x, point.y) for point in src_handle_points],\n dtype=np.int32,\n )\n self.dst_handle_np_points = np.asarray(\n [(point.x, point.y) for point in dst_handle_points],\n dtype=np.int32,\n )\n\n def project_point(self, src_point):\n '''\n Calculate the corresponding dst point given the src point.\n Paper: https://people.engr.tamu.edu/schaefer/research/mls.pdf\n '''\n src_xy_pair = (src_point.x, src_point.y)\n\n if src_xy_pair in self.src_xy_pair_to_dst_point:\n # Identity.\n # NOTE: clone is important since this point could be changed later.\n # TODO: re-think the immutable design.\n return self.src_xy_pair_to_dst_point[src_xy_pair].clone()\n\n # Calculate the distance to src handles.\n src_distance_squares = self.src_handle_np_points.copy()\n src_distance_squares[:, 0] -= src_point.x\n src_distance_squares[:, 1] -= src_point.y\n np.square(src_distance_squares, out=src_distance_squares)\n # (N), and should not contain 0.0.\n src_distance_squares = np.sum(src_distance_squares, axis=1)\n\n # Calculate weights based on distances.\n # (N), and should not contain inf.\n with np.errstate(divide='raise'):\n src_distance_squares_inverse = 1 / src_distance_squares\n weights = src_distance_squares_inverse / np.sum(src_distance_squares_inverse)\n\n # (2), the weighted centroids.\n src_centroid = np.matmul(weights, self.src_handle_np_points)\n dst_centroid = np.matmul(weights, self.dst_handle_np_points)\n\n # (N, 2)\n src_hat = self.src_handle_np_points - src_centroid\n dst_hat = self.dst_handle_np_points - dst_centroid\n\n # (N, 2)\n src_hat_vert = src_hat[:, [1, 0]]\n src_hat_vert[:, 0] *= -1\n\n # Calculate matrix A.\n src_centroid_x, src_centroid_y = src_centroid\n src_mat_anchor = np.transpose(\n np.array(\n [\n # v - p*\n (\n src_point.x - src_centroid_x,\n src_point.y - src_centroid_y,\n ),\n # -(v - p*)^vert\n (\n src_point.y - src_centroid_y,\n -(src_point.x - src_centroid_x),\n ),\n ],\n dtype=np.float32,\n )\n )\n # (N, 2)\n src_mat_row0 = np.matmul(src_hat, src_mat_anchor)\n src_mat_row1 = np.matmul(-src_hat_vert, src_mat_anchor)\n # (N, 2, 2)\n src_mat = (\n np.expand_dims(np.expand_dims(src_distance_squares_inverse, axis=1), axis=1)\n * np.stack((src_mat_row0, src_mat_row1), axis=1)\n )\n\n # Calculate the point in dst.\n # (N, 2)\n dst_prod = np.squeeze(\n # (N, 1, 2)\n np.matmul(\n # (N, 1, 2)\n np.expand_dims(dst_hat, axis=1),\n # (N, 2, 2)\n src_mat,\n ),\n axis=1,\n )\n mu = np.sum(src_distance_squares_inverse * np.sum(src_hat * src_hat, axis=1))\n dst_x, dst_y = np.sum(dst_prod, axis=0) / mu + dst_centroid\n\n return VPoint(y=round(dst_y), 
x=round(dst_x))\n\n\nclass SimilarityMlsState(StateImageGridBased):\n\n def __init__(self, config: SimilarityMlsConfig, shape: Tuple[int, int]):\n height, width = shape\n\n super().__init__(\n src_image_grid=create_src_image_grid(height, width, config.grid_size),\n point_projector=SimilarityMlsPointProjector(\n config.src_handle_points,\n config.dst_handle_points,\n ),\n )\n\n self.dst_handle_points = list(map(self.shift_and_rescale_point, config.dst_handle_points))\n\n\nsimilarity_mls = GeometricDistortionImageGridBased(\n config_cls=SimilarityMlsConfig,\n state_cls=SimilarityMlsState,\n)\n\n\ndef debug():\n from vkit.opt import get_data_folder\n folder = get_data_folder(__file__)\n\n from vkit.label.type import VPolygon, VPointList\n from .interface import debug_geometric_distortion\n\n config = SimilarityMlsConfig(\n src_handle_points=[\n VPoint(y=10, x=10),\n VPoint(y=10, x=200),\n VPoint(y=200, x=200),\n VPoint(y=200, x=10),\n ],\n dst_handle_points=[\n VPoint(y=10, x=10),\n VPoint(y=10, x=550),\n VPoint(y=200, x=150),\n VPoint(y=200, x=10),\n ],\n grid_size=20,\n rescale_as_src=True,\n )\n\n src_polygon = VPolygon(\n VPointList([\n VPoint(y=100, x=100),\n VPoint(y=100, x=300),\n VPoint(y=300, x=300),\n VPoint(y=300, x=100),\n ])\n )\n\n state = debug_geometric_distortion(\n 'similarity-mls',\n similarity_mls,\n config,\n src_polygon,\n folder,\n 'Lenna.png',\n )\n assert state\n\n from .grid_rendering.visualization \\\n import visualize_image_grid\n from vkit.label.visualization import visualize_points\n\n visualize_points(\n visualize_image_grid(state.src_image_grid),\n config.src_handle_points,\n style='circle-3',\n ).to_file(f'{folder}/similarity-mls-src-grid.png')\n visualize_points(\n visualize_image_grid(state.dst_image_grid),\n state.dst_handle_points,\n style='circle-3',\n ).to_file(f'{folder}/similarity-mls-dst-grid.png')\n\n\ndef debug_video_frames():\n from vkit.opt import get_data_folder\n folder = get_data_folder(__file__)\n\n from .grid_rendering.visualization import visualize_image_grid\n from vkit.label.visualization import visualize_points\n\n src_image = VImage.from_file(f'{folder}/Lenna.png')\n\n num_frames = 200\n for idx in range(num_frames):\n print(idx)\n ratio = (num_frames - 1 - idx) / (num_frames - 1)\n\n config = SimilarityMlsConfig(\n src_handle_points=[\n VPoint(y=100, x=100),\n VPoint(y=100, x=340),\n VPoint(y=340, x=340),\n VPoint(y=340, x=100),\n ],\n dst_handle_points=[\n VPoint(y=100, x=100),\n VPoint(y=100, x=340),\n VPoint(y=220 + round((340 - 220) * ratio), x=220 + round((340 - 220) * ratio)),\n VPoint(y=340, x=100),\n ],\n grid_size=15,\n rescale_as_src=True,\n )\n\n state = similarity_mls.generate_state(config, src_image)\n assert state\n dst_image = similarity_mls.distort_image(config, src_image, state)\n dst_image.to_file(f'{folder}/frames/{idx}.png')\n visualize_points(\n visualize_image_grid(state.dst_image_grid),\n state.dst_handle_points,\n style='circle-3',\n ).to_file(f'{folder}/grid-frames/{idx}.png')\n\n\ndef debug_create_video():\n from vkit.opt import get_data_folder\n folder = get_data_folder(__file__)\n\n combined_mats = []\n for idx in range(200):\n image = VImage.from_file(f'{folder}/frames/{idx}.png')\n grid = VImage.from_file(f'{folder}/grid-frames/{idx}.png')\n\n assert image.shape == grid.shape\n combined_mat = np.zeros((image.height * 2, image.width, 3), dtype=np.uint8)\n combined_mat[:image.height, :] = image.mat\n combined_mat[image.height:, :] = grid.mat\n\n combined_mats.append(combined_mat)\n\n from moviepy.editor 
import ImageSequenceClip\n clip = ImageSequenceClip(combined_mats, fps=30)\n clip.write_videofile(f'{folder}/video.mp4')\n"
] |
[
[
"numpy.square",
"numpy.expand_dims",
"numpy.asarray",
"numpy.matmul",
"numpy.stack",
"numpy.errstate",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
francismontalbo/attention-is-all-you-need-paper
|
[
"21ba3e48917da0c6808126d183bece6a9969cfd2"
] |
[
"src/train.py"
] |
[
"import os\nfrom logging import log\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport wandb\nfrom config import configs\nfrom learner import Learner\nfrom scheduler import CustomScheduler\nfrom dataset import get_translation_dataloaders\nfrom callbacks import CheckpointSaver, MoveToDeviceCallback, TrackLoss, TrackExample, TrackBleu\nfrom architectures.machine_translation_transformer import MachineTranslationTransformer\n\n# Initialize configuration\nimport wandb\nfrom config import configs\nconfig_name='unofficial_single_gpu_config' # MODIFY THIS TO CHANGE CONFIGURATION\nwandb.init(config=configs[config_name],project=\"attention-is-all-you-need-paper\", entity=\"bkoch4142\")\n\n# Configure Logging\nfrom utils.logconf import logging\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\n# Seed the Random Number Generators\nimport torch\ntorch.manual_seed(0)\nimport random\nrandom.seed(0)\nimport numpy as np\nnp.random.seed(0)\n\n\nclass TrainingApp:\n def __init__(self):\n\n log.info('----- Training Started -----')\n\n # Device handling\n if wandb.config.DEVICE=='gpu':\n if not torch.cuda.is_available():\n raise ValueError('GPU is not available.')\n self.device = 'cuda'\n log.info(f'Device name is {torch.cuda.get_device_name()}')\n else:\n log.info(f'Device name is CPU')\n self.device='cpu'\n\n def main(self):\n\n train_dl, val_dl = get_translation_dataloaders(\n dataset_size=wandb.config.DATASET_SIZE,\n vocab_size=wandb.config.VOCAB_SIZE,\n tokenizer_save_pth=os.path.join(wandb.config.RUNS_FOLDER_PTH,wandb.config.RUN_NAME,'tokenizer.json'),\n tokenizer_type=wandb.config.TOKENIZER_TYPE,\n batch_size=wandb.config.BATCH_SIZE,\n report_summary=True,\n max_seq_len=wandb.config.MAX_SEQ_LEN,\n test_proportion=wandb.config.TEST_PROPORTION,\n )\n\n model = MachineTranslationTransformer(\n d_model=wandb.config.D_MODEL,\n n_blocks=wandb.config.N_BLOCKS,\n src_vocab_size=wandb.config.VOCAB_SIZE,\n trg_vocab_size=wandb.config.VOCAB_SIZE,\n n_heads=wandb.config.N_HEADS,\n d_ff=wandb.config.D_FF,\n dropout_proba=wandb.config.DROPOUT_PROBA\n )\n\n loss_func = nn.CrossEntropyLoss(ignore_index=0, label_smoothing=0.1, reduction='mean')\n\n optimizer = optim.Adam(model.parameters(), betas=wandb.config.BETAS, eps=wandb.config.EPS)\n scheduler=CustomScheduler(optimizer, wandb.config.D_MODEL, wandb.config.N_WARMUP_STEPS)\n \n # # The above scheduler's efficiency is highly influenced by dataset and batch size,\n # # alternatively you can use the below configuration, which also works much better for overfit configs.\n # optimizer = optim.Adam(model.parameters(), lr=0.00001, betas=wandb.config.BETAS, eps=wandb.config.EPS)\n # scheduler=optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.0005, epochs=wandb.config.EPOCHS, steps_per_epoch=len(train_dl), pct_start=0.3)\n \n cbs = [\n MoveToDeviceCallback(),\n TrackLoss(),\n TrackExample(),\n TrackBleu(),\n CheckpointSaver(epoch_cnt=wandb.config.MODEL_SAVE_EPOCH_CNT,),\n ]\n \n wandb.watch(model, log_freq=1000)\n learner = Learner(model,\n train_dl,\n val_dl,\n loss_func,\n cbs,\n optimizer,\n scheduler,\n self.device)\n\n learner.fit(wandb.config.EPOCHS)\n\n \nif __name__ == \"__main__\":\n TrainingApp().main()\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.cuda.get_device_name"
]
] |
VoVAllen/dgl-lifesci
|
[
"96895f2bddf255ad326f0bc4e8064bc3ed5c3044"
] |
[
"python/dgllife/data/lipophilicity.py"
] |
[
"# -*- coding: utf-8 -*-\n#\n# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n# Lipophilicity from MoleculeNet for the prediction of octanol/water\n# distribution coefficient (logD at pH 7.4) of 4200 compounds\n\nimport pandas as pd\n\nfrom dgl.data.utils import get_download_dir, download, _get_dgl_url, extract_archive\n\nfrom .csv_dataset import MoleculeCSVDataset\nfrom ..utils.mol_to_graph import smiles_to_bigraph\n\n__all__ = ['Lipophilicity']\n\nclass Lipophilicity(MoleculeCSVDataset):\n r\"\"\"Lipophilicity from MoleculeNet for the prediction of octanol/water\n distribution coefficient (logD at pH 7.4) of 4200 compounds\n\n This dataset is curated from ChEMBL database containing experimental results\n on octanol/water distribution coefficient (logD at pH=7.4). Due to the importance\n of lipophilicity in membrane permeability and solubility, the task is of high\n importance to drug development.\n\n References:\n\n * [1] MoleculeNet: A Benchmark for Molecular Machine Learning.\n * [2] ChEMBL Deposited Data Set - AZ dataset; 2015.\n\n Parameters\n ----------\n smiles_to_graph: callable, str -> DGLGraph\n A function turning a SMILES string into a DGLGraph.\n Default to :func:`dgllife.utils.smiles_to_bigraph`.\n node_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict\n Featurization for nodes like atoms in a molecule, which can be used to update\n ndata for a DGLGraph. Default to None.\n edge_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict\n Featurization for edges like bonds in a molecule, which can be used to update\n edata for a DGLGraph. Default to None.\n load : bool\n Whether to load the previously pre-processed dataset or pre-process from scratch.\n ``load`` should be False when we want to try different graph construction and\n featurization methods and need to preprocess from scratch. Default to True.\n log_every : bool\n Print a message every time ``log_every`` molecules are processed. 
Default to 1000.\n cache_file_path : str\n Path to the cached DGLGraphs, default to 'lipophilicity_dglgraph.bin'.\n\n Examples\n --------\n\n >>> from dgllife.data import Lipophilicity\n >>> from dgllife.utils import smiles_to_bigraph, CanonicalAtomFeaturizer\n\n >>> dataset = Lipophilicity(smiles_to_bigraph, CanonicalAtomFeaturizer())\n >>> # Get size of the dataset\n >>> len(dataset)\n 4200\n >>> # Get the 0th datapoint, consisting of SMILES, DGLGraph and logD\n >>> dataset[0]\n ('Cn1c(CN2CCN(CC2)c3ccc(Cl)cc3)nc4ccccc14',\n DGLGraph(num_nodes=24, num_edges=54,\n ndata_schemes={'h': Scheme(shape=(74,), dtype=torch.float32)}\n edata_schemes={}),\n tensor([3.5400]))\n\n We also provide information for the ChEMBL id of the compound.\n\n >>> dataset.chembl_ids[i]\n\n We can also get the ChEMBL id along with SMILES, DGLGraph and logD at once.\n\n >>> dataset.load_full = True\n >>> dataset[0]\n ('Cn1c(CN2CCN(CC2)c3ccc(Cl)cc3)nc4ccccc14',\n DGLGraph(num_nodes=24, num_edges=54,\n ndata_schemes={'h': Scheme(shape=(74,), dtype=torch.float32)}\n edata_schemes={}),\n tensor([3.5400]),\n 'CHEMBL596271')\n \"\"\"\n def __init__(self,\n smiles_to_graph=smiles_to_bigraph,\n node_featurizer=None,\n edge_featurizer=None,\n load=True,\n log_every=1000,\n cache_file_path='lipophilicity_dglgraph.bin'):\n\n self._url = 'dataset/lipophilicity.zip'\n data_path = get_download_dir() + '/lipophilicity.zip'\n dir_path = get_download_dir() + '/lipophilicity'\n download(_get_dgl_url(self._url), path=data_path)\n extract_archive(data_path, dir_path)\n df = pd.read_csv(dir_path + '/Lipophilicity.csv')\n\n # ChEMBL ids\n self.chembl_ids = df['CMPD_CHEMBLID'].tolist()\n\n self.load_full = False\n\n super(Lipophilicity, self).__init__(df=df,\n smiles_to_graph=smiles_to_graph,\n node_featurizer=node_featurizer,\n edge_featurizer=edge_featurizer,\n smiles_column='smiles',\n cache_file_path=cache_file_path,\n task_names=['exp'],\n load=load,\n log_every=log_every,\n init_mask=False)\n\n def __getitem__(self, item):\n \"\"\"Get datapoint with index\n\n Parameters\n ----------\n item : int\n Datapoint index\n\n Returns\n -------\n str\n SMILES for the ith datapoint\n DGLGraph\n DGLGraph for the ith datapoint\n Tensor of dtype float32 and shape (1)\n Labels of the ith datapoint\n str, optional\n ChEMBL id of the ith datapoint\n \"\"\"\n if self.load_full:\n return self.smiles[item], self.graphs[item], self.labels[item], self.chembl_ids[item]\n else:\n return self.smiles[item], self.graphs[item], self.labels[item]\n"
] |
[
[
"pandas.read_csv"
]
] |
eEcoLiDAR/eEcoLiDAR
|
[
"f5c4e772e4893f7242ed0b10aa17ac7e693a55a0"
] |
[
"laserchicken/io/test_read_ply.py"
] |
[
"import os\nimport shutil\nimport unittest\n\nimport numpy as np\nfrom dateutil.parser import parse\nfrom pytest import raises\n\nfrom laserchicken import keys\nfrom laserchicken.io.load import load\n\n\nclass TestReadPly(unittest.TestCase):\n _test_dir = 'TestLoad_dir'\n _test_file_name = 'example.ply'\n _test_file_without_comments_name = 'example_without_comments.ply'\n _test_file_with_invalid_comments_name = 'example_with_invalid_comments.ply'\n _las_file_name = '5points.las'\n _test_data_source = 'testdata'\n las_file_path = os.path.join(_test_dir, _las_file_name)\n test_file_path = os.path.join(_test_dir, _test_file_name)\n test_file_without_comments_path = os.path.join(_test_dir, _test_file_without_comments_name)\n test_file_with_invalid_comments_path = os.path.join(_test_dir, _test_file_with_invalid_comments_name)\n\n def test_nonexistentFile_error(self):\n # Catch most specific subclass of FileNotFoundException (3.6) and IOError (2.7).\n with raises(Exception):\n load('nonexistentfile.ply')\n\n def test_wrongFormat_error(self):\n with raises(ValueError):\n load(self.las_file_path, format='.PLY')\n\n def test_existentPly_noError(self):\n load(self.test_file_path)\n\n def test_containsPointsElement(self):\n data = load(self.test_file_path)\n self.assertIn(keys.point, data)\n\n def test_containsXElement(self):\n data = load(self.test_file_path)\n self.assertIn('x', data[keys.point])\n\n def test_rightNumberOfPoints(self):\n data = load(self.test_file_path)\n self.assertEqual(len(data[keys.point]['x']['data']), 3)\n\n def test_correctPoints(self):\n data = load(self.test_file_path)\n points = data[keys.point]\n point = np.array(\n [points['x']['data'][0], points['y']['data'][0], points['z']['data'][0], points['return']['data'][0]])\n np.testing.assert_allclose(point, np.array([0.11, 0.12, 0.13, 1]))\n\n def test_correctPointCloud(self):\n data = load(self.test_file_path)\n point_cloud = data['pointcloud']\n offset = point_cloud['offset']['data'][0]\n np.testing.assert_allclose(offset, 12.1)\n\n def test_correctPointCloudWithoutComments(self):\n \"\"\"Missing comment section should not cause error (regression test).\"\"\"\n data = load(self.test_file_without_comments_path)\n point_cloud = data['pointcloud']\n offset = point_cloud['offset']['data'][0]\n np.testing.assert_allclose(offset, 12.1)\n\n def test_correctPointCloudWithInvalidComments(self):\n \"\"\"Invalid comments should not cause error.\"\"\"\n data = load(self.test_file_with_invalid_comments_path)\n point_cloud = data['pointcloud']\n offset = point_cloud['offset']['data'][0]\n np.testing.assert_allclose(offset, 12.1)\n\n def test_allLogEntriesContainAllColumns(self):\n log = load(self.test_file_path)['log']\n\n for entry in log:\n for key in ['time', 'module', 'parameters', 'version']:\n self.assertIn(key, entry)\n\n def test_correctModulesLogged(self):\n log = load(self.test_file_path)['log']\n\n modules = [entry['module'] for entry in log]\n # an additional 'load' is added in the log when reading\n self.assertListEqual(['load', 'filter', 'laserchicken.io.load'], modules)\n\n def test_correctTimesLogged(self):\n log = load(self.test_file_path)['log']\n\n self.assertListEqual([2018, 1, 18, 16, 1, 0, 3, 18, -1],\n list(parse(log[0]['time']).timetuple()))\n self.assertListEqual([2018, 1, 18, 16, 3, 0, 3, 18, -1],\n list(parse(log[1]['time']).timetuple()))\n\n def setUp(self):\n os.mkdir(self._test_dir)\n shutil.copyfile(os.path.join(self._test_data_source, self._test_file_name), self.test_file_path)\n 
shutil.copyfile(os.path.join(self._test_data_source, self._test_file_without_comments_name),\n self.test_file_without_comments_path)\n shutil.copyfile(os.path.join(self._test_data_source, self._test_file_with_invalid_comments_name),\n self.test_file_with_invalid_comments_path)\n shutil.copyfile(os.path.join(self._test_data_source, self._las_file_name), self.las_file_path)\n\n def tearDown(self):\n shutil.rmtree(self._test_dir)\n\n\nclass TestReadPlyBinary(TestReadPly):\n _test_file_name = 'example_little_endian.ply'\n _test_file_without_comments_name = 'example_without_comments_little_endian.ply'\n _test_file_with_invalid_comments_name = 'example_with_invalid_comments_little_endian.ply'\n"
] |
[
[
"numpy.array",
"numpy.testing.assert_allclose"
]
] |
MPCAICDM/MPCA
|
[
"c996435a0578ea4160f934bc01041c2ef23468f3",
"c996435a0578ea4160f934bc01041c2ef23468f3",
"c996435a0578ea4160f934bc01041c2ef23468f3"
] |
[
"models/LSA_cifar10.py",
"helpers/cae_helper.py",
"models/wrn_pytorch.py"
] |
[
"from functools import reduce\nfrom operator import mul\nfrom typing import Tuple\n\nimport torch\nimport torch.nn as nn\n\nfrom models.blocks.LSA_blocks import DownsampleBlock, UpsampleBlock, ResidualBlock\nfrom models.blocks.estimator_1D import Estimator1D\n\n\nclass Encoder(nn.Module):\n \"\"\"\n CIFAR10 model encoder.\n \"\"\"\n def __init__(self, input_shape, code_length):\n # type: (Tuple[int, int, int], int) -> None\n \"\"\"\n Class constructor:\n\n :param input_shape: the shape of CIFAR10 samples.\n :param code_length: the dimensionality of latent vectors.\n \"\"\"\n super(Encoder, self).__init__()\n\n self.input_shape = input_shape\n self.code_length = code_length\n\n c, h, w = input_shape\n\n activation_fn = nn.LeakyReLU()\n\n # Convolutional network\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels=c, out_channels=32, kernel_size=3, bias=False),\n activation_fn,\n ResidualBlock(channel_in=32, channel_out=32, activation_fn=activation_fn),\n DownsampleBlock(channel_in=32, channel_out=64, activation_fn=activation_fn),\n DownsampleBlock(channel_in=64, channel_out=128, activation_fn=activation_fn),\n DownsampleBlock(channel_in=128, channel_out=256, activation_fn=activation_fn),\n )\n self.deepest_shape = (256, h // 8, w // 8)\n\n # FC network\n self.fc = nn.Sequential(\n nn.Linear(in_features=reduce(mul, self.deepest_shape), out_features=256),\n nn.BatchNorm1d(num_features=256),\n activation_fn,\n nn.Linear(in_features=256, out_features=code_length),\n nn.Tanh()# nn.Sigmoid() TODO replace with tanh cause our data are scaled to [-1, 1]\n )\n\n def forward(self, x):\n # types: (torch.Tensor) -> torch.Tensor\n \"\"\"\n Forward propagation.\n\n :param x: the input batch of images.\n :return: the batch of latent vectors.\n \"\"\"\n h = x\n h = self.conv(h)\n h = h.view(len(h), -1)\n o = self.fc(h)\n\n return o\n\n\nclass Decoder(nn.Module):\n \"\"\"\n CIFAR10 model decoder.\n \"\"\"\n def __init__(self, code_length, deepest_shape, output_shape):\n # type: (int, Tuple[int, int, int], Tuple[int, int, int]) -> None\n \"\"\"\n Class constructor.\n\n :param code_length: the dimensionality of latent vectors.\n :param deepest_shape: the dimensionality of the encoder's deepest convolutional map.\n :param output_shape: the shape of CIFAR10 samples.\n \"\"\"\n super(Decoder, self).__init__()\n\n self.code_length = code_length\n self.deepest_shape = deepest_shape\n self.output_shape = output_shape\n\n activation_fn = nn.LeakyReLU()\n\n # FC network\n self.fc = nn.Sequential(\n nn.Linear(in_features=code_length, out_features=256),\n nn.BatchNorm1d(num_features=256),\n activation_fn,\n nn.Linear(in_features=256, out_features=reduce(mul, deepest_shape)),\n nn.BatchNorm1d(num_features=reduce(mul, deepest_shape)),\n activation_fn\n )\n\n # Convolutional network\n self.conv = nn.Sequential(\n UpsampleBlock(channel_in=256, channel_out=128, activation_fn=activation_fn),\n UpsampleBlock(channel_in=128, channel_out=64, activation_fn=activation_fn),\n UpsampleBlock(channel_in=64, channel_out=32, activation_fn=activation_fn),\n ResidualBlock(channel_in=32, channel_out=32, activation_fn=activation_fn),\n nn.Conv2d(in_channels=32, out_channels=3, kernel_size=1, bias=False)\n )\n\n def forward(self, x):\n # types: (torch.Tensor) -> torch.Tensor\n \"\"\"\n Forward propagation.\n\n :param x: the batch of latent vectors.\n :return: the batch of reconstructions.\n \"\"\"\n h = x\n h = self.fc(h)\n h = h.view(len(h), *self.deepest_shape)\n h = self.conv(h)\n o = h\n\n return o\n\n\nclass LSACIFAR10(nn.Module):\n 
\"\"\"\n LSA model for CIFAR10 one-class classification.\n \"\"\"\n def __init__(self, input_shape, code_length, cpd_channels):\n # type: (Tuple[int, int, int], int, int) -> None\n \"\"\"\n Class constructor.\n\n :param input_shape: the shape of CIFAR10 samples.\n :param code_length: the dimensionality of latent vectors.\n :param cpd_channels: number of bins in which the multinomial works.\n \"\"\"\n super(LSACIFAR10, self).__init__()\n\n self.input_shape = input_shape\n self.code_length = code_length\n\n # Build encoder\n self.encoder = Encoder(\n input_shape=input_shape,\n code_length=code_length\n )\n\n # Build decoder\n self.decoder = Decoder(\n code_length=code_length,\n deepest_shape=self.encoder.deepest_shape,\n output_shape=input_shape\n )\n\n # Build estimator\n self.estimator = Estimator1D(\n code_length=code_length,\n fm_list=[32, 32, 32, 32],\n cpd_channels=cpd_channels\n )\n\n def forward(self, x):\n # type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]\n \"\"\"\n Forward propagation.\n\n :param x: the input batch of images.\n :return: a tuple of torch.Tensors holding reconstructions, latent vectors and CPD estimates.\n \"\"\"\n h = x\n\n # Produce representations\n z = self.encoder(h)\n\n # Estimate CPDs with autoregression\n z_dist = self.estimator(z)\n\n # Reconstruct x\n x_r = self.decoder(z)\n x_r = x_r.view(-1, *self.input_shape)\n\n return x_r, z, z_dist\n",
"from helpers.base_helper import TrainTestHelper, transform_train, transform_test\nfrom utils import get_channels_axis,save_roc_pr_curve_data, show_roc_pr_curve_data\nfrom models.encoders_decoders import CAE_pytorch\nfrom keras2pytorch_dataset import trainset_pytorch\nimport torch.utils.data as data\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.optim as optim\nfrom sklearn.ensemble import IsolationForest\n\n\nclass CAEHelper(TrainTestHelper):\n def __init__(self, n_channels, *args, **kwargs):\n super(CAEHelper, self).__init__(*args, **kwargs)\n self.method_tag = \"cae\"\n\n self.n_channels = n_channels\n self.model = CAE_pytorch(in_channels=self.n_channels).cuda()\n self.batch_size = 128\n\n cudnn.benchmark = True\n self.criterion = nn.MSELoss()\n # use adam always\n self.optimizer = optim.Adam(self.model.parameters(), eps=1e-7, weight_decay=0.0005)\n #self.epochs = 250\n\n\n def train_step(self, x, y=None):\n inputs = torch.autograd.Variable(x.cuda())\n outputs = self.model(inputs)\n loss = self.criterion(inputs, outputs)\n\n self.losses.update(loss.item(), inputs.size(0))\n\n # compute gradient and do SGD step\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n def compute_scores(self):\n self.model.eval()\n losses = []\n reps = []\n y_test = []\n for batch_idx, (inputs, labels) in enumerate(self.testloader):\n inputs = torch.autograd.Variable(inputs.cuda())\n rep = self.model.encode(inputs)\n outputs = self.model.decode(rep)\n loss = outputs.sub(inputs).pow(2).view(outputs.size(0), -1)\n loss = loss.sum(dim=1, keepdim=False)\n losses.append(loss.data.cpu())\n reps.append(rep.data.cpu())\n y_test.append(labels.data.cpu())\n losses = torch.cat(losses, dim=0)\n reps = torch.cat(reps, dim=0)\n y_test = torch.cat(y_test, dim=0)\n losses = losses.numpy()\n losses = losses - losses.min()\n losses = losses / (1e-8 + losses.max())\n scores = 1 - losses\n return scores, reps.numpy(), y_test.numpy()\n\n def test(self, is_show=True):\n scores, reps, y_test = self.compute_scores()\n if is_show:\n roc_auc, pr_auc_norm, pr_auc_anom = show_roc_pr_curve_data(scores, y_test)\n self.print(\"auroc:{}, pr_auc_norm:{}, pr_auc_anom:{}\".format(roc_auc, pr_auc_norm, pr_auc_anom), False)\n else:\n res_file_path = self.get_result_file_path()\n save_roc_pr_curve_data(scores, y_test, res_file_path)\n\n # Use reps to train iforest\n clf = IsolationForest(contamination=self.p, n_jobs=4).fit(reps)\n scores_iforest = clf.decision_function(reps)\n iforest_file_path = self.get_result_file_path('iforest')\n save_roc_pr_curve_data(scores_iforest, y_test, iforest_file_path)",
"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n__all__ = ['wrn']\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_planes, out_planes, stride, dropRate=0.0):\n super(BasicBlock, self).__init__()\n\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_planes)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.droprate = dropRate\n self.equalInOut = (in_planes == out_planes)\n self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=False) or None\n def forward(self, x):\n if not self.equalInOut:\n x = self.relu1(self.bn1(x))\n else:\n out = self.relu1(self.bn1(x))\n out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))\n if self.droprate > 0:\n out = F.dropout(out, p=self.droprate, training=self.training)\n out = self.conv2(out)\n return torch.add(x if self.equalInOut else self.convShortcut(x), out)\n\nclass NetworkBlock(nn.Module):\n def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):\n super(NetworkBlock, self).__init__()\n self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)\n def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):\n layers = []\n for i in range(nb_layers):\n layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))\n return nn.Sequential(*layers)\n def forward(self, x):\n return self.layer(x)\n\nclass WideResNet(nn.Module):\n def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0, in_channel=3):\n super(WideResNet, self).__init__()\n nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]\n assert (depth - 4) % 6 == 0, 'depth should be 6n+4'\n n = (depth - 4) // 6\n block = BasicBlock\n # 1st conv before any network block\n self.conv1 = nn.Conv2d(in_channel, nChannels[0], kernel_size=3, stride=1,\n padding=1, bias=False)\n # 1st block\n self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)\n # 2nd block\n self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)\n # 3rd block\n self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)\n # global average pooling and classifier\n self.bn1 = nn.BatchNorm2d(nChannels[3])\n self.relu = nn.ReLU(inplace=True)\n self.fc = nn.Linear(nChannels[3], num_classes)\n self.nChannels = nChannels[3]\n self.final_act = nn.Softmax(dim=1)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = 1. 
/ np.sqrt(m.weight.data.size(1))\n m.weight.data.uniform_(-n, n)\n m.bias.data.zero_()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.block1(out)\n out = self.block2(out)\n out = self.block3(out)\n out = self.relu(self.bn1(out))\n out = F.avg_pool2d(out, 8)\n rep = out.view(-1, self.nChannels)\n out = self.fc(rep)\n return out, rep\n\n\nclass PCAWideResNet(nn.Module):\n def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0, in_channel=3, z_channels=10,\n mode='A', shareAB=True):\n super(PCAWideResNet, self).__init__()\n nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]\n assert (depth - 4) % 6 == 0, 'depth should be 6n+4'\n n = (depth - 4) // 6\n block = BasicBlock\n # 1st conv before any network block\n self.conv1 = nn.Conv2d(in_channel, nChannels[0], kernel_size=3, stride=1,\n padding=1, bias=False)\n # 1st block\n self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)\n # 2nd block\n self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)\n # 3rd block\n self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)\n # global average pooling and classifier\n self.bn1 = nn.BatchNorm2d(nChannels[3])\n self.relu = nn.ReLU(inplace=True)\n self.fc = nn.Linear(nChannels[3], num_classes)\n self.nChannels = nChannels[3]\n self.final_act = nn.Softmax(dim=1)\n self.As = torch.nn.Parameter(torch.randn(num_classes,self.nChannels, z_channels))\n if not shareAB:\n self.Bs = torch.nn.Parameter(torch.randn(num_classes,self.nChannels, z_channels))\n self.shareAB = shareAB\n\n self.mode = mode\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = 1. 
/ np.sqrt(m.weight.data.size(1))\n m.weight.data.uniform_(-n, n)\n m.bias.data.zero_()\n\n def forward(self, x, labels):\n out = self.conv1(x)\n out = self.block1(out)\n out = self.block2(out)\n out = self.block3(out)\n out = self.relu(self.bn1(out))\n out = F.avg_pool2d(out, 8)\n y = out.view(-1, self.nChannels)\n A = self.As[labels]\n B = A if self.shareAB else self.Bs[labels]\n y_u = y.unsqueeze(dim=1)\n z = torch.matmul(y_u, A)\n y_rsr = torch.matmul(z, B.transpose(dim0=1, dim1=2)).squeeze(dim=1) #TODO\n if self.mode == 'A':\n out = self.fc(y_rsr)\n elif self.mode == 'B':\n out = self.fc(y)\n else:\n out = self.fc((y_rsr + y) / 2.)\n return out, y, y_rsr\n\n def predict(self, x, labels):\n out = self.conv1(x)\n out = self.block1(out)\n out = self.block2(out)\n out = self.block3(out)\n out = self.relu(self.bn1(out))\n out = F.avg_pool2d(out, 8)\n y = out.view(-1, self.nChannels)\n A = self.As[labels]\n B = A if self.shareAB else self.Bs[labels]\n y_u = y.unsqueeze(dim=1)\n z = torch.matmul(y_u, A)\n y_rsr = torch.matmul(z, B.transpose(dim0=1, dim1=2)).squeeze(dim=1) #TODO\n outA = self.fc(y_rsr)\n outB = self.fc(y)\n outAB = self.fc(y_rsr * 0.01 + y * 0.99)\n return (outA, outB, outAB), y, y_rsr\n\ndef wrn(**kwargs):\n \"\"\"\n Constructs a Wide Residual Networks.\n \"\"\"\n model = WideResNet(**kwargs)\n return model\n\nclass MPCAWideResNet(nn.Module):\n def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0, in_channel=3):\n super(MPCAWideResNet, self).__init__()\n nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]\n assert (depth - 4) % 6 == 0, 'depth should be 6n+4'\n n = (depth - 4) // 6\n block = BasicBlock\n # 1st conv before any network block\n self.conv1 = nn.Conv2d(in_channel, nChannels[0], kernel_size=3, stride=1,\n padding=1, bias=False)\n # 1st block\n self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)\n # 2nd block\n self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)\n # 3rd block\n self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)\n # global average pooling and classifier\n self.bn1 = nn.BatchNorm2d(nChannels[3])\n self.relu = nn.ReLU(inplace=True)\n self.fc = nn.Linear(nChannels[3], num_classes)\n self.nChannels = nChannels[3]\n self.final_act = nn.Softmax(dim=1)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = 1. / np.sqrt(m.weight.data.size(1))\n m.weight.data.uniform_(-n, n)\n m.bias.data.zero_()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.block1(out)\n out = self.block2(out)\n out = self.block3(out)\n out = self.relu(self.bn1(out))\n out = F.avg_pool2d(out, 8)\n rep = out.view(-1, self.nChannels)\n out = self.fc(rep)\n return out, rep"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.LeakyReLU"
],
[
"sklearn.ensemble.IsolationForest",
"torch.nn.MSELoss",
"torch.cat"
],
[
"torch.nn.Sequential",
"torch.nn.Softmax",
"torch.nn.functional.dropout",
"torch.randn",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.matmul",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
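
The `PCAWideResNet` blob above routes pooled features through a class-conditional low-rank basis (`self.As[labels]`) and reconstructs them before the classifier. Below is a minimal sketch of just that projection step; shapes mirror the file, but every tensor here is a random toy, not a trained parameter:

```python
import torch

# Sketch of the per-class low-rank step in PCAWideResNet.forward
# (shareAB=True case): A = As[labels]; z = y @ A; y_rsr = z @ A^T.
batch, n_channels, z_channels, num_classes = 4, 64, 10, 3

y = torch.randn(batch, n_channels)                     # pooled features
As = torch.randn(num_classes, n_channels, z_channels)  # one basis per class
labels = torch.tensor([0, 2, 1, 0])

A = As[labels]                                         # (batch, C, z)
z = torch.matmul(y.unsqueeze(dim=1), A)                # (batch, 1, z)
y_rsr = torch.matmul(z, A.transpose(dim0=1, dim1=2)).squeeze(dim=1)

assert y_rsr.shape == y.shape                          # (batch, C)
```

When `shareAB=True` the same matrix serves as both projection and back-projection, so `y_rsr` is a rank-`z_channels` reconstruction of `y`.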
johnolafenwa/TorchFusion-Utils
|
[
"518ea86cc3113bbcfbed5f16467deab398c6bd5c"
] |
[
"torchfusion_utils/models/models.py"
] |
[
"#TO DO : SUMMARY, ONNX EXPORT, LIBTORCH EXPORT, LOADING, SAVING\n\nfrom torch.nn.parallel.data_parallel import DataParallel\nimport torch\nimport torch.nn as nn\nimport copy\nfrom ..fp16.fp16 import MultiSequential,Convert\nfrom collections import namedtuple\nimport os\n\n\ndef save_model(model,path,save_architecture=False):\n\n if type(model) == DataParallel:\n model = model.module\n\n if isinstance(model,MultiSequential):\n \n for child in model.children():\n\n if not isinstance(child,Convert):\n model = child \n break\n\n model = copy.deepcopy(model).float().cpu()\n\n if save_architecture:\n torch.save(model, path)\n else:\n state = model.state_dict()\n torch.save(state, path)\n\ndef load_model(model,path):\n checkpoint = torch.load(path, map_location=lambda storage, loc: storage)\n try:\n model.load_state_dict(checkpoint)\n except:\n copy = dict()\n for x, y in zip(model.state_dict(), checkpoint):\n new_name = y[y.index(x):]\n copy[new_name] = checkpoint[y]\n model.load_state_dict(copy)\n\n\ndef model_summary(model,*input_tensors,item_length=26):\n \"\"\"\n\n :param model:\n :param input_tensors:\n :param item_length:\n :param tensorboard_log:\n :return:\n \"\"\"\n\n summary = []\n\n ModuleDetails = namedtuple(\"Layer\", [\"name\", \"input_size\", \"output_size\",\"num_parameters\",\"multiply_adds\"])\n hooks = []\n layer_instances = {}\n\n def add_hooks(module):\n\n def hook(module, input, output):\n\n class_name = str(module.__class__.__name__)\n\n instance_index = 1\n if class_name not in layer_instances:\n layer_instances[class_name] = instance_index\n else:\n instance_index = layer_instances[class_name] + 1\n layer_instances[class_name] = instance_index\n\n layer_name = class_name + \"_\" + str(instance_index)\n\n params = 0\n if hasattr(module,\"weight\"):\n for param_ in module.parameters():\n params += param_.view(-1).size(0)\n\n flops = \"Not Available\"\n if class_name.find(\"Conv\") != -1 and hasattr(module, \"weight\"):\n flops = (\n torch.prod(torch.LongTensor(list(module.weight.data.size()))) * torch.prod(\n torch.LongTensor(list(output.size())[2:]))).item()\n\n elif isinstance(module, nn.Linear):\n\n flops = (torch.prod(torch.LongTensor(list(output.size()))) * input[0].size(1)).item()\n\n summary.append(\n ModuleDetails(name=layer_name, input_size=list(input[0].size()), output_size=list( output.size()), num_parameters=params, multiply_adds=flops))\n\n if not isinstance(module, nn.ModuleList) and not isinstance(module, nn.Sequential) and not isinstance(module,MultiSequential) and not isinstance(module,Convert) and module != model:\n hooks.append(module.register_forward_hook(hook))\n\n model.apply(add_hooks)\n\n space_len = item_length\n\n model(*input_tensors)\n for hook in hooks:\n hook.remove()\n\n details = \"Model Summary\" + os.linesep + \"Name{}Input Size{}Output Size{}Parameters{}Multiply Adds (Flops){}\".format(\n ' ' * (space_len - len(\"Name\")), ' ' * (space_len - len(\"Input Size\")),\n ' ' * (space_len - len(\"Output Size\")), ' ' * (space_len - len(\"Parameters\")),\n ' ' * (space_len - len(\"Multiply Adds (Flops)\"))) + os.linesep + '-' * space_len * 5 + os.linesep\n params_sum = 0\n flops_sum = 0\n for layer in summary:\n params_sum += layer.num_parameters\n if layer.multiply_adds != \"Not Available\":\n flops_sum += layer.multiply_adds\n details += \"{}{}{}{}{}{}{}{}{}{}\".format(layer.name, ' ' * (space_len - len(layer.name)), layer.input_size,\n ' ' * (space_len - len(str(layer.input_size))), layer.output_size,\n ' ' * (space_len - 
len(str(layer.output_size))),\n layer.num_parameters,\n ' ' * (space_len - len(str(layer.num_parameters))),\n layer.multiply_adds,\n ' ' * (space_len - len(str(layer.multiply_adds)))) + os.linesep + '-' * space_len * 5 + os.linesep\n\n\n details += os.linesep + \"Total Parameters: {}\".format(params_sum) + os.linesep + '-' * space_len * 5 + os.linesep\n details += \"Total Multiply Adds (For Convolution and Linear Layers only): {}\".format(flops_sum) + os.linesep + '-' * space_len * 5 + os.linesep\n details += \"Number of Layers\" + os.linesep\n for layer in layer_instances:\n details += \"{} : {} layers \".format(layer, layer_instances[layer])\n\n\n return details"
] |
[
[
"torch.save",
"torch.load"
]
] |
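
`save_model`/`load_model` above persist only the `state_dict` and restore it with a CPU `map_location`. A round-trip sketch of that convention follows; `demo.pth` is an illustrative filename, not one the library defines:

```python
import torch
import torch.nn as nn

# Save only the state_dict, then restore onto a freshly built module,
# mirroring save_model(save_architecture=False) and load_model above.
model = nn.Linear(4, 2)
torch.save(model.state_dict(), "demo.pth")

restored = nn.Linear(4, 2)
checkpoint = torch.load("demo.pth", map_location=lambda storage, loc: storage)
restored.load_state_dict(checkpoint)
```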
MartinThoma/PyBioMed
|
[
"9cac23d0958fd20040b38d5435ef1ff5d5c2cd13"
] |
[
"PyBioMed/PyMolecule/basak.py"
] |
[
"# -*- coding: utf-8 -*-\n# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao\n# All rights reserved.\n# This file is part of the PyBioMed.\n# The contents are covered by the terms of the BSD license\n# which is included in the file license.txt, found at the root\n# of the PyBioMed source tree.\n\"\"\"\n##############################################################################\nThe calculation of some commonly used basak information index based on its\n\ntopological structure. You can get 21 molecular connectivity descriptors.\n\nYou can freely use and distribute it. If you hava any problem, you could\n\ncontact with us timely!\n\nAuthors: Zhijiang Yao and Dongsheng Cao.\n\nDate: 2016.06.04\n\nEmail: gadsby@163.\n\n##############################################################################\n\"\"\"\n\n\n# Core Library modules\nimport copy\n\n# Third party modules\nimport numpy\nfrom rdkit import Chem\n\nVersion = 1.0\n\n############################################################################\n\n\ndef _CalculateEntropy(Probability):\n \"\"\"\n #################################################################\n **Internal used only**\n\n Calculation of entropy (Information content) for probability given\n #################################################################\n \"\"\"\n res = 0.0\n for i in Probability:\n if i != 0:\n res = res - i * numpy.log2(i)\n\n return res\n\n\ndef CalculateBasakIC0(mol):\n\n \"\"\"\n #################################################################\n Obtain the information content with order 0 proposed by Basak\n\n ---->IC0\n #################################################################\n \"\"\"\n\n BasakIC = 0.0\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = []\n for i in range(nAtoms):\n at = Hmol.GetAtomWithIdx(i)\n IC.append(at.GetAtomicNum())\n Unique = numpy.unique(IC)\n NAtomType = len(Unique)\n NTAtomType = numpy.zeros(NAtomType, numpy.float)\n for i in range(NAtomType):\n NTAtomType[i] = IC.count(Unique[i])\n\n if nAtoms != 0:\n # print sum(NTAtomType/nAtoms)\n BasakIC = _CalculateEntropy(NTAtomType / nAtoms)\n else:\n BasakIC = 0.0\n\n return BasakIC\n\n\ndef CalculateBasakSIC0(mol):\n \"\"\"\n #################################################################\n Obtain the structural information content with order 0\n\n proposed by Basak\n\n ---->SIC0\n #################################################################\n \"\"\"\n\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC0(mol)\n if nAtoms <= 1:\n BasakSIC = 0.0\n else:\n BasakSIC = IC / numpy.log2(nAtoms)\n\n return BasakSIC\n\n\ndef CalculateBasakCIC0(mol):\n\n \"\"\"\n #################################################################\n Obtain the complementary information content with order 0\n\n proposed by Basak\n\n ---->CIC0\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC0(mol)\n if nAtoms <= 1:\n BasakCIC = 0.0\n else:\n BasakCIC = numpy.log2(nAtoms) - IC\n\n return BasakCIC\n\n\ndef _CalculateBasakICn(mol, NumPath=1):\n\n \"\"\"\n #################################################################\n **internal used only**\n\n Obtain the information content with order n proposed by Basak\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n TotalPath = Chem.FindAllPathsOfLengthN(Hmol, NumPath, useBonds=0, useHs=1)\n if 
len(TotalPath) == 0:\n BasakIC = 0.0\n else:\n IC = {}\n for i in range(nAtoms):\n temp = []\n at = Hmol.GetAtomWithIdx(i)\n temp.append(at.GetAtomicNum())\n for index in TotalPath:\n if i == index[0]:\n temp.append(\n [Hmol.GetAtomWithIdx(kk).GetAtomicNum() for kk in index[1:]]\n )\n if i == index[-1]:\n cds = list(index)\n cds.reverse()\n temp.append(\n [Hmol.GetAtomWithIdx(kk).GetAtomicNum() for kk in cds[1:]]\n )\n # print temp\n\n IC[str(i)] = temp\n cds = []\n for value in IC.values():\n value.sort()\n cds.append(value)\n kkk = list(range(len(cds)))\n aaa = copy.deepcopy(kkk)\n res = []\n for i in aaa:\n if i in kkk:\n jishu = 0\n kong = []\n temp1 = cds[i]\n for j in aaa:\n if cds[j] == temp1:\n jishu = jishu + 1\n kong.append(j)\n for ks in kong:\n kkk.remove(ks)\n res.append(jishu)\n\n # print res\n BasakIC = _CalculateEntropy(numpy.array(res, numpy.float) / sum(res))\n\n return BasakIC\n\n\ndef CalculateBasakIC1(mol):\n \"\"\"\n #################################################################\n Obtain the information content with order 1 proposed by Basak\n\n ---->IC1\n #################################################################\n \"\"\"\n return _CalculateBasakICn(mol, NumPath=2)\n\n\ndef CalculateBasakIC2(mol):\n \"\"\"\n #################################################################\n Obtain the information content with order 2 proposed by Basak\n\n ---->IC2\n #################################################################\n \"\"\"\n return _CalculateBasakICn(mol, NumPath=3)\n\n\ndef CalculateBasakIC3(mol):\n \"\"\"\n #################################################################\n Obtain the information content with order 3 proposed by Basak\n\n ---->IC3\n #################################################################\n \"\"\"\n return _CalculateBasakICn(mol, NumPath=4)\n\n\ndef CalculateBasakIC4(mol):\n \"\"\"\n #################################################################\n Obtain the information content with order 4 proposed by Basak\n\n ---->IC4\n #################################################################\n \"\"\"\n return _CalculateBasakICn(mol, NumPath=5)\n\n\ndef CalculateBasakIC5(mol):\n \"\"\"\n #################################################################\n Obtain the information content with order 5 proposed by Basak\n\n ---->IC5\n #################################################################\n \"\"\"\n return _CalculateBasakICn(mol, NumPath=6)\n\n\ndef CalculateBasakIC6(mol):\n \"\"\"\n #################################################################\n Obtain the information content with order 6 proposed by Basak\n\n ---->IC6\n #################################################################\n \"\"\"\n return _CalculateBasakICn(mol, NumPath=7)\n\n\ndef CalculateBasakSIC1(mol):\n \"\"\"\n #################################################################\n Obtain the structural information content with order 1\n\n proposed by Basak.\n\n ---->SIC1\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC1(mol)\n if nAtoms <= 1:\n BasakSIC = 0.0\n else:\n BasakSIC = IC / numpy.log2(nAtoms)\n\n return BasakSIC\n\n\ndef CalculateBasakSIC2(mol):\n \"\"\"\n #################################################################\n Obtain the structural information content with order 2 proposed\n\n by Basak.\n\n ---->SIC2\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = 
Hmol.GetNumAtoms()\n IC = CalculateBasakIC2(mol)\n if nAtoms <= 1:\n BasakSIC = 0.0\n else:\n BasakSIC = IC / numpy.log2(nAtoms)\n\n return BasakSIC\n\n\ndef CalculateBasakSIC3(mol):\n \"\"\"\n #################################################################\n Obtain the structural information content with order 3 proposed\n\n by Basak.\n\n ---->SIC3\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC3(mol)\n if nAtoms <= 1:\n BasakSIC = 0.0\n else:\n BasakSIC = IC / numpy.log2(nAtoms)\n\n return BasakSIC\n\n\ndef CalculateBasakSIC4(mol):\n \"\"\"\n #################################################################\n Obtain the structural information content with order 4 proposed\n\n by Basak.\n\n ---->SIC4\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC4(mol)\n if nAtoms <= 1:\n BasakSIC = 0.0\n else:\n BasakSIC = IC / numpy.log2(nAtoms)\n\n return BasakSIC\n\n\ndef CalculateBasakSIC5(mol):\n \"\"\"\n #################################################################\n Obtain the structural information content with order 5 proposed\n\n by Basak.\n\n ---->SIC5\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC5(mol)\n if nAtoms <= 1:\n BasakSIC = 0.0\n else:\n BasakSIC = IC / numpy.log2(nAtoms)\n\n return BasakSIC\n\n\ndef CalculateBasakSIC6(mol):\n \"\"\"\n #################################################################\n Obtain the structural information content with order 6 proposed\n\n by Basak.\n\n ---->SIC6\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC6(mol)\n if nAtoms <= 1:\n BasakSIC = 0.0\n else:\n BasakSIC = IC / numpy.log2(nAtoms)\n\n return BasakSIC\n\n\ndef CalculateBasakCIC1(mol):\n \"\"\"\n #################################################################\n Obtain the complementary information content with order 1 proposed\n\n by Basak.\n\n ---->CIC1\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC1(mol)\n if nAtoms <= 1:\n BasakCIC = 0.0\n else:\n BasakCIC = numpy.log2(nAtoms) - IC\n\n return BasakCIC\n\n\ndef CalculateBasakCIC2(mol):\n \"\"\"\n #################################################################\n Obtain the complementary information content with order 2 proposed\n\n by Basak.\n\n ---->CIC2\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC2(mol)\n if nAtoms <= 1:\n BasakCIC = 0.0\n else:\n BasakCIC = numpy.log2(nAtoms) - IC\n\n return BasakCIC\n\n\ndef CalculateBasakCIC3(mol):\n \"\"\"\n #################################################################\n Obtain the complementary information content with order 3 proposed\n\n by Basak.\n\n ---->CIC3\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC3(mol)\n if nAtoms <= 1:\n BasakCIC = 0.0\n else:\n BasakCIC = numpy.log2(nAtoms) - IC\n\n return BasakCIC\n\n\ndef CalculateBasakCIC4(mol):\n \"\"\"\n #################################################################\n Obtain the 
complementary information content with order 4 proposed\n\n by Basak.\n\n ---->CIC4\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC4(mol)\n if nAtoms <= 1:\n BasakCIC = 0.0\n else:\n BasakCIC = numpy.log2(nAtoms) - IC\n\n return BasakCIC\n\n\ndef CalculateBasakCIC5(mol):\n \"\"\"\n #################################################################\n Obtain the complementary information content with order 5 proposed\n\n by Basak.\n\n ---->CIC5\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC5(mol)\n if nAtoms <= 1:\n BasakCIC = 0.0\n else:\n BasakCIC = numpy.log2(nAtoms) - IC\n\n return BasakCIC\n\n\ndef CalculateBasakCIC6(mol):\n \"\"\"\n #################################################################\n Obtain the complementary information content with order 6 proposed\n\n by Basak.\n\n ---->CIC6\n #################################################################\n \"\"\"\n Hmol = Chem.AddHs(mol)\n nAtoms = Hmol.GetNumAtoms()\n IC = CalculateBasakIC6(mol)\n if nAtoms <= 1:\n BasakCIC = 0.0\n else:\n BasakCIC = numpy.log2(nAtoms) - IC\n\n return BasakCIC\n\n\n_basak = {\n \"CIC0\": CalculateBasakCIC0,\n \"CIC1\": CalculateBasakCIC1,\n \"CIC2\": CalculateBasakCIC2,\n \"CIC3\": CalculateBasakCIC3,\n \"CIC4\": CalculateBasakCIC4,\n \"CIC5\": CalculateBasakCIC5,\n \"CIC6\": CalculateBasakCIC6,\n \"SIC0\": CalculateBasakSIC0,\n \"SIC1\": CalculateBasakSIC1,\n \"SIC2\": CalculateBasakSIC2,\n \"SIC3\": CalculateBasakSIC3,\n \"SIC4\": CalculateBasakSIC4,\n \"SIC5\": CalculateBasakSIC5,\n \"SIC6\": CalculateBasakSIC6,\n \"IC0\": CalculateBasakIC0,\n \"IC1\": CalculateBasakIC1,\n \"IC2\": CalculateBasakIC2,\n \"IC3\": CalculateBasakIC3,\n \"IC4\": CalculateBasakIC4,\n \"IC5\": CalculateBasakIC5,\n \"IC6\": CalculateBasakIC6,\n}\n\n\ndef Getbasak(mol):\n \"\"\"\n #################################################################\n Get the dictionary of basak descriptors for given moelcule mol\n #################################################################\n \"\"\"\n result = {}\n for DesLabel in _basak.keys():\n result[DesLabel] = round(_basak[DesLabel](mol), 3)\n return result\n\n\ndef _GetHTMLDoc():\n \"\"\"\n #################################################################\n Write HTML documentation for this module.\n #################################################################\n \"\"\"\n import pydoc\n\n pydoc.writedoc(\"basak\")\n\n\n################################################################################\n\nif __name__ == \"__main__\":\n\n smi5 = [\"CCCCCC\", \"CCC(C)CC\", \"CC(C)CCC\", \"CC(C)C(C)C\", \"CCCCCN\", \"c1ccccc1N\"]\n for index, smi in enumerate(smi5):\n m = Chem.MolFromSmiles(smi)\n print(index + 1)\n print(smi)\n print(\"\\t\", Getbasak(m))\n print(len(Getbasak(m)))\n"
] |
[
[
"numpy.array",
"numpy.log2",
"numpy.zeros",
"numpy.unique"
]
] |
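
`CalculateBasakIC0` above reduces to the Shannon entropy of the molecule's atom-type distribution. An RDKit-free sketch, with a hard-coded atom list (ethanol with explicit hydrogens: 2 C, 1 O, 6 H) standing in for the list built from `Hmol`:

```python
import numpy as np

# Entropy of the atom-type distribution, mirroring _CalculateEntropy
# and the counting loop in CalculateBasakIC0.
atomic_nums = [6, 6, 8, 1, 1, 1, 1, 1, 1]

unique = np.unique(np.array(atomic_nums))
counts = np.zeros(len(unique))
for i, u in enumerate(unique):
    counts[i] = atomic_nums.count(u)

prob = counts / len(atomic_nums)
ic0 = -sum(p * np.log2(p) for p in prob if p != 0)  # ~1.22 bits here
```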
shenbai/tradesafe
|
[
"b6bb843288f535d7d146426fd40750f7484a16e6"
] |
[
"org/tradesafe/data/DataFether.py"
] |
[
"# coding:utf-8\n\nfrom datetime import datetime, timedelta\n# from urllib import Request, urlopen\nfrom urllib import request\n\nimport demjson\nimport pandas as pd\nimport tushare as ts\nfrom pandas.io import sql\n\nfrom org.tradesafe.bt.log import logging as log\nfrom org.tradesafe.conf import config\nfrom org.tradesafe.data.index_code_conf import indices\nfrom org.tradesafe.db import sqlite_db as db\nfrom org.tradesafe.utils import utils\n\nutils.mkdirs(config.log_dir)\nlog = utils.mylog\nslog = utils.statelog\nget_laste_update_dt = utils.get_laste_update_dt\n\nretry = 3\n# 历史数据\nsohu_history_api = 'http://q.stock.sohu.com/hisHq?code=%s&start=%s&end=%s&stat=1&order=D&period=d'\nsina_money_flow_api = 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/MoneyFlow.ssl_qsfx_zjlrqs?page=1&num=%d&sort=opendate&asc=0&daima=%s'\ndefault_start_time = '20100101'\n\n\ndef append_stock_perfix(code):\n if code.startswith('00') or code.startswith('30'):\n return 'sz%s' % code\n if code.startswith('60'):\n return 'sh%s' % code\n\n\ndef get_all_stock_code():\n '''\n 返回全部股票代码\n '''\n conn = db.get_history_data_db()\n conn.text_factory = str\n try:\n df = pd.read_sql_query(\n 'select * from stock_basics where [timeToMarket] !=0', conn)\n return df['code']\n except Exception as e:\n log.error(e)\n\n\ndef updata_all_stock_basics():\n '''\n 更新所有股票的基本数据\n return\n DataFrame\n code,代码\n name,名称\n industry,细分行业\n area,地区\n pe,市盈率\n outstanding,流通股本\n totals,总股本(万)\n totalAssets,总资产(万)\n liquidAssets,流动资产\n fixedAssets,固定资产\n reserved,公积金\n reservedPerShare,每股公积金\n eps,每股收益\n bvps,每股净资\n pb,市净率\n timeToMarket,上市日期\n '''\n conn = db.get_history_data_db()\n conn.text_factory = str\n retry = 3\n for i in range(retry):\n try:\n df = ts.get_stock_basics()\n if not df.empty:\n sql_df = df.loc[:, :]\n sql.to_sql(sql_df, name='stock_basics', con=conn,\n index=True, if_exists='replace')\n log.info('all stock basics updated, total size=%d' % len(df))\n break\n except Exception as e:\n log.error(e)\n conn.close()\n\n\ndef download_history_data_fq(autype='qfq', startTime=None):\n '''\n 获取前复权的历史k线数据\n '''\n\n conn = db.get_history_data_db('D')\n start = startTime\n if startTime is None:\n start = utils.today_last_year(6)\n\n for code in get_all_stock_code():\n df = ts.get_h_data(code, start=start, drop_factor=False)\n if df is not None:\n try:\n df.insert(0, 'code', code)\n sql_df = df.loc[:, :]\n sql.to_sql(sql_df, name='history_data_%s' %\n autype, con=conn, index=True, if_exists='append')\n log.info('%s,%s history qfq data download ok.' 
% (code, start))\n except Exception as e:\n log.error('error:code:%s,start:%s' % (code, start))\n\n\ndef download_history_data(ktype='D', start=None, end=None, init_run=False):\n '''\n 获取近不复权的历史k线数据\n '''\n if init_run:\n start = default_start_time\n if end == None:\n end = datetime.today().date().strftime('%Y%m%d')\n\n conn = db.get_history_data_db(ktype)\n cost = 0\n cur_time = datetime.now()\n\n for code in get_all_stock_code():\n cost = datetime.now()\n if start is None:\n _start = get_laste_update_dt(code)\n if _start is not None:\n dt = datetime.strptime(_start, '%Y-%m-%d') + timedelta(days=1)\n start = datetime.strftime(dt, '%Y%m%d')\n else:\n row = conn.execute(config.sql_last_date_history_data_by_code % code).fetchone()\n if row is not None:\n start = row[0]\n dt = datetime.strptime(start, '%Y-%m-%d') + timedelta(days=1)\n start = datetime.strftime(dt, '%Y%m%d')\n else:\n start = default_start_time\n for i in range(retry):\n try:\n url = sohu_history_api % ('cn_' + code, start, end)\n text = request.urlopen(url, timeout=10).read()\n text = text.decode('GBK')\n log.info('url=%s,size=%d, try=%d' % (url, len(text), i))\n if len(text) < 20:\n continue\n j = demjson.decode(text, 'utf-8')\n head = ['date', 'open', 'close', 'chg', 'chg_r', 'low', 'high', 'vibration', 'volume',\n 'amount', 'turnover'] # 日期 开盘 收盘 涨跌额 涨跌幅 最低 最高 成交量(手) 成交金额(万)\n # 日期\t开盘\t收盘\t涨跌额\t涨跌幅\t最低\t最高\t成交量(手)\t成交金额(万)\t换手率\n data = []\n\n for x in j[0].get('hq'):\n date, open, close, change, _, low, high, valume, amount, turnover = x\n if '-' == turnover:\n turnover = '0.0%'\n chg_r = '%.4f' % ((float(close) - float(open)) / float(open))\n vibration = '%.4f' % float((float(high) - float(low)) / float(open))\n chg_r = float(chg_r)\n vibration = float(vibration)\n data.append(\n [date, float(open), float(close), float(change), float(chg_r), float(low), float(high),\n float(vibration), float(valume), float(amount), float(turnover[:-1])])\n\n df = pd.DataFrame(data, columns=head)\n if not df.empty:\n df.insert(1, 'code', code)\n sql_df = df.loc[:, :]\n sql.to_sql(sql_df, name='history_data', con=conn, index=False, if_exists='append')\n log.info('%s,%s,%d history data download ok.' % (code, str(start), len(sql_df)))\n slog.info('%s,%s' % (code, data[0][0]))\n break\n except Exception as e:\n log.error('error:code=%s,start=%s,msg=%s' % (code, start, e))\n if str(e).find('UNIQUE constraint') > -1:\n break\n log.debug('%s,costs:%d s' % (code, (datetime.now() - cost).seconds))\n conn.close()\n log.info('history data download complete. 
cost %d s' % (datetime.now() - cur_time).seconds)\n\n\ndef download_index_history_data(start=None, end=None, init_run=False):\n '''\n start:开始时间 yyyyMMdd,第一次调用空则取20100101,之后以数据表中最近时间为准\n end:结束时间 yyyyMMdd,空则取当前日期\n '''\n cur_time = datetime.now()\n conn = db.get_history_data_db()\n if init_run:\n start = default_start_time\n if start is None:\n try:\n onerow = conn.execute(config.sql_last_date_index_all).fetchone()\n if onerow is not None:\n start = onerow[0]\n dt = datetime.strptime(start, '%Y-%m-%d') + timedelta(days=1)\n start = datetime.strftime(dt, '%Y%m%d')\n else:\n start = default_start_time\n except Exception as e:\n start = default_start_time\n\n if end is None:\n end = datetime.today().date().strftime('%Y%m%d')\n print(start, end)\n if int(end) <= int(start):\n return None\n for code in indices.keys():\n for i in range(retry):\n try:\n url = sohu_history_api % (code, start, end)\n\n text = request.urlopen(url, timeout=10).read()\n text = text.decode('GBK')\n log.info('url=%s,size=%d, try=%d' % (url, len(text), i))\n if len(text) < 20:\n continue\n j = demjson.decode(text, 'utf-8')\n head = ['date', 'open', 'close', 'chg', 'chg_r', 'low', 'high', 'vibration', 'volume',\n 'amount'] # 日期 开盘 收盘 涨跌额 涨跌幅 最低 最高 成交量(手) 成交金额(万)\n # 日期\t开盘\t收盘\t涨跌额\t涨跌幅\t最低\t最高\t成交量(手)\t成交金额(万)\t换手率\n data = []\n for x in j[0].get('hq'):\n date, open, close, change, _, low, high, valume, amount, _ = x\n chg_r = '%.4f' % ((float(close) - float(open)) / float(open))\n vibration = '%.4f' % (float((float(high) - float(low)) / float(open)))\n # print date, vibration, str(float(vibration))\n data.append([date, float(open), float(close), float(change), float(chg_r), float(low), float(high),\n float(vibration), float(valume), float(amount)])\n\n # sql_str = 'insert OR IGNORE into all_index values(?,?,?,?,?,?,?,?,?,?,?)'\n # print len(data[0])\n # conn.executemany(sql_str, data)\n df = pd.DataFrame(data, columns=head)\n if not df.empty:\n df.insert(1, 'code', code)\n sql_df = df.loc[:, :]\n sql.to_sql(sql_df, name='all_index', con=conn, index=False, if_exists='append')\n log.info('%s,%s index history download ok.' % (code, start))\n break\n except Exception as e:\n log.error(e)\n conn.close()\n log.info('index history data download complete. 
cost %d s' % (datetime.now() - cur_time).seconds)\n\n\ndef download_money_flow_data(num=1000):\n '''\n get money flow from sina finance\n :param num:\n :return:\n '''\n conn = db.get_money_flow_db()\n cur_time = datetime.now()\n for code in get_all_stock_code():\n code = append_stock_perfix(code)\n cost = datetime.now()\n for i in range(retry):\n try:\n url = sina_money_flow_api % (num, code)\n text = request.urlopen(url, timeout=10).read()\n text = text.decode('GBK')\n log.info('url=%s,size=%d, try=%d' % (url, len(text), i))\n if len(text) < 10:\n continue\n # j = demjson.decode(text, 'utf-8') #json很大的时候效率非常查\n text = text[2:-2]\n j = text.replace('\"', '').split('},{')\n head = ['date', 'close', 'chg_r', 'turnover', 'netamount', 'ratio', 'zl_netamount', 'zl_ratio',\n 'cat_ratio']\n # 日期\t收盘价\t涨跌幅\t换手率\t净流入 \t净流入率\t主力净流入\t 主力净流入率\t行业净流入率\n data = []\n\n for x in j:\n m = {}\n for s in x.split(','):\n k, v = s.split(':')\n if '-' == v or 'null' == v:\n v = '0.0'\n m[k] = v\n date = m['opendate']\n close = float(m['trade'])\n chg_r = float(m['changeratio'])\n turnover = float(m['turnover']) / 10000\n netamount = float(m['netamount']) / 10000\n ratio = float(m['ratioamount'])\n zl_netamount = float(m['r0_net']) / 10000\n zl_ratio = float(m['r0_ratio'])\n cat_ratio = float(m['cate_ra'])\n data.append([date, close, chg_r, turnover, netamount, ratio, zl_netamount, zl_ratio, cat_ratio])\n\n df = pd.DataFrame(data, columns=head)\n log.info('data ok')\n if not df.empty:\n df.insert(1, 'code', code)\n sql_df = df.loc[:, :]\n sql.to_sql(sql_df, name='money_flow', con=conn, index=False, if_exists='append')\n log.info('%s,%s,%d money flow data download ok.' % (code, str(start), len(sql_df)))\n break\n except Exception as e:\n log.error('error:code=%s,start=%s,msg=%s' % (code, start, e))\n log.debug('%s,costs:%d s' % (code, (datetime.now() - cost).seconds))\n conn.close()\n log.info('money flow data download complete. cost %d s' % (datetime.now() - cur_time).seconds)\n\n pass\n\n\ndef download_dd_data(start=None):\n '''\n 获取大单数据\n '''\n conn = db.get_dd_data_db()\n start = start\n if start is None:\n start = utils.today_last_year(1)\n for code in get_all_stock_code():\n\n end = datetime.today().date()\n while start < end:\n date = end.strftime('%Y-%m-%d')\n df = ts.get_sina_dd(code=code, date=date, vol=500)\n if df is not None:\n df.insert(0, 'code', code)\n try:\n sql_df = df.loc[:, :]\n sql.to_sql(sql_df, name='dd_data', con=conn,\n index=True, if_exists='append')\n log.info('%s,%s dd data download ok.' 
% (code, start))\n except Exception as e:\n log.error('download error:%s,%s' % (code, date))\n pass\n start = start + timedelta(days=1)\n\n\nif __name__ == '__main__':\n # download_index_history_data(init_run=True)\n #\n # import sys\n # sys.exit(0)\n # exit(0)\n\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--update_basics', help='update all stock basics', action='store_true')\n parser.add_argument('-d', '--download', help='download history data', action='store_true')\n parser.add_argument('-dqfq', '--download_qfq', help='download history data (qfq)', action='store_true')\n parser.add_argument('-di', '--download_index', help='download index data', action='store_true')\n parser.add_argument('-dd', '--download_dd', help='download dd data', action='store_true')\n parser.add_argument('-dmf', '--download_money_flow', help='down load money flow data', action='store_true')\n parser.add_argument('--start', help='start time', action='store')\n parser.add_argument('--end', help='end time', action='store')\n parser.add_argument('--num', help='number of items per page', action='store')\n parser.add_argument('--init_run', help='init_run or not', action='store')\n\n args = parser.parse_args()\n\n start, end, num = None, None, None\n if args.start is not None:\n start = args.start\n if args.end is not None:\n end = args.end\n if args.num is not None:\n num = args.num\n\n if args.update_basics:\n # download basics\n updata_all_stock_basics()\n\n if args.download:\n if args.init_run:\n download_history_data(start=start, end=end, init_run=True)\n else:\n download_history_data(start=start, end=end)\n\n if args.download_qfq:\n download_history_data_fq()\n\n if args.download_index:\n if args.init_run:\n download_index_history_data(start=start, end=end, init_run=True)\n else:\n download_index_history_data(start=start, end=end)\n\n if args.download_dd:\n download_dd_data()\n\n if args.download_money_flow:\n if args.num is not None:\n download_money_flow_data(int(args.num))\n else:\n download_money_flow_data()\n"
] |
[
[
"pandas.read_sql_query",
"pandas.io.sql.to_sql",
"pandas.DataFrame"
]
] |
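
`DataFether.py` above repeats one pattern per table: build a `DataFrame`, inject a `code` column, append via `pandas.io.sql.to_sql`, and read back with `read_sql_query`. A self-contained sketch of that round trip, with an in-memory SQLite database standing in for `db.get_history_data_db()`:

```python
import sqlite3
import pandas as pd
from pandas.io import sql

conn = sqlite3.connect(":memory:")

df = pd.DataFrame(
    [["2020-01-02", 10.0, 10.5], ["2020-01-03", 10.5, 10.2]],
    columns=["date", "open", "close"],
)
df.insert(1, "code", "600000")  # same column-injection idiom as the file

sql.to_sql(df, name="history_data", con=conn, index=False, if_exists="append")
back = pd.read_sql_query("select * from history_data", conn)
conn.close()
```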
The-very-most-awesome-team-of-cool-kids/02463_Active_Learning
|
[
"abc35a31996de1c2e3275cf946b6a44f62abb781"
] |
[
"AL_scripts/dataloader.py"
] |
[
"import numpy as np\nimport torch\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nfrom LOAD_XRAY import concat_, zeropad, Dataload as concat_, zeropad, Dataload\nimport os\n\n\ndef get_dataset(name):\n \"\"\"\n Gets data set:\n --------------------------------------\n Parameters:\n \n name: The name of the wanted data set, options are: \"CIFAR10\"\n --------------------------------------\n Outputs:\n\n X_tr: The training data\n Y_tr: The training labels\n X_te: The test data\n Y_te: The test labels\n \"\"\"\n\n if name.upper() == \"CIFAR10\":\n data_tr = datasets.CIFAR10('./CIFAR10', train=True, download=True)\n data_te = datasets.CIFAR10('./CIFAR10', train=False, download=True)\n X_tr = data_tr.train_data\n Y_tr = torch.from_numpy(np.array(data_tr.train_labels))\n X_te = data_te.test_data\n Y_te = torch.from_numpy(np.array(data_te.test_labels))\n elif name.upper() == \"XRAY\":\n size = 256\n if not os.path.exists(\"Egne_filer/Train/chest_xray/\"):\n from DATA_kaggle import data_from_kaggle\n print(\"Preparing data from Kaggle...\")\n data_from_kaggle(size)\n path_train =\"Egne_filer/Train/chest_xray/train/\"\n path_test = \"Egne_filer/Test/chest_xray/test/\"\n X0_tr, y0_tr = Dataload(path_train, \"NORMAL\", size)\n X1_tr, y1_tr = Dataload(path_train, \"PNEUMONIA\", size)\n \n X_tr = np.concatenate((X0_tr,X1_tr),axis=0) \n Y_tr = np.concatenate((y0_tr,y1_tr))\n\n X0_te, y0_te = Dataload(path_test, \"NORMAL\", size)\n X1_te, y1_te = Dataload(path_test, \"PNEUMONIA\", size)\n \n X_te = np.concatenate((X0_te,X1_te),axis=0) \n Y_te = np.concatenate((y0_te,y1_te))\n \n Y_tr = torch.from_numpy(Y_tr)\n Y_te = torch.from_numpy(Y_te)\n \n return X_tr, Y_tr, X_te, Y_te\n\n\ndef get_handler(name):\n if name.upper() == \"CIFAR10\":\n return handler1\n elif name.upper() == \"XRAY\":\n return handler2\n \n\ndef get_args(name):\n if name.upper() == \"CIFAR10\":\n return {'n_epoch': 1,\n 'transform': transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),\n 'loader_tr_args':{'batch_size': 4, 'num_workers': 1},\n 'loader_te_args':{'batch_size': 1000, 'num_workers': 1},\n 'optimizer_args':{'lr': 0.0009}}\n if name.upper() == \"XRAY\":\n return {'n_epoch': 1,\n 'transform': transforms.Compose([transforms.ToTensor(), \n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), \n # transforms.Resize(size=256),\n # transforms.CenterCrop(size=224)]),\n 'loader_tr_args':{'batch_size': 64, 'num_workers': 1},\n 'loader_te_args':{'batch_size': 78, 'num_workers': 1},\n 'optimizer_args':{'lr': 0.0009}}\n\n\n\nclass handler1(Dataset):\n def __init__(self, X, Y, transform = None):\n self.X = X\n self.Y = Y\n self.transform = transform\n\n def __getitem__(self, index):\n x, y = self.X[index], self.Y[index]\n if self.transform is not None:\n x = Image.fromarray(x)\n x = self.transform(x)\n return x, y, index\n\n def __len__(self):\n return len(self.X)\n\nclass handler2(Dataset):\n def __init__(self, X, Y, transform = None):\n self.X = X\n self.Y = Y\n self.transform = transform\n\n def __getitem__(self, index):\n x, y = self.X[index], self.Y[index]\n if self.transform is not None:\n h, w = np.shape(x)\n # print(type(x), np.shape(x))\n x = np.reshape(x, (h, w, 1))\n # print(type(x), np.shape(x))\n # # x = torch.FloatTensor(np.shape(x))\n # # x = transforms.functional.to_pil_image(x)\n # # x = Image.fromarray((x * 255).astype(np.uint8))\n # x = Image.fromarray(x)\n x = self.transform(x)\n return x, y, 
index\n\n def __len__(self):\n return len(self.X)"
] |
[
[
"numpy.reshape",
"torch.from_numpy",
"numpy.concatenate",
"numpy.shape",
"numpy.array"
]
] |
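
The XRAY branch of `get_dataset` and `handler2` above are mostly array plumbing: concatenate the per-class arrays, wrap labels with `torch.from_numpy`, and add a trailing channel axis before the transforms run. A sketch with random arrays standing in for the `Dataload(...)` outputs:

```python
import numpy as np
import torch

size = 8
X0, y0 = np.random.rand(5, size, size), np.zeros(5)  # "NORMAL" stand-in
X1, y1 = np.random.rand(7, size, size), np.ones(7)   # "PNEUMONIA" stand-in

X_tr = np.concatenate((X0, X1), axis=0)
Y_tr = torch.from_numpy(np.concatenate((y0, y1)))

# handler2 reshapes each grayscale image to (h, w, 1) before transforms:
h, w = np.shape(X_tr[0])
x = np.reshape(X_tr[0], (h, w, 1))
```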
deligentfool/SIDE
|
[
"561fc6c5312906fd2073af043c2c17ec4ea3758d"
] |
[
"src/envs/starcraft2/starcraft2.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom envs.multiagentenv import MultiAgentEnv\nfrom smac.env.starcraft2.maps import get_map_params\n\nimport atexit\nfrom operator import attrgetter\nfrom copy import deepcopy\nimport numpy as np\nimport enum\nimport math\nfrom absl import logging\n\nfrom pysc2 import maps\nfrom pysc2 import run_configs\nfrom pysc2.lib import protocol\n\nfrom s2clientprotocol import common_pb2 as sc_common\nfrom s2clientprotocol import sc2api_pb2 as sc_pb\nfrom s2clientprotocol import raw_pb2 as r_pb\nfrom s2clientprotocol import debug_pb2 as d_pb\n\nraces = {\n \"R\": sc_common.Random,\n \"P\": sc_common.Protoss,\n \"T\": sc_common.Terran,\n \"Z\": sc_common.Zerg,\n}\n\ndifficulties = {\n \"1\": sc_pb.VeryEasy,\n \"2\": sc_pb.Easy,\n \"3\": sc_pb.Medium,\n \"4\": sc_pb.MediumHard,\n \"5\": sc_pb.Hard,\n \"6\": sc_pb.Harder,\n \"7\": sc_pb.VeryHard,\n \"8\": sc_pb.CheatVision,\n \"9\": sc_pb.CheatMoney,\n \"A\": sc_pb.CheatInsane,\n}\n\nactions = {\n \"move\": 16, # target: PointOrUnit\n \"attack\": 23, # target: PointOrUnit\n \"stop\": 4, # target: None\n \"heal\": 386, # Unit\n}\n\n\nclass Direction(enum.IntEnum):\n NORTH = 0\n SOUTH = 1\n EAST = 2\n WEST = 3\n\n\nclass StarCraft2Env(MultiAgentEnv):\n \"\"\"The StarCraft II environment for decentralised multi-agent\n micromanagement scenarios.\n \"\"\"\n def __init__(\n self,\n map_name=\"8m\",\n step_mul=8,\n move_amount=2,\n difficulty=\"7\",\n game_version=None,\n seed=None,\n continuing_episode=False,\n obs_all_health=True,\n obs_own_health=True,\n obs_last_action=False,\n obs_pathing_grid=False,\n obs_terrain_height=False,\n obs_instead_of_state=False,\n obs_timestep_number=False,\n state_last_action=True,\n state_timestep_number=False,\n reward_sparse=False,\n reward_only_positive=True,\n reward_death_value=10,\n reward_win=200,\n reward_defeat=0,\n reward_negative_scale=0.5,\n reward_scale=True,\n reward_scale_rate=20,\n replay_dir=\"\",\n replay_prefix=\"\",\n window_size_x=1920,\n window_size_y=1200,\n heuristic_ai=False,\n heuristic_rest=False,\n debug=False,\n ):\n \"\"\"\n Create a StarCraftC2Env environment.\n\n Parameters\n ----------\n map_name : str, optional\n The name of the SC2 map to play (default is \"8m\"). The full list\n can be found by running bin/map_list.\n step_mul : int, optional\n How many game steps per agent step (default is 8). None\n indicates to use the default map step_mul.\n move_amount : float, optional\n How far away units are ordered to move per step (default is 2).\n difficulty : str, optional\n The difficulty of built-in computer AI bot (default is \"7\").\n game_version : str, optional\n StarCraft II game version (default is None). None indicates the\n latest version.\n seed : int, optional\n Random seed used during game initialisation. This allows to\n continuing_episode : bool, optional\n Whether to consider episodes continuing or finished after time\n limit is reached (default is False).\n obs_all_health : bool, optional\n Agents receive the health of all units (in the sight range) as part\n of observations (default is True).\n obs_own_health : bool, optional\n Agents receive their own health as a part of observations (default\n is False). 
This flag is ignored when obs_all_health == True.\n obs_last_action : bool, optional\n Agents receive the last actions of all units (in the sight range)\n as part of observations (default is False).\n obs_pathing_grid : bool, optional\n Whether observations include pathing values surrounding the agent\n (default is False).\n obs_terrain_height : bool, optional\n Whether observations include terrain height values surrounding the\n agent (default is False).\n obs_instead_of_state : bool, optional\n Use combination of all agents' observations as the global state\n (default is False).\n obs_timestep_number : bool, optional\n Whether observations include the current timestep of the episode\n (default is False).\n state_last_action : bool, optional\n Include the last actions of all agents as part of the global state\n (default is True).\n state_timestep_number : bool, optional\n Whether the state include the current timestep of the episode\n (default is False).\n reward_sparse : bool, optional\n Receive 1/-1 reward for winning/loosing an episode (default is\n False). Whe rest of reward parameters are ignored if True.\n reward_only_positive : bool, optional\n Reward is always positive (default is True).\n reward_death_value : float, optional\n The amount of reward received for killing an enemy unit (default\n is 10). This is also the negative penalty for having an allied unit\n killed if reward_only_positive == False.\n reward_win : float, optional\n The reward for winning in an episode (default is 200).\n reward_defeat : float, optional\n The reward for loosing in an episode (default is 0). This value\n should be nonpositive.\n reward_negative_scale : float, optional\n Scaling factor for negative rewards (default is 0.5). This\n parameter is ignored when reward_only_positive == True.\n reward_scale : bool, optional\n Whether or not to scale the reward (default is True).\n reward_scale_rate : float, optional\n Reward scale rate (default is 20). When reward_scale == True, the\n reward received by the agents is divided by (max_reward /\n reward_scale_rate), where max_reward is the maximum possible\n reward per episode without considering the shield regeneration\n of Protoss units.\n replay_dir : str, optional\n The directory to save replays (default is None). If None, the\n replay will be saved in Replays directory where StarCraft II is\n installed.\n replay_prefix : str, optional\n The prefix of the replay to be saved (default is None). 
If None,\n the name of the map will be used.\n window_size_x : int, optional\n The length of StarCraft II window size (default is 1920).\n window_size_y: int, optional\n The height of StarCraft II window size (default is 1200).\n heuristic_ai: bool, optional\n Whether or not to use a non-learning heuristic AI (default False).\n heuristic_rest: bool, optional\n At any moment, restrict the actions of the heuristic AI to be\n chosen from actions available to RL agents (default is False).\n Ignored if heuristic_ai == False.\n debug: bool, optional\n Log messages about observations, state, actions and rewards for\n debugging purposes (default is False).\n \"\"\"\n # Map arguments\n self.map_name = map_name\n map_params = get_map_params(self.map_name)\n self.n_agents = map_params[\"n_agents\"]\n self.n_enemies = map_params[\"n_enemies\"]\n self.episode_limit = map_params[\"limit\"]\n self._move_amount = move_amount\n self._step_mul = step_mul\n self.difficulty = difficulty\n\n # Observations and state\n self.obs_own_health = obs_own_health\n self.obs_all_health = obs_all_health\n self.obs_instead_of_state = obs_instead_of_state\n self.obs_last_action = obs_last_action\n self.obs_pathing_grid = obs_pathing_grid\n self.obs_terrain_height = obs_terrain_height\n self.obs_timestep_number = obs_timestep_number\n self.state_last_action = state_last_action\n self.state_timestep_number = state_timestep_number\n if self.obs_all_health:\n self.obs_own_health = True\n self.n_obs_pathing = 8\n self.n_obs_height = 9\n\n # Rewards args\n self.reward_sparse = reward_sparse\n self.reward_only_positive = reward_only_positive\n self.reward_negative_scale = reward_negative_scale\n self.reward_death_value = reward_death_value\n self.reward_win = reward_win\n self.reward_defeat = reward_defeat\n self.reward_scale = reward_scale\n self.reward_scale_rate = reward_scale_rate\n\n # Other\n self.game_version = game_version\n self.continuing_episode = continuing_episode\n self._seed = seed\n self.heuristic_ai = heuristic_ai\n self.heuristic_rest = heuristic_rest\n self.debug = debug\n self.window_size = (window_size_x, window_size_y)\n self.replay_dir = replay_dir\n self.replay_prefix = replay_prefix\n\n # Actions\n self.n_actions_no_attack = 6\n self.n_actions_move = 4\n self.n_actions = self.n_actions_no_attack + self.n_enemies\n\n # Map info\n self._agent_race = map_params[\"a_race\"]\n self._bot_race = map_params[\"b_race\"]\n self.shield_bits_ally = 1 if self._agent_race == \"P\" else 0\n self.shield_bits_enemy = 1 if self._bot_race == \"P\" else 0\n self.unit_type_bits = map_params[\"unit_type_bits\"]\n self.map_type = map_params[\"map_type\"]\n\n self.max_reward = (\n self.n_enemies * self.reward_death_value + self.reward_win\n )\n\n self.agents = {}\n self.enemies = {}\n self._episode_count = 0\n self._episode_steps = 0\n self._total_steps = 0\n self._obs = None\n self.battles_won = 0\n self.battles_game = 0\n self.timeouts = 0\n self.force_restarts = 0\n self.last_stats = None\n self.death_tracker_ally = np.zeros(self.n_agents)\n self.death_tracker_enemy = np.zeros(self.n_enemies)\n self.previous_ally_units = None\n self.previous_enemy_units = None\n self.last_action = np.zeros((self.n_agents, self.n_actions))\n self._min_unit_type = 0\n self.marine_id = self.marauder_id = self.medivac_id = 0\n self.hydralisk_id = self.zergling_id = self.baneling_id = 0\n self.stalker_id = self.colossus_id = self.zealot_id = 0\n self.max_distance_x = 0\n self.max_distance_y = 0\n self.map_x = 0\n self.map_y = 0\n 
self.terrain_height = None\n self.pathing_grid = None\n self._run_config = None\n self._sc2_proc = None\n self._controller = None\n\n # Qatten\n self.unit_dim = 4 + self.shield_bits_ally + self.unit_type_bits\n \n # Try to avoid leaking SC2 processes on shutdown\n atexit.register(lambda: self.close())\n\n def _launch(self):\n \"\"\"Launch the StarCraft II game.\"\"\"\n self._run_config = run_configs.get(version=self.game_version)\n _map = maps.get(self.map_name)\n\n # Setting up the interface\n interface_options = sc_pb.InterfaceOptions(raw=True, score=False)\n self._sc2_proc = self._run_config.start(window_size=self.window_size, want_rgb=False)\n self._controller = self._sc2_proc.controller\n\n # Request to create the game\n create = sc_pb.RequestCreateGame(\n local_map=sc_pb.LocalMap(\n map_path=_map.path,\n map_data=self._run_config.map_data(_map.path)),\n realtime=False,\n random_seed=self._seed)\n create.player_setup.add(type=sc_pb.Participant)\n create.player_setup.add(type=sc_pb.Computer, race=races[self._bot_race],\n difficulty=difficulties[self.difficulty])\n self._controller.create_game(create)\n\n join = sc_pb.RequestJoinGame(race=races[self._agent_race],\n options=interface_options)\n self._controller.join_game(join)\n\n game_info = self._controller.game_info()\n map_info = game_info.start_raw\n map_play_area_min = map_info.playable_area.p0\n map_play_area_max = map_info.playable_area.p1\n self.max_distance_x = map_play_area_max.x - map_play_area_min.x\n self.max_distance_y = map_play_area_max.y - map_play_area_min.y\n self.map_x = map_info.map_size.x\n self.map_y = map_info.map_size.y\n\n if map_info.pathing_grid.bits_per_pixel == 1:\n vals = np.array(list(map_info.pathing_grid.data)).reshape(\n self.map_x, int(self.map_y / 8))\n self.pathing_grid = np.transpose(np.array([\n [(b >> i) & 1 for b in row for i in range(7, -1, -1)]\n for row in vals], dtype=np.bool))\n else:\n self.pathing_grid = np.invert(np.flip(np.transpose(np.array(\n list(map_info.pathing_grid.data), dtype=np.bool).reshape(\n self.map_x, self.map_y)), axis=1))\n\n self.terrain_height = np.flip(\n np.transpose(np.array(list(map_info.terrain_height.data))\n .reshape(self.map_x, self.map_y)), 1) / 255\n\n def reset(self):\n \"\"\"Reset the environment. 
Required after each full episode.\n Returns initial observations and states.\n \"\"\"\n self._episode_steps = 0\n if self._episode_count == 0:\n # Launch StarCraft II\n self._launch()\n else:\n self._restart()\n\n # Information kept for counting the reward\n self.death_tracker_ally = np.zeros(self.n_agents)\n self.death_tracker_enemy = np.zeros(self.n_enemies)\n self.previous_ally_units = None\n self.previous_enemy_units = None\n self.win_counted = False\n self.defeat_counted = False\n\n self.last_action = np.zeros((self.n_agents, self.n_actions))\n\n if self.heuristic_ai:\n self.heuristic_targets = [None] * self.n_agents\n\n try:\n self._obs = self._controller.observe()\n self.init_units()\n except (protocol.ProtocolError, protocol.ConnectionError):\n self.full_restart()\n\n if self.debug:\n logging.debug(\"Started Episode {}\"\n .format(self._episode_count).center(60, \"*\"))\n\n return self.get_obs(), self.get_state()\n\n def _restart(self):\n \"\"\"Restart the environment by killing all units on the map.\n There is a trigger in the SC2Map file, which restarts the\n episode when there are no units left.\n \"\"\"\n try:\n self._kill_all_units()\n self._controller.step(2)\n except (protocol.ProtocolError, protocol.ConnectionError):\n self.full_restart()\n\n def full_restart(self):\n \"\"\"Full restart. Closes the SC2 process and launches a new one. \"\"\"\n self._sc2_proc.close()\n self._launch()\n self.force_restarts += 1\n\n def step(self, actions):\n \"\"\"A single environment step. Returns reward, terminated, info.\"\"\"\n actions_int = [int(a) for a in actions]\n\n self.last_action = np.eye(self.n_actions)[np.array(actions_int)]\n\n # Collect individual actions\n sc_actions = []\n if self.debug:\n logging.debug(\"Actions\".center(60, \"-\"))\n\n for a_id, action in enumerate(actions_int):\n if not self.heuristic_ai:\n sc_action = self.get_agent_action(a_id, action)\n else:\n sc_action, action_num = self.get_agent_action_heuristic(\n a_id, action)\n actions[a_id] = action_num\n if sc_action:\n sc_actions.append(sc_action)\n\n # Send action request\n req_actions = sc_pb.RequestAction(actions=sc_actions)\n try:\n self._controller.actions(req_actions)\n # Make step in SC2, i.e. 
apply actions\n self._controller.step(self._step_mul)\n # Observe here so that we know if the episode is over.\n self._obs = self._controller.observe()\n except (protocol.ProtocolError, protocol.ConnectionError):\n self.full_restart()\n return 0, True, {}\n\n self._total_steps += 1\n self._episode_steps += 1\n\n # Update units\n game_end_code = self.update_units()\n\n terminated = False\n reward = self.reward_battle()\n info = {\"battle_won\": False}\n\n # count units that are still alive\n dead_allies, dead_enemies = 0, 0\n for al_id, al_unit in self.agents.items():\n if al_unit.health == 0:\n dead_allies += 1\n for e_id, e_unit in self.enemies.items():\n if e_unit.health == 0:\n dead_enemies += 1\n\n info['dead_allies'] = dead_allies\n info['dead_enemies'] = dead_enemies\n\n if game_end_code is not None:\n # Battle is over\n terminated = True\n self.battles_game += 1\n if game_end_code == 1 and not self.win_counted:\n self.battles_won += 1\n self.win_counted = True\n info[\"battle_won\"] = True\n if not self.reward_sparse:\n reward += self.reward_win\n else:\n reward = 1\n elif game_end_code == -1 and not self.defeat_counted:\n self.defeat_counted = True\n if not self.reward_sparse:\n reward += self.reward_defeat\n else:\n reward = -1\n\n elif self._episode_steps >= self.episode_limit:\n # Episode limit reached\n terminated = True\n if self.continuing_episode:\n info[\"episode_limit\"] = True\n self.battles_game += 1\n self.timeouts += 1\n\n if self.debug:\n logging.debug(\"Reward = {}\".format(reward).center(60, '-'))\n\n if terminated:\n self._episode_count += 1\n\n if self.reward_scale:\n reward /= self.max_reward / self.reward_scale_rate\n\n return reward, terminated, info\n\n def get_agent_action(self, a_id, action):\n \"\"\"Construct the action for agent a_id.\"\"\"\n avail_actions = self.get_avail_agent_actions(a_id)\n assert avail_actions[action] == 1, \\\n \"Agent {} cannot perform action {}\".format(a_id, action)\n\n unit = self.get_unit_by_id(a_id)\n tag = unit.tag\n x = unit.pos.x\n y = unit.pos.y\n\n if action == 0:\n # no-op (valid only when dead)\n assert unit.health == 0, \"No-op only available for dead agents.\"\n if self.debug:\n logging.debug(\"Agent {}: Dead\".format(a_id))\n return None\n elif action == 1:\n # stop\n cmd = r_pb.ActionRawUnitCommand(\n ability_id=actions[\"stop\"],\n unit_tags=[tag],\n queue_command=False)\n if self.debug:\n logging.debug(\"Agent {}: Stop\".format(a_id))\n\n elif action == 2:\n # move north\n cmd = r_pb.ActionRawUnitCommand(\n ability_id=actions[\"move\"],\n target_world_space_pos=sc_common.Point2D(\n x=x, y=y + self._move_amount),\n unit_tags=[tag],\n queue_command=False)\n if self.debug:\n logging.debug(\"Agent {}: Move North\".format(a_id))\n\n elif action == 3:\n # move south\n cmd = r_pb.ActionRawUnitCommand(\n ability_id=actions[\"move\"],\n target_world_space_pos=sc_common.Point2D(\n x=x, y=y - self._move_amount),\n unit_tags=[tag],\n queue_command=False)\n if self.debug:\n logging.debug(\"Agent {}: Move South\".format(a_id))\n\n elif action == 4:\n # move east\n cmd = r_pb.ActionRawUnitCommand(\n ability_id=actions[\"move\"],\n target_world_space_pos=sc_common.Point2D(\n x=x + self._move_amount, y=y),\n unit_tags=[tag],\n queue_command=False)\n if self.debug:\n logging.debug(\"Agent {}: Move East\".format(a_id))\n\n elif action == 5:\n # move west\n cmd = r_pb.ActionRawUnitCommand(\n ability_id=actions[\"move\"],\n target_world_space_pos=sc_common.Point2D(\n x=x - self._move_amount, y=y),\n unit_tags=[tag],\n 
queue_command=False)\n if self.debug:\n logging.debug(\"Agent {}: Move West\".format(a_id))\n else:\n # attack/heal units that are in range\n target_id = action - self.n_actions_no_attack\n if self.map_type == \"MMM\" and unit.unit_type == self.medivac_id:\n target_unit = self.agents[target_id]\n action_name = \"heal\"\n else:\n target_unit = self.enemies[target_id]\n action_name = \"attack\"\n\n action_id = actions[action_name]\n target_tag = target_unit.tag\n\n cmd = r_pb.ActionRawUnitCommand(\n ability_id=action_id,\n target_unit_tag=target_tag,\n unit_tags=[tag],\n queue_command=False)\n\n if self.debug:\n logging.debug(\"Agent {} {}s unit # {}\".format(\n a_id, action_name, target_id))\n\n sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))\n return sc_action\n\n def get_agent_action_heuristic(self, a_id, action):\n unit = self.get_unit_by_id(a_id)\n tag = unit.tag\n\n target = self.heuristic_targets[a_id]\n if unit.unit_type == self.medivac_id:\n if (target is None or self.agents[target].health == 0 or\n self.agents[target].health == self.agents[target].health_max):\n min_dist = math.hypot(self.max_distance_x, self.max_distance_y)\n min_id = -1\n for al_id, al_unit in self.agents.items():\n if al_unit.unit_type == self.medivac_id:\n continue\n if (al_unit.health != 0 and\n al_unit.health != al_unit.health_max):\n dist = self.distance(unit.pos.x, unit.pos.y,\n al_unit.pos.x, al_unit.pos.y)\n if dist < min_dist:\n min_dist = dist\n min_id = al_id\n self.heuristic_targets[a_id] = min_id\n if min_id == -1:\n self.heuristic_targets[a_id] = None\n return None, 0\n action_id = actions['heal']\n target_tag = self.agents[self.heuristic_targets[a_id]].tag\n else:\n if target is None or self.enemies[target].health == 0:\n min_dist = math.hypot(self.max_distance_x, self.max_distance_y)\n min_id = -1\n for e_id, e_unit in self.enemies.items():\n if (unit.unit_type == self.marauder_id and\n e_unit.unit_type == self.medivac_id):\n continue\n if e_unit.health > 0:\n dist = self.distance(unit.pos.x, unit.pos.y,\n e_unit.pos.x, e_unit.pos.y)\n if dist < min_dist:\n min_dist = dist\n min_id = e_id\n self.heuristic_targets[a_id] = min_id\n if min_id == -1:\n self.heuristic_targets[a_id] = None\n return None, 0\n action_id = actions['attack']\n target_tag = self.enemies[self.heuristic_targets[a_id]].tag\n\n action_num = self.heuristic_targets[a_id] + self.n_actions_no_attack\n\n # Check if the action is available\n if (self.heuristic_rest and\n self.get_avail_agent_actions(a_id)[action_num] == 0):\n\n # Move towards the target rather than attacking/healing\n if unit.unit_type == self.medivac_id:\n target_unit = self.agents[self.heuristic_targets[a_id]]\n else:\n target_unit = self.enemies[self.heuristic_targets[a_id]]\n\n delta_x = target_unit.pos.x - unit.pos.x\n delta_y = target_unit.pos.y - unit.pos.y\n\n if abs(delta_x) > abs(delta_y): # east or west\n if delta_x > 0: # east\n target_pos=sc_common.Point2D(\n x=unit.pos.x + self._move_amount, y=unit.pos.y)\n action_num = 4\n else: # west\n target_pos=sc_common.Point2D(\n x=unit.pos.x - self._move_amount, y=unit.pos.y)\n action_num = 5\n else: # north or south\n if delta_y > 0: # north\n target_pos=sc_common.Point2D(\n x=unit.pos.x, y=unit.pos.y + self._move_amount)\n action_num = 2\n else: # south\n target_pos=sc_common.Point2D(\n x=unit.pos.x, y=unit.pos.y - self._move_amount)\n action_num = 3\n\n cmd = r_pb.ActionRawUnitCommand(\n ability_id = actions['move'],\n target_world_space_pos = target_pos,\n unit_tags = [tag],\n 
queue_command = False)\n else:\n # Attack/heal the target\n cmd = r_pb.ActionRawUnitCommand(\n ability_id = action_id,\n target_unit_tag = target_tag,\n unit_tags = [tag],\n queue_command = False)\n\n sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))\n return sc_action, action_num\n\n def reward_battle(self):\n \"\"\"Reward function when self.reward_spare==False.\n Returns accumulative hit/shield point damage dealt to the enemy\n + reward_death_value per enemy unit killed, and, in case\n self.reward_only_positive == False, - (damage dealt to ally units\n + reward_death_value per ally unit killed) * self.reward_negative_scale\n \"\"\"\n if self.reward_sparse:\n return 0\n\n reward = 0\n delta_deaths = 0\n delta_ally = 0\n delta_enemy = 0\n\n neg_scale = self.reward_negative_scale\n\n # update deaths\n for al_id, al_unit in self.agents.items():\n if not self.death_tracker_ally[al_id]:\n # did not die so far\n prev_health = (\n self.previous_ally_units[al_id].health\n + self.previous_ally_units[al_id].shield\n )\n if al_unit.health == 0:\n # just died\n self.death_tracker_ally[al_id] = 1\n if not self.reward_only_positive:\n delta_deaths -= self.reward_death_value * neg_scale\n delta_ally += prev_health * neg_scale\n else:\n # still alive\n delta_ally += neg_scale * (\n prev_health - al_unit.health - al_unit.shield\n )\n\n for e_id, e_unit in self.enemies.items():\n if not self.death_tracker_enemy[e_id]:\n prev_health = (\n self.previous_enemy_units[e_id].health\n + self.previous_enemy_units[e_id].shield\n )\n if e_unit.health == 0:\n self.death_tracker_enemy[e_id] = 1\n delta_deaths += self.reward_death_value\n delta_enemy += prev_health\n else:\n delta_enemy += prev_health - e_unit.health - e_unit.shield\n\n if self.reward_only_positive:\n reward = abs(delta_enemy + delta_deaths) # shield regeneration\n else:\n reward = delta_enemy + delta_deaths - delta_ally\n\n return reward\n\n def get_total_actions(self):\n \"\"\"Returns the total number of actions an agent could ever take.\"\"\"\n return self.n_actions\n\n @staticmethod\n def distance(x1, y1, x2, y2):\n \"\"\"Distance between two points.\"\"\"\n return math.hypot(x2 - x1, y2 - y1)\n\n def unit_shoot_range(self, agent_id):\n \"\"\"Returns the shooting range for an agent.\"\"\"\n return 6\n\n def unit_sight_range(self, agent_id):\n \"\"\"Returns the sight range for an agent.\"\"\"\n return 9\n\n def unit_max_cooldown(self, unit):\n \"\"\"Returns the maximal cooldown for a unit.\"\"\"\n switcher = {\n self.marine_id: 15,\n self.marauder_id: 25,\n self.medivac_id: 200, # max energy\n self.stalker_id: 35,\n self.zealot_id: 22,\n self.colossus_id: 24,\n self.hydralisk_id: 10,\n self.zergling_id: 11,\n self.baneling_id: 1\n }\n return switcher.get(unit.unit_type, 15)\n\n def save_replay(self):\n \"\"\"Save a replay.\"\"\"\n prefix = self.replay_prefix or self.map_name\n replay_dir = self.replay_dir or \"\"\n replay_path = self._run_config.save_replay(\n self._controller.save_replay(), replay_dir=replay_dir, prefix=prefix)\n logging.info(\"Replay saved at: %s\" % replay_path)\n\n def unit_max_shield(self, unit):\n \"\"\"Returns maximal shield for a given unit.\"\"\"\n if unit.unit_type == 74 or unit.unit_type == self.stalker_id:\n return 80 # Protoss's Stalker\n if unit.unit_type == 73 or unit.unit_type == self.zealot_id:\n return 50 # Protoss's Zaelot\n if unit.unit_type == 4 or unit.unit_type == self.colossus_id:\n return 150 # Protoss's Colossus\n\n def can_move(self, unit, direction):\n \"\"\"Whether a unit can move in 
a given direction.\"\"\"\n m = self._move_amount / 2\n\n if direction == Direction.NORTH:\n x, y = int(unit.pos.x), int(unit.pos.y + m)\n elif direction == Direction.SOUTH:\n x, y = int(unit.pos.x), int(unit.pos.y - m)\n elif direction == Direction.EAST:\n x, y = int(unit.pos.x + m), int(unit.pos.y)\n else:\n x, y = int(unit.pos.x - m), int(unit.pos.y)\n\n if self.check_bounds(x, y) and self.pathing_grid[x, y]:\n return True\n\n return False\n\n def get_surrounding_points(self, unit, include_self=False):\n \"\"\"Returns the surrounding points of the unit in 8 directions.\"\"\"\n x = int(unit.pos.x)\n y = int(unit.pos.y)\n\n ma = self._move_amount\n\n points = [\n (x, y + 2 * ma),\n (x, y - 2 * ma),\n (x + 2 * ma, y),\n (x - 2 * ma, y),\n (x + ma, y + ma),\n (x - ma, y - ma),\n (x + ma, y - ma),\n (x - ma, y + ma),\n ]\n\n if include_self:\n points.append((x, y))\n\n return points\n\n def check_bounds(self, x, y):\n \"\"\"Whether a point is within the map bounds.\"\"\"\n return (0 <= x < self.map_x and 0 <= y < self.map_y)\n\n def get_surrounding_pathing(self, unit):\n \"\"\"Returns pathing values of the grid surrounding the given unit.\"\"\"\n points = self.get_surrounding_points(unit, include_self=False)\n vals = [\n self.pathing_grid[x, y] if self.check_bounds(x, y) else 1\n for x, y in points\n ]\n return vals\n\n def get_surrounding_height(self, unit):\n \"\"\"Returns height values of the grid surrounding the given unit.\"\"\"\n points = self.get_surrounding_points(unit, include_self=True)\n vals = [\n self.terrain_height[x, y] if self.check_bounds(x, y) else 1\n for x, y in points\n ]\n return vals\n\n def get_obs_agent(self, agent_id):\n \"\"\"Returns observation for agent_id. The observation is composed of:\n\n - agent movement features (where it can move to, height information and pathing grid)\n - enemy features (available_to_attack, health, relative_x, relative_y, shield, unit_type)\n - ally features (visible, distance, relative_x, relative_y, shield, unit_type)\n - agent unit features (health, shield, unit_type)\n\n All of this information is flattened and concatenated into a list,\n in the aforementioned order. 
To know the sizes of each of the\n features inside the final list of features, take a look at the\n functions ``get_obs_move_feats_size()``,\n ``get_obs_enemy_feats_size()``, ``get_obs_ally_feats_size()`` and\n ``get_obs_own_feats_size()``.\n\n The size of the observation vector may vary, depending on the\n environment configuration and type of units present in the map.\n For instance, non-Protoss units will not have shields, movement\n features may or may not include terrain height and pathing grid,\n unit_type is not included if there is only one type of unit in the\n map etc.).\n\n NOTE: Agents should have access only to their local observations\n during decentralised execution.\n \"\"\"\n unit = self.get_unit_by_id(agent_id)\n\n move_feats_dim = self.get_obs_move_feats_size()\n enemy_feats_dim = self.get_obs_enemy_feats_size()\n ally_feats_dim = self.get_obs_ally_feats_size()\n own_feats_dim = self.get_obs_own_feats_size()\n\n move_feats = np.zeros(move_feats_dim, dtype=np.float32)\n enemy_feats = np.zeros(enemy_feats_dim, dtype=np.float32)\n ally_feats = np.zeros(ally_feats_dim, dtype=np.float32)\n own_feats = np.zeros(own_feats_dim, dtype=np.float32)\n\n if unit.health > 0: # otherwise dead, return all zeros\n x = unit.pos.x\n y = unit.pos.y\n sight_range = self.unit_sight_range(agent_id)\n\n # Movement features\n avail_actions = self.get_avail_agent_actions(agent_id)\n for m in range(self.n_actions_move):\n move_feats[m] = avail_actions[m + 2]\n\n ind = self.n_actions_move\n\n if self.obs_pathing_grid:\n move_feats[\n ind : ind + self.n_obs_pathing\n ] = self.get_surrounding_pathing(unit)\n ind += self.n_obs_pathing\n\n if self.obs_terrain_height:\n move_feats[ind:] = self.get_surrounding_height(unit)\n\n # Enemy features\n for e_id, e_unit in self.enemies.items():\n e_x = e_unit.pos.x\n e_y = e_unit.pos.y\n dist = self.distance(x, y, e_x, e_y)\n\n if (\n dist < sight_range and e_unit.health > 0\n ): # visible and alive\n # Sight range > shoot range\n enemy_feats[e_id, 0] = avail_actions[\n self.n_actions_no_attack + e_id\n ] # available\n enemy_feats[e_id, 1] = dist / sight_range # distance\n enemy_feats[e_id, 2] = (\n e_x - x\n ) / sight_range # relative X\n enemy_feats[e_id, 3] = (\n e_y - y\n ) / sight_range # relative Y\n\n ind = 4\n if self.obs_all_health:\n enemy_feats[e_id, ind] = (\n e_unit.health / e_unit.health_max\n ) # health\n ind += 1\n if self.shield_bits_enemy > 0:\n max_shield = self.unit_max_shield(e_unit)\n enemy_feats[e_id, ind] = (\n e_unit.shield / max_shield\n ) # shield\n ind += 1\n\n if self.unit_type_bits > 0:\n type_id = self.get_unit_type_id(e_unit, False)\n enemy_feats[e_id, ind + type_id] = 1 # unit type\n\n # Ally features\n al_ids = [\n al_id for al_id in range(self.n_agents) if al_id != agent_id\n ]\n for i, al_id in enumerate(al_ids):\n\n al_unit = self.get_unit_by_id(al_id)\n al_x = al_unit.pos.x\n al_y = al_unit.pos.y\n dist = self.distance(x, y, al_x, al_y)\n\n if (\n dist < sight_range and al_unit.health > 0\n ): # visible and alive\n ally_feats[i, 0] = 1 # visible\n ally_feats[i, 1] = dist / sight_range # distance\n ally_feats[i, 2] = (al_x - x) / sight_range # relative X\n ally_feats[i, 3] = (al_y - y) / sight_range # relative Y\n\n ind = 4\n if self.obs_all_health:\n ally_feats[i, ind] = (\n al_unit.health / al_unit.health_max\n ) # health\n ind += 1\n if self.shield_bits_ally > 0:\n max_shield = self.unit_max_shield(al_unit)\n ally_feats[i, ind] = (\n al_unit.shield / max_shield\n ) # shield\n ind += 1\n\n if self.unit_type_bits > 0:\n 
type_id = self.get_unit_type_id(al_unit, True)\n                        ally_feats[i, ind + type_id] = 1\n                        ind += self.unit_type_bits\n\n                    if self.obs_last_action:\n                        ally_feats[i, ind:] = self.last_action[al_id]\n\n            # Own features\n            ind = 0\n            if self.obs_own_health:\n                own_feats[ind] = unit.health / unit.health_max\n                ind += 1\n                if self.shield_bits_ally > 0:\n                    max_shield = self.unit_max_shield(unit)\n                    own_feats[ind] = unit.shield / max_shield\n                    ind += 1\n\n            if self.unit_type_bits > 0:\n                type_id = self.get_unit_type_id(unit, True)\n                own_feats[ind + type_id] = 1\n\n        agent_obs = np.concatenate(\n            (\n                move_feats.flatten(),\n                enemy_feats.flatten(),\n                ally_feats.flatten(),\n                own_feats.flatten(),\n            )\n        )\n\n        if self.obs_timestep_number:\n            agent_obs = np.append(agent_obs,\n                                  self._episode_steps / self.episode_limit)\n\n        if self.debug:\n            logging.debug(\"Obs Agent: {}\".format(agent_id).center(60, \"-\"))\n            logging.debug(\"Avail. actions {}\".format(\n                self.get_avail_agent_actions(agent_id)))\n            logging.debug(\"Move feats {}\".format(move_feats))\n            logging.debug(\"Enemy feats {}\".format(enemy_feats))\n            logging.debug(\"Ally feats {}\".format(ally_feats))\n            logging.debug(\"Own feats {}\".format(own_feats))\n\n        return agent_obs\n\n    def get_obs(self):\n        \"\"\"Returns all agent observations in a list.\n        NOTE: Agents should have access only to their local observations\n        during decentralised execution.\n        \"\"\"\n        agents_obs = [self.get_obs_agent(i) for i in range(self.n_agents)]\n        return agents_obs\n\n    def get_self_feature_size(self):\n        \"\"\"Returns the size of the per-agent feature vector built by get_self_feature.\"\"\"\n        return 4 + self.get_obs_move_feats_size()\n\n    def get_self_feature(self):\n        \"\"\"Returns, for each ally unit, its own features: health, cooldown/energy,\n        position relative to the map centre, and movement features.\n        \"\"\"\n        nf_al = 4 + self.get_obs_move_feats_size()\n        self_feature = []\n        center_x = self.map_x / 2\n        center_y = self.map_y / 2\n        for al_id, al_unit in self.agents.items():\n            feature = np.zeros((nf_al))\n            if al_unit.health > 0:\n                x = al_unit.pos.x\n                y = al_unit.pos.y\n                max_cd = self.unit_max_cooldown(al_unit)\n                feature[0] = (\n                    al_unit.health / al_unit.health_max\n                )  # health\n                if (\n                    self.map_type == \"MMM\"\n                    and al_unit.unit_type == self.medivac_id\n                ):\n                    feature[1] = al_unit.energy / max_cd  # energy\n                else:\n                    feature[1] = (\n                        al_unit.weapon_cooldown / max_cd\n                    )  # cooldown\n                feature[2] = (\n                    x - center_x\n                ) / self.max_distance_x  # relative X\n                feature[3] = (\n                    y - center_y\n                ) / self.max_distance_y  # relative Y\n                \n                avail_actions = self.get_avail_agent_actions(al_id)\n                for m in range(self.n_actions_move):\n                    feature[m + 4] = avail_actions[m + 2]\n\n                ind = self.n_actions_move + 4\n\n                if self.obs_pathing_grid:\n                    feature[\n                        ind : ind + self.n_obs_pathing\n                    ] = self.get_surrounding_pathing(al_unit)\n                    ind += self.n_obs_pathing\n\n                if self.obs_terrain_height:\n                    feature[ind:] = self.get_surrounding_height(al_unit)\n            self_feature.append(feature)\n        return self_feature\n\n    def get_state(self):\n        \"\"\"Returns the global state.\n        NOTE: This function should not be used during decentralised execution.\n        \"\"\"\n        if self.obs_instead_of_state:\n            obs_concat = np.concatenate(self.get_obs(), axis=0).astype(\n                np.float32\n            )\n            return obs_concat\n\n        nf_al = 4 + self.shield_bits_ally + self.unit_type_bits\n        nf_en = 3 + self.shield_bits_enemy + self.unit_type_bits\n\n        ally_state = np.zeros((self.n_agents, nf_al))\n        enemy_state = np.zeros((self.n_enemies, nf_en))\n\n        center_x = self.map_x / 2\n        center_y = self.map_y / 2\n\n        for al_id, al_unit in self.agents.items():\n            if al_unit.health > 0:\n                x = al_unit.pos.x\n                y = al_unit.pos.y\n                max_cd = self.unit_max_cooldown(al_unit)\n\n                ally_state[al_id, 0] = (\n                    al_unit.health / al_unit.health_max\n                )  # health\n                
if (\n                    self.map_type == \"MMM\"\n                    and al_unit.unit_type == self.medivac_id\n                ):\n                    ally_state[al_id, 1] = al_unit.energy / max_cd  # energy\n                else:\n                    ally_state[al_id, 1] = (\n                        al_unit.weapon_cooldown / max_cd\n                    )  # cooldown\n                ally_state[al_id, 2] = (\n                    x - center_x\n                ) / self.max_distance_x  # relative X\n                ally_state[al_id, 3] = (\n                    y - center_y\n                ) / self.max_distance_y  # relative Y\n\n                ind = 4\n                if self.shield_bits_ally > 0:\n                    max_shield = self.unit_max_shield(al_unit)\n                    ally_state[al_id, ind] = (\n                        al_unit.shield / max_shield\n                    )  # shield\n                    ind += 1\n\n                if self.unit_type_bits > 0:\n                    type_id = self.get_unit_type_id(al_unit, True)\n                    ally_state[al_id, ind + type_id] = 1\n\n        for e_id, e_unit in self.enemies.items():\n            if e_unit.health > 0:\n                x = e_unit.pos.x\n                y = e_unit.pos.y\n\n                enemy_state[e_id, 0] = (\n                    e_unit.health / e_unit.health_max\n                )  # health\n                enemy_state[e_id, 1] = (\n                    x - center_x\n                ) / self.max_distance_x  # relative X\n                enemy_state[e_id, 2] = (\n                    y - center_y\n                ) / self.max_distance_y  # relative Y\n\n                ind = 3\n                if self.shield_bits_enemy > 0:\n                    max_shield = self.unit_max_shield(e_unit)\n                    enemy_state[e_id, ind] = (\n                        e_unit.shield / max_shield\n                    )  # shield\n                    ind += 1\n\n                if self.unit_type_bits > 0:\n                    type_id = self.get_unit_type_id(e_unit, False)\n                    enemy_state[e_id, ind + type_id] = 1\n\n        state = np.append(ally_state.flatten(), enemy_state.flatten())\n        if self.state_last_action:\n            state = np.append(state, self.last_action.flatten())\n        if self.state_timestep_number:\n            state = np.append(state,\n                              self._episode_steps / self.episode_limit)\n\n        state = state.astype(dtype=np.float32)\n\n        if self.debug:\n            logging.debug(\"STATE\".center(60, \"-\"))\n            logging.debug(\"Ally state {}\".format(ally_state))\n            logging.debug(\"Enemy state {}\".format(enemy_state))\n            if self.state_last_action:\n                logging.debug(\"Last actions {}\".format(self.last_action))\n\n        return state\n\n    def get_obs_enemy_feats_size(self):\n        \"\"\" Returns the dimensions of the matrix containing enemy features.\n        Size is n_enemies x n_features.\n        \"\"\"\n        nf_en = 4 + self.unit_type_bits\n\n        if self.obs_all_health:\n            nf_en += 1 + self.shield_bits_enemy\n\n        return self.n_enemies, nf_en\n\n    def get_obs_ally_feats_size(self):\n        \"\"\"Returns the dimensions of the matrix containing ally features.\n        Size is n_allies x n_features.\n        \"\"\"\n        nf_al = 4 + self.unit_type_bits\n\n        if self.obs_all_health:\n            nf_al += 1 + self.shield_bits_ally\n\n        if self.obs_last_action:\n            nf_al += self.n_actions\n\n        return self.n_agents - 1, nf_al\n\n    def get_obs_own_feats_size(self):\n        \"\"\"Returns the size of the vector containing the agents' own features.\n        \"\"\"\n        own_feats = self.unit_type_bits\n        if self.obs_own_health:\n            own_feats += 1 + self.shield_bits_ally\n        if self.obs_timestep_number:\n            own_feats += 1\n\n        return own_feats\n\n    def get_obs_move_feats_size(self):\n        \"\"\"Returns the size of the vector containing the agent's movement-related features.\"\"\"\n        move_feats = self.n_actions_move\n        if self.obs_pathing_grid:\n            move_feats += self.n_obs_pathing\n        if self.obs_terrain_height:\n            move_feats += self.n_obs_height\n\n        return move_feats\n\n    def get_obs_size(self):\n        \"\"\"Returns the size of the observation.\"\"\"\n        own_feats = self.get_obs_own_feats_size()\n        move_feats = self.get_obs_move_feats_size()\n\n        n_enemies, n_enemy_feats = self.get_obs_enemy_feats_size()\n        n_allies, n_ally_feats = self.get_obs_ally_feats_size()\n\n        enemy_feats = n_enemies * n_enemy_feats\n        ally_feats = n_allies * n_ally_feats\n\n        return move_feats + enemy_feats + 
ally_feats + own_feats\n\n    def get_state_size(self):\n        \"\"\"Returns the size of the global state.\"\"\"\n        if self.obs_instead_of_state:\n            return self.get_obs_size() * self.n_agents\n\n        nf_al = 4 + self.shield_bits_ally + self.unit_type_bits\n        nf_en = 3 + self.shield_bits_enemy + self.unit_type_bits\n\n        enemy_state = self.n_enemies * nf_en\n        ally_state = self.n_agents * nf_al\n\n        size = enemy_state + ally_state\n\n        if self.state_last_action:\n            size += self.n_agents * self.n_actions\n        if self.state_timestep_number:\n            size += 1\n\n        return size\n\n    def get_visibility_matrix(self):\n        \"\"\"Returns a boolean numpy array of dimensions \n        (n_agents, n_agents + n_enemies) indicating which units\n        are visible to each agent.\n        \"\"\"\n        arr = np.zeros(\n            (self.n_agents, self.n_agents + self.n_enemies), \n            dtype=bool,\n        )\n\n        for agent_id in range(self.n_agents):\n            current_agent = self.get_unit_by_id(agent_id)\n            if current_agent.health > 0:  # if agent not dead\n                x = current_agent.pos.x\n                y = current_agent.pos.y\n                sight_range = self.unit_sight_range(agent_id)\n\n                # Enemies\n                for e_id, e_unit in self.enemies.items():\n                    e_x = e_unit.pos.x\n                    e_y = e_unit.pos.y\n                    dist = self.distance(x, y, e_x, e_y)\n\n                    if (dist < sight_range and e_unit.health > 0):\n                        # visible and alive\n                        arr[agent_id, self.n_agents + e_id] = 1\n\n                # The matrix for allies is filled symmetrically\n                al_ids = [\n                    al_id for al_id in range(self.n_agents) \n                    if al_id > agent_id\n                ]\n                for i, al_id in enumerate(al_ids):\n                    al_unit = self.get_unit_by_id(al_id)\n                    al_x = al_unit.pos.x\n                    al_y = al_unit.pos.y\n                    dist = self.distance(x, y, al_x, al_y)\n\n                    if (dist < sight_range and al_unit.health > 0): \n                        # visible and alive\n                        arr[agent_id, al_id] = arr[al_id, agent_id] = 1\n\n        return arr\n\n    def get_unit_type_id(self, unit, ally):\n        \"\"\"Returns the ID of unit type in the given scenario.\"\"\"\n        if ally:  # use new SC2 unit types\n            type_id = unit.unit_type - self._min_unit_type\n        else:  # use default SC2 unit types\n            if self.map_type == \"stalkers_and_zealots\":\n                # id(Stalker) = 74, id(Zealot) = 73\n                type_id = unit.unit_type - 73\n            elif self.map_type == \"colossi_stalkers_zealots\":\n                # id(Stalker) = 74, id(Zealot) = 73, id(Colossus) = 4\n                if unit.unit_type == 4:\n                    type_id = 0\n                elif unit.unit_type == 74:\n                    type_id = 1\n                else:\n                    type_id = 2\n            elif self.map_type == \"bane\":\n                if unit.unit_type == 9:\n                    type_id = 0\n                else:\n                    type_id = 1\n            elif self.map_type == \"MMM\":\n                if unit.unit_type == 51:\n                    type_id = 0\n                elif unit.unit_type == 48:\n                    type_id = 1\n                else:\n                    type_id = 2\n\n        return type_id\n\n    def get_avail_agent_actions(self, agent_id):\n        \"\"\"Returns the available actions for agent_id.\"\"\"\n        unit = self.get_unit_by_id(agent_id)\n        if unit.health > 0:\n            # cannot choose no-op when alive\n            avail_actions = [0] * self.n_actions\n\n            # stop should be allowed\n            avail_actions[1] = 1\n\n            # see if we can move\n            if self.can_move(unit, Direction.NORTH):\n                avail_actions[2] = 1\n            if self.can_move(unit, Direction.SOUTH):\n                avail_actions[3] = 1\n            if self.can_move(unit, Direction.EAST):\n                avail_actions[4] = 1\n            if self.can_move(unit, Direction.WEST):\n                avail_actions[5] = 1\n\n            # Can attack only units that are alive and within the shooting range\n            shoot_range = self.unit_shoot_range(agent_id)\n\n            target_items = self.enemies.items()\n            if self.map_type == \"MMM\" and unit.unit_type == self.medivac_id:\n                # Medivacs cannot heal themselves or other flying units\n                target_items = [\n                    (t_id, t_unit)\n                    for (t_id, t_unit) in self.agents.items()\n                    if t_unit.unit_type != 
self.medivac_id\n ]\n\n for t_id, t_unit in target_items:\n if t_unit.health > 0:\n dist = self.distance(\n unit.pos.x, unit.pos.y, t_unit.pos.x, t_unit.pos.y\n )\n if dist <= shoot_range:\n avail_actions[t_id + self.n_actions_no_attack] = 1\n\n return avail_actions\n\n else:\n # only no-op allowed\n return [1] + [0] * (self.n_actions - 1)\n\n def get_avail_actions(self):\n \"\"\"Returns the available actions of all agents in a list.\"\"\"\n avail_actions = []\n for agent_id in range(self.n_agents):\n avail_agent = self.get_avail_agent_actions(agent_id)\n avail_actions.append(avail_agent)\n return avail_actions\n\n def close(self):\n \"\"\"Close StarCraft II.\"\"\"\n if self._sc2_proc:\n self._sc2_proc.close()\n\n def seed(self):\n \"\"\"Returns the random seed used by the environment.\"\"\"\n return self._seed\n\n def render(self):\n \"\"\"Not implemented.\"\"\"\n pass\n\n def _kill_all_units(self):\n \"\"\"Kill all units on the map.\"\"\"\n units_alive = [\n unit.tag for unit in self.agents.values() if unit.health > 0\n ] + [unit.tag for unit in self.enemies.values() if unit.health > 0]\n debug_command = [\n d_pb.DebugCommand(kill_unit=d_pb.DebugKillUnit(tag=units_alive))\n ]\n self._controller.debug(debug_command)\n\n def init_units(self):\n \"\"\"Initialise the units.\"\"\"\n while True:\n # Sometimes not all units have yet been created by SC2\n self.agents = {}\n self.enemies = {}\n\n ally_units = [\n unit\n for unit in self._obs.observation.raw_data.units\n if unit.owner == 1\n ]\n ally_units_sorted = sorted(\n ally_units,\n key=attrgetter(\"unit_type\", \"pos.x\", \"pos.y\"),\n reverse=False,\n )\n\n for i in range(len(ally_units_sorted)):\n self.agents[i] = ally_units_sorted[i]\n if self.debug:\n logging.debug(\n \"Unit {} is {}, x = {}, y = {}\".format(\n len(self.agents),\n self.agents[i].unit_type,\n self.agents[i].pos.x,\n self.agents[i].pos.y,\n )\n )\n\n for unit in self._obs.observation.raw_data.units:\n if unit.owner == 2:\n self.enemies[len(self.enemies)] = unit\n if self._episode_count == 0:\n self.max_reward += unit.health_max + unit.shield_max\n\n if self._episode_count == 0:\n min_unit_type = min(\n unit.unit_type for unit in self.agents.values()\n )\n self._init_ally_unit_types(min_unit_type)\n\n all_agents_created = (len(self.agents) == self.n_agents)\n all_enemies_created = (len(self.enemies) == self.n_enemies)\n\n if all_agents_created and all_enemies_created: # all good\n return\n\n try:\n self._controller.step(1)\n self._obs = self._controller.observe()\n except (protocol.ProtocolError, protocol.ConnectionError):\n self.full_restart()\n self.reset()\n\n def update_units(self):\n \"\"\"Update units after an environment step.\n This function assumes that self._obs is up-to-date.\n \"\"\"\n n_ally_alive = 0\n n_enemy_alive = 0\n\n # Store previous state\n self.previous_ally_units = deepcopy(self.agents)\n self.previous_enemy_units = deepcopy(self.enemies)\n\n for al_id, al_unit in self.agents.items():\n updated = False\n for unit in self._obs.observation.raw_data.units:\n if al_unit.tag == unit.tag:\n self.agents[al_id] = unit\n updated = True\n n_ally_alive += 1\n break\n\n if not updated: # dead\n al_unit.health = 0\n\n for e_id, e_unit in self.enemies.items():\n updated = False\n for unit in self._obs.observation.raw_data.units:\n if e_unit.tag == unit.tag:\n self.enemies[e_id] = unit\n updated = True\n n_enemy_alive += 1\n break\n\n if not updated: # dead\n e_unit.health = 0\n\n if (n_ally_alive == 0 and n_enemy_alive > 0\n or 
self.only_medivac_left(ally=True)):\n return -1 # lost\n if (n_ally_alive > 0 and n_enemy_alive == 0\n or self.only_medivac_left(ally=False)):\n return 1 # won\n if n_ally_alive == 0 and n_enemy_alive == 0:\n return 0\n\n return None\n\n def _init_ally_unit_types(self, min_unit_type):\n \"\"\"Initialise ally unit types. Should be called once from the\n init_units function.\n \"\"\"\n self._min_unit_type = min_unit_type\n if self.map_type == \"marines\":\n self.marine_id = min_unit_type\n elif self.map_type == \"stalkers_and_zealots\":\n self.stalker_id = min_unit_type\n self.zealot_id = min_unit_type + 1\n elif self.map_type == \"colossi_stalkers_zealots\":\n self.colossus_id = min_unit_type\n self.stalker_id = min_unit_type + 1\n self.zealot_id = min_unit_type + 2\n elif self.map_type == \"MMM\":\n self.marauder_id = min_unit_type\n self.marine_id = min_unit_type + 1\n self.medivac_id = min_unit_type + 2\n elif self.map_type == \"zealots\":\n self.zealot_id = min_unit_type\n elif self.map_type == \"hydralisks\":\n self.hydralisk_id = min_unit_type\n elif self.map_type == \"stalkers\":\n self.stalker_id = min_unit_type\n elif self.map_type == \"colossus\":\n self.colossus_id = min_unit_type\n elif self.map_type == \"bane\":\n self.baneling_id = min_unit_type\n self.zergling_id = min_unit_type + 1\n\n def only_medivac_left(self, ally):\n \"\"\"Check if only Medivac units are left.\"\"\"\n if self.map_type != \"MMM\":\n return False\n\n if ally:\n units_alive = [\n a\n for a in self.agents.values()\n if (a.health > 0 and a.unit_type != self.medivac_id)\n ]\n if len(units_alive) == 0:\n return True\n return False\n else:\n units_alive = [\n a\n for a in self.enemies.values()\n if (a.health > 0 and a.unit_type != self.medivac_id)\n ]\n if len(units_alive) == 1 and units_alive[0].unit_type == 54:\n return True\n return False\n\n def get_unit_by_id(self, a_id):\n \"\"\"Get unit by ID.\"\"\"\n return self.agents[a_id]\n\n def get_stats(self):\n stats = {\n \"battles_won\": self.battles_won,\n \"battles_game\": self.battles_game,\n \"battles_draw\": self.timeouts,\n \"win_rate\": self.battles_won / self.battles_game,\n \"timeouts\": self.timeouts,\n \"restarts\": self.force_restarts,\n }\n return stats\n\n def get_alive_agents(self):\n alive_allies_list = np.zeros([self.n_agents, self.n_agents])\n for al_id_1, al_unit_1 in self.agents.items():\n for al_id_2, al_unit_2 in self.agents.items():\n if al_unit_1.health == 0 or al_unit_2.health == 0:\n alive_allies_list[al_id_1, al_id_2] = 0\n else:\n alive_allies_list[al_id_1, al_id_2] = 1\n return alive_allies_list\n"
] |
[
[
"numpy.eye",
"numpy.array",
"numpy.zeros",
"numpy.append"
]
] |
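A minimal standalone sketch of the shaped-reward idea implemented by reward_battle() in the SMAC entry above: the reward is enemy damage dealt plus a per-kill bonus, minus scaled ally damage and deaths when only_positive is False. The helper name, the (health, shield) tuple layout, and the constants are illustrative assumptions, not part of the source file.

def shaped_reward(prev_enemies, cur_enemies, prev_allies, cur_allies,
                  death_value=10.0, neg_scale=0.5, only_positive=True):
    # Damage dealt to each enemy since the last step, plus a bonus per kill.
    delta_enemy = sum(
        (ph + ps) - (ch + cs) + (death_value if ch == 0 else 0.0)
        for (ph, ps), (ch, cs) in zip(prev_enemies, cur_enemies))
    if only_positive:
        return abs(delta_enemy)  # abs() guards against shield regeneration
    # Damage taken by allies (and their deaths) enters with a negative scale.
    delta_ally = sum(
        (ph + ps) - (ch + cs) + (death_value if ch == 0 else 0.0)
        for (ph, ps), (ch, cs) in zip(prev_allies, cur_allies))
    return delta_enemy - neg_scale * delta_ally

# One enemy drops from (45, 0) to (0, 0): 45 damage + 10 kill bonus.
print(shaped_reward([(45, 0)], [(0, 0)], [(45, 0)], [(40, 0)]))  # 55.0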
LucaZampieri/pyvista
|
[
"ae2a7a0559961839c5aa2979228fcdef1f4b188e"
] |
[
"examples/00-load/create-structured-surface.py"
] |
[
"\"\"\"\n.. _ref_create_structured:\n\nCreating a Structured Surface\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nCreate a StructuredGrid surface from NumPy arrays\n\"\"\"\n\n# sphinx_gallery_thumbnail_number = 2\nimport pyvista as pv\nfrom pyvista import examples\nimport numpy as np\n\n\n###############################################################################\n# From NumPy Meshgrid\n# +++++++++++++++++++\n#\n# Create a simple meshgrid using NumPy\n\n# Make data\nx = np.arange(-10, 10, 0.25)\ny = np.arange(-10, 10, 0.25)\nx, y = np.meshgrid(x, y)\nr = np.sqrt(x ** 2 + y ** 2)\nz = np.sin(r)\n\n###############################################################################\n# Now pass the NumPy meshgrid to PyVista\n\n# Create and plot structured grid\ngrid = pv.StructuredGrid(x, y, z)\ngrid.plot()\n\n###############################################################################\n\n# Plot mean curvature as well\ngrid.plot_curvature(clim=[-1, 1])\n\n###############################################################################\n# Generating a structured grid is a one liner in this module, and the points\n# from the resulting surface can be accessed as a NumPy array:\n\ngrid.points\n\n\n###############################################################################\n# From XYZ Points\n# +++++++++++++++\n#\n# Quite often, you might be given a set of coordinates (XYZ points) in a simple\n# tabular format where there exists some structure such that grid could be\n# built between the nodes you have. A great example is found in\n# `pyvista-support#16`_ where a structured grid that is rotated from the\n# cartesian reference frame is given as just XYZ points. In these cases, all\n# that is needed to recover the grid is the dimensions of the grid\n# (`nx` by `ny` by `nz`) and that the coordinates are ordered appropriately.\n#\n# .. _pyvista-support#16: https://github.com/pyvista/pyvista-support/issues/16\n#\n# For this example, we will create a small dataset and rotate the\n# coordinates such that they are not on orthogonal to cartesian reference\n# frame.\n\n\ndef make_point_set():\n \"\"\"Ignore the contents of this function. 
Just know that it returns an\n    n by 3 numpy array of structured coordinates.\"\"\"\n    n, m = 29, 32\n    x = np.linspace(-200, 200, num=n) + np.random.uniform(-5, 5, size=n)\n    y = np.linspace(-200, 200, num=m) + np.random.uniform(-5, 5, size=m)\n    xx, yy = np.meshgrid(x, y)\n    A, b = 100, 100\n    zz = A * np.exp(-0.5 * ((xx / b) ** 2.0 + (yy / b) ** 2.0))\n    points = np.c_[xx.reshape(-1), yy.reshape(-1), zz.reshape(-1)]\n    foo = pv.PolyData(points)\n    foo.rotate_z(36.6)\n    return foo.points\n\n\n# Get the points as a 2D NumPy array (N by 3)\npoints = make_point_set()\npoints[0:5, :]\n\n###############################################################################\n# Now pretend that the (n by 3) NumPy array above are coordinates that you\n# have, possibly from a file with three columns of XYZ points.\n#\n# We simply need to recover the dimensions of the grid that these points make\n# and then we can generate a :class:`pyvista.StructuredGrid` mesh.\n#\n# Let's preview the points to see what we are dealing with:\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(10, 10))\nplt.scatter(points[:, 0], points[:, 1], c=points[:, 2])\nplt.axis(\"image\")\nplt.xlabel(\"X Coordinate\")\nplt.ylabel(\"Y Coordinate\")\nplt.show()\n\n###############################################################################\n# In the figure above, we can see some inherent structure to the points and thus\n# we could connect the points as a structured grid. All we need to know are the\n# dimensions of the grid present. In this case, we know (because we made this\n# dataset) the dimensions are ``[29, 32, 1]``, but you might not know the\n# dimensions of your pointset. There are a few ways to figure out the\n# dimensionality of a structured grid, including:\n#\n# * manually counting the nodes along the edges of the pointset\n# * using a technique like principal component analysis to strip the rotation from the dataset and count the unique values along each axis for the newly projected dataset.\n\n# Once you've figured out your grid's dimensions, simply create the\n# :class:`pyvista.StructuredGrid` as follows:\n\nmesh = pv.StructuredGrid()\n# Set the coordinates from the numpy array\nmesh.points = points\n# set the dimensions\nmesh.dimensions = [29, 32, 1]\n\n# and then inspect it!\nmesh.plot(show_edges=True, show_grid=True, cpos=\"xy\")\n\n\n###############################################################################\n# Extending a 2D StructuredGrid to 3D\n# +++++++++++++++++++++++++++++++++++\n#\n# A 2D :class:`pyvista.StructuredGrid` mesh can be extended into a 3D mesh.\n# This is highly applicable when wanting to create a terrain-following mesh\n# in earth science research applications.\n#\n# For example, we could have a :class:`pyvista.StructuredGrid` of a topography\n# surface and extend that surface to a few different levels and connect each\n# \"level\" to create the 3D terrain-following mesh.\n#\n# Let's start with a simple example by extending the wave mesh to 3D\nstruct = examples.load_structured()\nstruct.plot(show_edges=True)\n\n###############################################################################\ntop = struct.points.copy()\nbottom = struct.points.copy()\nbottom[:,-1] = -10.0  # Wherever you want the plane\n\nvol = pv.StructuredGrid()\nvol.points = np.vstack((top, bottom))\nvol.dimensions = [*struct.dimensions[0:2], 2]\nvol.plot(show_edges=True)\n"
] |
[
[
"numpy.sqrt",
"matplotlib.pyplot.scatter",
"numpy.linspace",
"numpy.arange",
"numpy.vstack",
"numpy.sin",
"numpy.random.uniform",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axis",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
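The pyvista example above mentions principal component analysis as a way to recover grid dimensions from rotated XYZ points. Below is a hedged sketch of that idea; the helper name and rounding tolerance are assumptions, and the approach is shown on a noise-free grid.

import numpy as np

def estimate_grid_dims(points, decimals=6):
    # De-rotate 2D points onto their principal axes, then count unique
    # coordinates per axis; on a noise-free grid this recovers (nx, ny).
    xy = points - points.mean(axis=0)
    _, vecs = np.linalg.eigh(np.cov(xy.T))
    aligned = xy @ vecs
    return tuple(np.unique(np.round(aligned[:, k], decimals)).size
                 for k in range(2))

# Build a 29 x 32 grid, rotate it, and recover the dimensions.
xx, yy = np.meshgrid(np.linspace(0, 1, 29), np.linspace(0, 2, 32))
pts = np.c_[xx.ravel(), yy.ravel()]
theta = np.deg2rad(36.6)
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta),  np.cos(theta)]])
print(estimate_grid_dims(pts @ rot.T))  # (29, 32)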
zheedong/Stanford_CS231n_assignment_2017
|
[
"4b333d48aabd6192dafd3725fed55546b8871df6"
] |
[
"assignment1/cs231n/classifiers/fc_net.py"
] |
[
"from builtins import range\nfrom builtins import object\nimport numpy as np\n\nfrom ..layers import *\nfrom ..layer_utils import *\n\n\nclass TwoLayerNet(object):\n \"\"\"\n A two-layer fully-connected neural network with ReLU nonlinearity and\n softmax loss that uses a modular layer design. We assume an input dimension\n of D, a hidden dimension of H, and perform classification over C classes.\n\n The architecure should be affine - relu - affine - softmax.\n\n Note that this class does not implement gradient descent; instead, it\n will interact with a separate Solver object that is responsible for running\n optimization.\n\n The learnable parameters of the model are stored in the dictionary\n self.params that maps parameter names to numpy arrays.\n \"\"\"\n\n def __init__(\n self,\n input_dim=3 * 32 * 32,\n hidden_dim=100,\n num_classes=10,\n weight_scale=1e-3,\n reg=0.0,\n ):\n \"\"\"\n Initialize a new network.\n\n Inputs:\n - input_dim: An integer giving the size of the input\n - hidden_dim: An integer giving the size of the hidden layer\n - num_classes: An integer giving the number of classes to classify\n - weight_scale: Scalar giving the standard deviation for random\n initialization of the weights.\n - reg: Scalar giving L2 regularization strength.\n \"\"\"\n self.params = {}\n self.reg = reg\n\n ############################################################################\n # TODO: Initialize the weights and biases of the two-layer net. Weights #\n # should be initialized from a Gaussian centered at 0.0 with #\n # standard deviation equal to weight_scale, and biases should be #\n # initialized to zero. All weights and biases should be stored in the #\n # dictionary self.params, with first layer weights #\n # and biases using the keys 'W1' and 'b1' and second layer #\n # weights and biases using the keys 'W2' and 'b2'. #\n ############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n self.params['W1'] = np.random.normal(0.0, weight_scale, (input_dim, hidden_dim))\n self.params['b1'] = 0\n\n self.params['W2'] = np.random.normal(0.0, weight_scale, (hidden_dim, num_classes))\n self.params['b2'] = 0\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n def loss(self, X, y=None):\n \"\"\"\n Compute loss and gradient for a minibatch of data.\n\n Inputs:\n - X: Array of input data of shape (N, d_1, ..., d_k)\n - y: Array of labels, of shape (N,). y[i] gives the label for X[i].\n\n Returns:\n If y is None, then run a test-time forward pass of the model and return:\n - scores: Array of shape (N, C) giving classification scores, where\n scores[i, c] is the classification score for X[i] and class c.\n\n If y is not None, then run a training-time forward and backward pass and\n return a tuple of:\n - loss: Scalar value giving the loss\n - grads: Dictionary with the same keys as self.params, mapping parameter\n names to gradients of the loss with respect to those parameters.\n \"\"\"\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the two-layer net, computing the #\n # class scores for X and storing them in the scores variable. 
#\n ############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n W1 = self.params['W1']\n b1 = self.params['b1']\n W2 = self.params['W2']\n b2 = self.params['b2']\n relu_output, relu_cache = affine_relu_forward(X, W1, b1)\n scores, cache = affine_forward(relu_output, W2, b2)\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the two-layer net. Store the loss #\n # in the loss variable and gradients in the grads dictionary. Compute data #\n # loss using softmax, and make sure that grads[k] holds the gradients for #\n # self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n loss, d_scores = softmax_loss(scores, y) # softmax_loss return gradient of scores\n loss += 0.5 * self.reg * np.sum(np.sum(W1 * W1) + np.sum(W2 * W2)) # Add L2 regularization\n dx, grads['W2'], grads['b2'] = affine_backward(d_scores, cache) # cache is cache of last output affine layer\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dx, relu_cache)\n grads['W1'] += self.reg * W1 # Add regularization for W1\n grads['W2'] += self.reg * W2\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads\n"
] |
[
[
"numpy.random.normal",
"numpy.sum"
]
] |
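The assignment code above notes that writing the L2 term as 0.5 * reg * sum(W*W) simplifies the gradient to reg * W. A small self-contained check of that claim with a centered finite difference (variable names and constants are illustrative):

import numpy as np

reg = 0.1
W = np.random.randn(4, 3)
loss = lambda W: 0.5 * reg * np.sum(W * W)

analytic = reg * W            # d/dW [0.5 * reg * sum(W*W)] = reg * W
numeric = np.zeros_like(W)
h = 1e-5
for ix in np.ndindex(*W.shape):
    old = W[ix]
    W[ix] = old + h
    fp = loss(W)
    W[ix] = old - h
    fm = loss(W)
    W[ix] = old
    numeric[ix] = (fp - fm) / (2 * h)

print(np.max(np.abs(analytic - numeric)))  # ~1e-11: the 0.5 factor cancels the 2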
jeanphilippemercier/microquake
|
[
"0b9d07be11eddd64619e46939c320487531602a3"
] |
[
"microquake/clients/ims/web_client.py"
] |
[
"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------\n# Filename: rest_api.py\n# Purpose: module to interact with the IMS RESTAPI\n# Author: microquake development team\n# Email: devs@microquake.org\n#\n# Copyright (C) 2016 microquake development team\n# --------------------------------------------------------------------\n\"\"\"\nmodule to interact IMS web API\n\n:copyright:\n microquake development team (devs@microquake.org)\n:license:\n GNU Lesser General Public License, Version 3\n (http://www.gnu.org/copyleft/lesser.html)\n\"\"\"\n\nimport sys\nfrom datetime import datetime\nfrom gzip import GzipFile\nfrom struct import unpack\nfrom time import time as timer\n\nimport numpy as np\nimport requests\nfrom obspy import UTCDateTime\nfrom microquake.core.event import Catalog\nfrom obspy.core.event import (ConfidenceEllipsoid, OriginUncertainty,\n WaveformStreamID)\nfrom obspy.core.trace import Stats\n\nfrom loguru import logger\nfrom microquake.core import Stream, Trace\nfrom microquake.core.event import Arrival, Event, Magnitude, Origin, Pick\n\nif sys.version_info[0] < 3:\n from StringIO import StringIO\nelse:\n from io import StringIO, BytesIO\n\n# for retries\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\nretry_strategy = Retry(\n total=3,\n status_forcelist=[429, 500, 502, 503, 504],\n method_whitelist=[\"HEAD\", \"GET\", \"OPTIONS\"]\n)\nadapter = HTTPAdapter(max_retries=retry_strategy)\ns = requests.Session()\ns.mount(\"https://\", adapter)\ns.mount(\"http://\", adapter)\n\n\ndef get_continuous_wrapper(base_url, start_datetime, end_datetime, site_id,\n format='binary-gz', network='', sampling_rate=6000.,\n nan_limit=10):\n\n try:\n st = get_continuous(base_url, start_datetime, end_datetime,\n [site_id], None, format=format, network=network,\n sampling_rate=sampling_rate, nan_limit=nan_limit)\n except Exception as e:\n logger.error(e)\n return 0\n\n return st\n\n\ndef get_continuous(base_url, start_datetime, end_datetime,\n site_ids, time_zone, format='binary-gz', network='',\n sampling_rate=6000., nan_limit=10):\n \"\"\"\n :param base_url: base url of the IMS server\n example: http://10.95.74.35:8002/ims-database-server/databases/mgl\n :param start_datetime: request start time (if not localized, UTC assumed)\n :type start_datetime: datetime.datetime\n :param end_datetime: request end time (if not localized, UTC assumed)\n :type end_datetime: datetime.datetime\n :param site_ids: list of sites for which data should be read\n :type site_ids: list or integer\n :param format: Requested data format ('possible values: binary and binary-gz')\n :type format: str\n :param network: Network name (default = '')\n :type network: str\n :param sampling_rate: signal sampling rate\n :type sampling_rate: float\n :param nan_limit: maximum length of a not a number sequence to be\n interpolated\n :type nan_limit: int\n :param dtype: output type for mseed\n :return: microquake.core.stream.Stream\n \"\"\"\n\n \"\"\"\n binary file structure:\n * a binary header of size N bytes, consisting of\n - header size written as int32\n - netid written as int32\n - siteid written as int32\n - start time written as int64(time in nanoseconds)\n - end time written as int64(time in nanoseconds)\n - netADC id written as int32\n - sensor id written as int32\n - attenuator id written as int32\n - attenuator configuration id written as int32\n - remainder of bytes(N minus total so far) written as zero\n padded.\n * A sequence of 20 
-byte samples, each consisting of\n        - sample timestamp, written as int64(time in nanoseconds)\n        - raw X value as float32\n        - raw Y value as float32\n        - raw Z value as float32\n    \"\"\"\n\n    if isinstance(site_ids, int):\n        site_ids = [site_ids]\n\n    start_datetime_utc = UTCDateTime(start_datetime)\n    end_datetime_utc = UTCDateTime(end_datetime)\n    reqtime_start_nano = int(start_datetime_utc.timestamp * 1e6) * int(1e3)\n    reqtime_end_nano = int(end_datetime_utc.timestamp * 1e6) * int(1e3)\n    url_cont = base_url + '/continuous-seismogram?' + \\\n               'startTimeNanos=%d&endTimeNanos=%d&siteId' + \\\n               '=%d&format=%s'\n\n    stream = Stream()\n\n    for site in site_ids:\n        ts_processing = timer()\n\n        if type(site) == str:\n            site = int(site)\n\n        url = url_cont % (reqtime_start_nano, reqtime_end_nano, site, format)\n        url = url.replace('//', '/').replace('http:/', 'http://')\n\n        logger.info(\"Getting trace for station %d between %s \"\n                    \"and %s\" % (site, start_datetime, end_datetime))\n\n        ts = timer()\n        try:\n            r = requests.get(url, stream=True)\n        except requests.RequestException as e:\n            logger.error(e)\n            continue\n\n        if r.status_code != 200:\n            # raise Exception('request failed! \\n %s' % url)\n            continue\n\n        if format == 'binary-gz':\n            fileobj = GzipFile(fileobj=BytesIO(r.content))\n        elif format == 'binary':\n            fileobj = BytesIO(r.content)\n        else:\n            raise Exception('unsupported format!')\n\n        fileobj.seek(0)\n        te = timer()\n        logger.info('Completing request in %.2f seconds' % (te - ts))\n\n        # Reading header\n\n        if len(r.content) < 44:\n            continue\n        ts = timer()\n        header_size = unpack('>i', fileobj.read(4))[0]\n        net_id = unpack('>i', fileobj.read(4))[0]\n        site_id = unpack('>i', fileobj.read(4))[0]\n        starttime = unpack('>q', fileobj.read(8))[0]\n        endtime = unpack('>q', fileobj.read(8))[0]\n        netADC_id = unpack('>i', fileobj.read(4))[0]\n        sensor_id = unpack('>i', fileobj.read(4))[0]\n        attenuator_id = unpack('>i', fileobj.read(4))[0]\n        attenuator_config_id = unpack('>i', fileobj.read(4))[0]\n        te = timer()\n\n        ts = timer()\n        # Reading data\n        fileobj.seek(header_size)\n        try:\n            content = fileobj.read()\n        except IOError:\n            logger.info(f\"Error reading channel data, skipping site {site}\")\n            continue\n\n        time, sigs = strided_read(content)\n\n        time_norm = (time - time[0]) / 1e9\n        nan_ranges = get_nan_ranges(time_norm, sampling_rate, limit=nan_limit)\n\n        time_new = np.arange(time_norm[0], time_norm[-1], 1. / sampling_rate)\n\n        newsigs = np.zeros((len(sigs), len(time_new)), dtype=np.float32)\n\n        for i in range(len(sigs)):\n            newsigs[i] = np.interp(time_new, time_norm, sigs[i])\n\n        nan_ranges_ix = ((nan_ranges - time_new[0]) *\n                         sampling_rate).astype(int)\n\n        for chan in newsigs:\n            for lims in nan_ranges_ix:\n                chan[lims[0]:lims[1]] = np.nan\n\n        te = timer()\n\n        ts = timer()\n        chans = ['X', 'Y', 'Z']\n\n        for i in range(len(newsigs)):\n            if np.all(np.isnan(newsigs[i])):\n                continue\n            tr = Trace(data=newsigs[i])\n            tr.stats.sampling_rate = sampling_rate\n            tr.stats.network = str(network)\n            tr.stats.station = str(site)\n            # it seems that the time returned by IMS is local time...\n            # The IMS API has changed. 
It was returning the time in local\n # time, now the time is UTC.\n starttime_utc = datetime.utcfromtimestamp(time[0] / 1e9)\n # starttime_local = starttime_local.replace(tzinfo=time_zone)\n tr.stats.starttime = UTCDateTime(starttime_utc)\n tr.stats.channel = chans[i]\n stream.append(tr)\n\n te = timer()\n logger.info('Completing stream build in %.2f seconds' % (te - ts))\n\n return stream\n\n\ndef get_nan_ranges(tnorm, sr, limit):\n \"\"\"\n Find gap in the data\n :param tnorm: normalized time vector\n :param sr: sampling rate\n :param limit: maximum size for gap to be interpolated\n :return:\n \"\"\"\n # limit is minimum consecutive missing dt's to assign nans\n diff = np.diff(tnorm) * sr\n ibad = np.where(diff > limit)[0]\n miss_start = tnorm[ibad]\n miss_lens = diff[ibad] / sr\n nan_ranges = np.vstack((miss_start, miss_start + miss_lens)).T\n\n return nan_ranges\n\n\ndef strided_read(content):\n \"\"\"\n Efficiently read the content of the binary object returned by the IMS server\n :param content: content of the binary object returned by IMS\n :return: time and signal\n \"\"\"\n\n npts = int(len(content) / 20)\n time = np.ndarray((npts,), '>q', content, 0, (20, ))\n sigs = np.zeros((3, npts), dtype=np.float32)\n sigs[0] = np.ndarray((npts,), '>f', content, 8, (20, ))\n sigs[1] = np.ndarray((npts,), '>f', content, 12, (20, ))\n sigs[2] = np.ndarray((npts,), '>f', content, 16, (20, ))\n\n return time, sigs\n\n\ndef EpochNano2UTCDateTime(timestamp, timezone):\n \"\"\"\n Convert a time stamp in nanosecond to a microquake.UTCDateTime object\n :param timezone:\n :param timestamp: timestamp expressed in nanasecond\n :return: a microquake.UTCDateTime object\n \"\"\"\n\n time_utc = datetime.utcfromtimestamp(timestamp / 1.e9)\n\n return UTCDateTime(time_utc)\n\n\ndef get_catalogue(base_url, start_datetime, end_datetime, inventory,\n timezone, blast=True, event=True, accepted=True, manual=True,\n get_arrivals=False):\n \"\"\"\n read catalogue data through the REST API provided by the IMS synapse\n server and return a QuakeML object\n\n :param base_url: base url of the IMS server e.g.\n http://10.95.74.35:8002/ims-database-server/databases/mgl\n :param start_datetime: request start time (if not localized, UTC assumed)\n :type start_datetime: datetime.datetime\n :param end_datetime: request end time (if not localized, UTC assumed)\n :type end_datetime: datetime.datetype\n :param inventory: a site object containing system information\n :type inventory: microquake.core.data.inventory\n :params timezone: time zone\n :param blast: if True return blasts (default True)\n :type blast: bool\n :param event: if True return events (default True)\n :type event: bool\n :param accepted: if True only accepted events and blasts are returned (\n default True)\n :type accepted: bool\n :param manual: if True only manually processed event are returned (\n default True)\n :param get_arrivals: if True picks are also returned along with the\n catalogue.\n :return: a catalogue containing a list of events\n :rtype: microquake.core.Catalog\n \"\"\"\n\n import calendar\n import pandas as pd\n start_datetime_utc = UTCDateTime(start_datetime)\n end_datetime_utc = UTCDateTime(end_datetime)\n\n time_start = calendar.timegm(start_datetime_utc.timetuple()) * 1e9\n time_end = calendar.timegm(end_datetime_utc.timetuple()) * 1e9\n\n url = base_url + \\\n '/events/csv?startTimeNanos=%d&endTimeNanos=%d&blast¶ms=' \\\n % (time_start, time_end) \\\n + 'ACCEPTED, ASSOC_SEISMOGRAM_NAMES, AUTO_PROCESSED, BLAST,' \\\n + 'CORNER_FREQUENCY, 
DYNAMIC_STRESS_DROP, ENERGY, ENERGY_P,' \\\n + 'ENERGY_S, EVENT_MODIFICATION_TIME, EVENT_NAME,' \\\n + 'EVENT_TIME_FORMATTED, EVENT_TIME_NANOS, LOCAL_MAGNITUDE,' \\\n + 'LOCATION_RESIDUAL, LOCATION_X, LOCATION_Y, LOCATION_Z,' \\\n + 'MANUALLY_PROCESSED, NUM_ACCEPTED_TRIGGERS, NUM_TRIGGERS' \\\n + 'POTENCY, POTENCY_P, POTENCY_S, STATIC_STRESS_DROP, TAP_TEST' \\\n + 'TEST, TRIGGERED_SITES, USER_NAME'\n\n # will need to add tags for the error ellipsoid\n\n try:\n r = requests.get(url)\n except requests.RequestException as e:\n logger.error(e)\n return\n\n enable = False\n\n for line in r.iter_lines():\n line = line.decode('utf-8')\n\n if \"EVENT_NAME\" in line:\n enable = True\n csv_string = str(line) + '\\n'\n\n continue\n\n if not enable:\n continue\n\n if \"#\" in line:\n continue\n\n e_accepted = int(line.split(',')[1])\n e_blast = int(line.split(',')[4])\n e_automatic = int(line.split(',')[3])\n\n processor_name = line.split(',')[-1]\n\n if not (blast and event):\n if ((not e_blast) and (blast)) or ((e_blast) and (event)):\n continue\n\n if accepted and (not e_accepted):\n continue\n\n if manual and e_automatic:\n continue\n\n csv_string += line + '\\n'\n event_name = line.split(',')[0]\n\n df = pd.read_csv(StringIO(csv_string))\n\n events = []\n\n for row in df.iterrows():\n event_name = row[1]['EVENT_NAME']\n\n for k, element in enumerate(row[1]):\n if element == '-':\n row[1][k] = None\n\n event = Event()\n event.resource_id.id = row[1]['EVENT_NAME']\n extra = row[1].to_dict()\n\n for key in extra.keys():\n if key not in event.extra_keys:\n continue\n event.__setattr__(key, extra[key])\n\n # create the origin object\n origin = Origin()\n origin.x = row[1]['LOCATION_X']\n origin.y = row[1]['LOCATION_Y']\n origin.z = row[1]['LOCATION_Z']\n\n origin.time = EpochNano2UTCDateTime(int(row[1]['EVENT_TIME_NANOS']),\n timezone)\n\n if (row[1]['ACCEPTED'] == 1) and (row[1]['MANUALLY_PROCESSED'] == 1):\n origin.evaluation_status = 'reviewed'\n origin.evaluation_mode = 'manual'\n elif (row[1]['ACCEPTED'] == 0) and (row[1]['MANUALLY_PROCESSED'] == 1):\n origin.evaluation_status = 'rejected'\n origin.evaluation_mode = 'manual'\n elif (row[1]['ACCEPTED'] == 1) and (row[1]['MANUALLY_PROCESSED'] == 0):\n origin.evaluation_status = 'preliminary'\n origin.evaluation_mode = 'automatic'\n else:\n origin.evaluation_status = 'rejected'\n origin.evaluation_mode = 'manual'\n\n o_u = OriginUncertainty()\n o_u.confidence_ellipsoid = ConfidenceEllipsoid()\n origin.origin_uncertainty = o_u\n\n # create the magnitude object\n magnitude = Magnitude()\n magnitude.mag = -999\n magnitude.error = -999\n\n if row[1]['LOCAL_MAGNITUDE']:\n magnitude.mag = float(row[1]['LOCAL_MAGNITUDE'])\n\n if row[1]['CORNER_FREQUENCY']:\n try:\n magnitude.corner_frequency_hz = float(row[1][\n 'CORNER_FREQUENCY'])\n except ValueError:\n pass\n\n if row[1]['ENERGY_P']:\n try:\n magnitude.energy_p_joule = float(row[1]['ENERGY_P'])\n except ValueError:\n pass\n\n if row[1]['ENERGY_S']:\n try:\n magnitude.energy_s_joule = float(row[1]['ENERGY_S'])\n except ValueError:\n pass\n\n if row[1]['ENERGY_S'] and row[1]['ENERGY_P']:\n try:\n magnitude.energy_joule = float(row[1]['ENERGY_S']) + \\\n float(row[1]['ENERGY_P'])\n except ValueError:\n pass\n\n # if row[1]['STATIC_STRESS_DROP']:\n # magnitude.static_stress_drop_mpa = float(row[1][\n #\n #\n # 'STATIC_STRESS_DROP'])\n\n if row[1]['POTENCY_P'] and row[1]['POTENCY_S']:\n try:\n potency = (float(row[1]['POTENCY_P']) +\n float(row[1]['POTENCY_S'])) / 2\n magnitude.potency_m3 = potency\n except 
ValueError:\n pass\n\n magnitude.magnitude_type = 'Mw'\n magnitude.origin_id = origin.resource_id.id\n magnitude.evaluation_mode = 'manual'\n magnitude.evaluation_status = 'reviewed'\n\n event.origins.append(origin)\n event.preferred_origin_id = origin.resource_id.id\n\n event.magnitudes.append(magnitude)\n event.preferred_magnitude_id = magnitude.resource_id.id\n\n if row[1]['BLAST'] == 1:\n event.event_type = \"explosion\"\n elif row[1]['ACCEPTED'] == 0:\n event.event_type = 'other event'\n else:\n event.event_type = \"earthquake\"\n\n if get_arrivals:\n (picks, arrivals) = get_picks(base_url, event_name, inventory,\n timezone)\n\n event.picks = picks\n event.preferred_origin().arrivals = arrivals\n\n events.append(event)\n\n return Catalog(events=events)\n\n\ndef get_seismogram(base_url, sgram_name, network_code, site_code, timezone):\n \"\"\"\n Read a seismogram, one sensor (uni- or tri-axial) one event using the\n REST API interface from Synapse server and return a Stream\n :param base_url: base url of the IMS server e.g.\n http://10.95.74.35:8002/ims-database-server/databases/mgl\n :param sgram_name: Seismogram name as defined in IMS system\n :type sgram_name: string\n :param network_code: code of the network\n :type network_code: string\n :param site_code: site code\n :type site_code: str\n :return: a stream containing either 1 or 3 traces depending on the number of\n component\n :rtype: microquake.core.Stream\n \"\"\"\n\n url = base_url + '/sgrams/assoc/read_sgram?sgramName=%s' % sgram_name\n\n try:\n r = requests.get(url)\n except requests.RequestException as e:\n logger.error(e)\n return\n\n traces = []\n indata = False\n data = []\n ncomponent = 0\n\n for lsgram in r.iter_lines():\n lsgram = lsgram.decode('utf-8')\n\n if 'time-sample-0-nanos' in lsgram:\n s_starttime = EpochNano2UTCDateTime(int(lsgram.split('=')[-1]),\n timezone)\n\n if 'sampling-rate' in lsgram:\n sampling_rate = float(lsgram.split('=')[-1])\n\n if 'num-components' in lsgram:\n ncomponent = int(lsgram.split('=')[-1])\n\n if indata:\n if ncomponent == 1:\n data.append(float(lsgram.split(',')[-1]))\n\n if ncomponent == 3:\n tmp = [float(d) for d in lsgram.split(',')[1:]]\n data.append(tmp)\n\n if '#Samples' in lsgram:\n indata = True\n\n if ncomponent == 1:\n header = Stats()\n header.network = network_code\n header.sampling_rate = sampling_rate\n header.station = site_code\n header.channel = 'Z'\n header.starttime = s_starttime\n header.npts = len(data)\n tr = Trace(data=np.array(data).astype(np.float32), header=header)\n traces.append(tr)\n\n if ncomponent == 3:\n data = np.array(data).astype(np.float32)\n\n for k, channel in enumerate(['x', 'y', 'z']):\n header = Stats()\n header.network = network_code\n header.sampling_rate = sampling_rate\n header.station = site_code\n header.channel = channel\n header.starttime = s_starttime\n header.npts = len(data)\n tr = Trace(data=data[:, k], header=header)\n traces.append(tr)\n\n return Stream(traces=traces)\n\n\ndef get_picks(base_url, event_name, inventory, timezone):\n \"\"\"\n Read information for one event using the REST API provided by the IMS\n synapse server and return a Catalog object.\n\n :param base_url: base url of the IMS server e.g.\n http://10.95.74.35:8002/ims-database-server/databases/mgl\n :param event_name: event name\n :type event_name: string\n :param inventory: inventory object containing information on the network\n and sensors\n :type inventory: microquake.core.data.inventory\n :return: (list of picks, origin_time)\n :rtype: 
microquake.event.Catalog\n \"\"\"\n\n url = base_url + '/events/read_event?eventName=%s' % (event_name)\n try:\n r2 = requests.get(url)\n except requests.RequestException as e:\n logger.error(e)\n return\n\n origin = Origin()\n picks = []\n arrivals = []\n\n for line in r2.iter_lines():\n line = line.decode('utf-8')\n # if 'event-time' in line:\n\n if 'loc-t0-nanos' in line:\n try:\n origin.time = EpochNano2UTCDateTime(int(line.split('=')[-1]),\n timezone)\n except:\n origin.time = UTCDateTime.now()\n elif 'accepted' in line:\n if 'true' in line:\n origin.evaluation_status = 'reviewed'\n# No information is provided to really know what the status is. Assuming manual.\n origin.evaluation_mode = 'manual'\n else:\n origin.evaluation_status = 'rejected'\n origin.evaluation_mode = 'manual'\n elif 'loc-south' in line:\n origin.y = -float(line.split('=')[-1])\n elif 'loc-west' in line:\n origin.x = -float(line.split('=')[-1])\n elif 'loc-down' in line:\n origin.z = -float(line.split('=')[-1])\n# elif 'local-magnitude' in line:\n\n elif 't.' in line:\n if 'index' in line:\n waveform_id = WaveformStreamID()\n elif 'site-id' in line:\n station_code = line.split('=')[-1]\n waveform_id.station_code = station_code\n elif 'accepted=false' in line:\n continue\n elif 'pick-p-time-nanos' in line:\n pick = Pick()\n arrival = Arrival()\n pick.time = EpochNano2UTCDateTime(int(line.split('=')[-1]),\n timezone)\n pick.phase_hint = 'P'\n pick.waveform_id = waveform_id\n pick.evaluation_mode = origin.evaluation_mode\n pick.evaluation_status = origin.evaluation_status\n arrival.pick_id = pick.resource_id.id\n arrival.phase = 'P'\n\n station = inventory.select(str(station_code))\n if station is None:\n logger.warning(\"Station %s not found!\\n The station \"\n \"object needs to be updated\" % station_code)\n continue\n\n arrival.distance = np.linalg.norm(station.loc - origin.loc)\n arrival.takeoff_angle = np.arccos((station.z - origin.z)\n / arrival.distance) * 180 \\\n / np.pi\n dx = station.x - origin.x\n dy = station.y - origin.y\n arrival.azimuth = np.arctan2(dx, dy) * 180 / np.pi\n picks.append(pick)\n arrivals.append(arrival)\n\n elif 'pick-s-time-nanos' in line:\n pick = Pick()\n arrival = Arrival()\n pick.time = EpochNano2UTCDateTime(int(line.split('=')[-1]),\n timezone)\n pick.phase_hint = 'S'\n pick.waveform_id = waveform_id\n pick.evaluation_mode = origin.evaluation_mode\n pick.evaluation_status = origin.evaluation_status\n arrival.pick_id = pick.resource_id.id\n arrival.phase = 'S'\n station = inventory.select(station_code)\n\n arrival.distance = np.linalg.norm(station.loc - origin.loc)\n arrival.takeoff_angle = np.arccos((station.z - origin.z)\n / arrival.distance) * 180 / np.pi\n dx = station.x - origin.x\n dy = station.y - origin.y\n arrival.azimuth = np.arctan2(dx, dy) * 180 / np.pi\n picks.append(pick)\n arrivals.append(arrival)\n\n return (picks, arrivals)\n\n\ndef get_picks_event(base_url, event, site, timezone):\n \"\"\"\n get pick for an microquake event\n :param base_url:\n :param event:\n :param site:\n :return: event\n \"\"\"\n\n event_name = event.EVENT_NAME\n\n (picks, arrivals) = get_picks(base_url, event_name, site, timezone)\n\n event.preferred_origin().arrivals = arrivals\n event.picks = picks\n\n return event\n\n\ndef get_seismogram_event(base_url, event, network_code, timezone):\n \"\"\"\n Read the seismograms related to an event using the IMS REST API interface\n :param base_url: base url of the IMS server e.g.\n http://10.95.74.35:8002/ims-database-server/databases/mgl\n :type 
base_url: string\n    :param event: an event containing origins, arrivals and picks\n    :type event: microquake.core.event.Event\n    :param network_code: code of the network\n    :type network_code: string\n    :return: a stream of traces\n    :rtype: microquake.core.Stream\n    \"\"\"\n\n    seismogram_names = event.ASSOC_SEISMOGRAM_NAMES.split(';')\n    station_codes = event.TRIGGERED_SITES.split(';')\n    traces = []\n\n    for sname, station_code in zip(seismogram_names, station_codes):\n        try:\n            st = get_seismogram(base_url, sname, network_code, station_code,\n                                timezone)\n        except requests.exceptions.RequestException as e:\n            logger.error(e)\n\n            st = get_seismogram(base_url, sname, network_code, station_code,\n                                timezone)\n\n\n        for tr in st:\n            traces.append(tr)\n\n    return Stream(traces=traces)\n\n\ndef get_range(base_url, start_datetime, end_datetime, site, network_code,\n              timezone=None, blast=True, event=True, accepted=True, manual=True):\n    \"\"\"\n    read catalogue, picks, and seismogram for a range of dates through the REST\n    API provided by the IMS synapse server\n\n    :param base_url: base url of the IMS server e.g.\n    http://10.95.74.35:8002/ims-database-server/databases/mgl\n    :param start_datetime: request start time\n    :type start_datetime: datetime.datetime\n    :param end_datetime: request end time\n    :type end_datetime: datetime.datetime\n    :param site: a site object containing system information\n    :type site: microquake.core.data.station.Site\n    :param blast: if True return blasts (default True)\n    :type blast: bool\n    :param event: if True return events (default True)\n    :type event: bool\n    :param network_code: network code\n    :param accepted: if True only accepted events and blasts are returned (\n    default True)\n    :type accepted: bool\n    :param manual: if True only manually processed event are returned (\n    default True)\n    :param timezone: time zone name (see pytz for a list of time zones)\n    :return: a list of catalog and stream tuple\n    \"\"\"\n\n    events = get_catalogue(base_url, start_datetime, end_datetime, site,\n                           timezone, blast, event, accepted, manual)\n\n    streams = [get_seismogram_event(base_url, event, network_code, timezone)\n               for event in events]\n\n    catalogs = [Catalog(events=[event]) for event in events]\n\n    return [(cat, st) for cat, st in zip(catalogs, streams)]\n"
] |
[
[
"numpy.isnan",
"numpy.arange",
"numpy.vstack",
"numpy.ndarray",
"numpy.linalg.norm",
"numpy.arccos",
"numpy.arctan2",
"numpy.diff",
"numpy.interp",
"numpy.array",
"numpy.where",
"numpy.zeros"
]
] |
TrueNobility303/Raphael-style-transfer-CNN
|
[
"ebffc635701d9966ab68c261f3fa0ce8f831e395",
"ebffc635701d9966ab68c261f3fa0ce8f831e395"
] |
[
"net/gan.py",
"net/swap.py"
] |
[
"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n# for GAN, REF is also my github: https://github.com/TrueNobility303/GAN-face-generator\n\n# implement of cycle GAN\n# 也可视为一种跨域操作,使用GAN从A域跨到B域\n\n#使用instance norm代替BN\ndef Conv(n_input, n_output, k_size=4, stride=2, padding=0, bn=False):\n return nn.Sequential(\n nn.Conv2d(n_input, n_output, kernel_size=k_size, stride=stride, padding=padding, bias=False),\n nn.InstanceNorm2d(n_output,affine=True),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout(p=0.2, inplace=False))\n\ndef Deconv(n_input, n_output, k_size=4, stride=2, padding=1):\n return nn.Sequential(\n nn.ConvTranspose2d(n_input, n_output,kernel_size=k_size,stride=stride, padding=padding,bias=False),\n nn.InstanceNorm2d(n_output,affine=True),\n nn.ReLU(inplace=True))\n\nclass Discriminator(nn.Module):\n def __init__(self, nc=64):\n super(Discriminator, self).__init__()\n self.net = nn.Sequential(\n nn.Conv2d(3, nc,kernel_size=4,stride=2,padding=1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n Conv(nc, nc*2, 4,2,1),\n Conv(nc*2, nc*4, 4,2,1),\n Conv(nc*4, nc*8, 4,2,1),\n nn.Conv2d(nc*8, 1,4,1,0, bias=False),\n nn.Sigmoid())\n \n def forward(self, input):\n #print(input.shape)\n return self.net(input)\n\n\n",
"import torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.models import vgg19\nimport torch \n\nclass VGGEncoder(nn.Module):\n def __init__(self):\n super().__init__()\n vgg = vgg19(pretrained=True).features\n self.slice1 = vgg[: 2]\n self.slice2 = vgg[2: 7]\n self.slice3 = vgg[7: 12]\n for p in self.parameters():\n p.requires_grad = False\n\n def forward(self, images, retrun_hidden_features=False):\n h1 = self.slice1(images)\n h2 = self.slice2(h1)\n h3 = self.slice3(h2)\n if retrun_hidden_features is False:\n return h3\n else: \n return h1,h2,h3\n\nclass RC(nn.Module):\n \"\"\"A wrapper of ReflectionPad2d and Conv2d\"\"\"\n def __init__(self, in_channels, out_channels, kernel_size=3, pad_size=1, activated=True):\n super().__init__()\n self.pad = nn.ReflectionPad2d((pad_size, pad_size, pad_size, pad_size))\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size)\n self.activated = activated\n\n def forward(self, x):\n h = self.pad(x)\n h = self.conv(h)\n if self.activated:\n return F.relu(h)\n else:\n return h\n\nclass Decoder(nn.Module):\n def __init__(self):\n super().__init__()\n self.rc1 = RC(256, 128, 3, 1)\n self.rc2 = RC(128, 128, 3, 1)\n self.rc3 = RC(128, 64, 3, 1)\n self.rc4 = RC(64, 64, 3, 1)\n self.rc5 = RC(64, 3, 3, 1, False)\n\n def forward(self, features):\n h = self.rc1(features)\n h = F.interpolate(h, scale_factor=2)\n h = self.rc2(h)\n h = self.rc3(h)\n h = F.interpolate(h, scale_factor=2)\n h = self.rc4(h)\n h = self.rc5(h)\n return torch.sigmoid(h)\n\ndef style_swap(content_feature, style_feature, kernel_size, stride=1):\n kh, kw = kernel_size, kernel_size\n sh, sw = stride, stride\n\n patches = style_feature.unfold(2, kh, sh).unfold(3, kw, sw)\n\n patches = patches.permute(0, 2, 3, 1, 4, 5)\n patches = patches.reshape(-1, *patches.shape[-3:]) # (patch_numbers, C, kh, kw)\n\n norm = torch.norm(patches.reshape(patches.shape[0], -1), dim=1).reshape(-1, 1, 1, 1)\n\n noramalized_patches = patches / norm\n\n conv_out = F.conv2d(content_feature, noramalized_patches)\n\n one_hots = torch.zeros_like(conv_out)\n one_hots.scatter_(1, conv_out.argmax(dim=1, keepdim=True), 1)\n\n deconv_out = F.conv_transpose2d(one_hots, patches)\n\n overlap = F.conv_transpose2d(one_hots, torch.ones_like(patches))\n\n res = deconv_out / overlap\n return res\n\ndef TVloss(img, tv_weight):\n w_variance = torch.sum(torch.pow(img[:, :, :, :-1] - img[:, :, :, 1:], 2))\n h_variance = torch.sum(torch.pow(img[:, :, :-1, :] - img[:, :, 1:, :], 2))\n loss = tv_weight * (h_variance + w_variance)\n return loss\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.InstanceNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.ReLU"
],
[
"torch.nn.functional.conv_transpose2d",
"torch.sigmoid",
"torch.nn.ReflectionPad2d",
"torch.nn.functional.conv2d",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.pow",
"torch.nn.functional.relu",
"torch.nn.functional.interpolate",
"torch.ones_like"
]
] |
ShellyDong/AlphaGo
|
[
"04bac9c9d16c16c3baf16988ac761903a687d4de"
] |
[
"tests/test_policy.py"
] |
[
"import os\nimport unittest\nimport numpy as np\nfrom AlphaGo import go\nfrom AlphaGo.go import GameState\nfrom AlphaGo.models.policy import CNNPolicy, ResnetPolicy\nfrom AlphaGo.ai import GreedyPolicyPlayer, ProbabilisticPolicyPlayer\n\n\nclass TestCNNPolicy(unittest.TestCase):\n\n def test_default_policy(self):\n\n policy = CNNPolicy([\"board\", \"liberties\", \"sensibleness\", \"capture_size\"])\n policy.eval_state(GameState())\n # just hope nothing breaks\n\n def test_batch_eval_state(self):\n\n policy = CNNPolicy([\"board\", \"liberties\", \"sensibleness\", \"capture_size\"])\n results = policy.batch_eval_state([GameState(), GameState()])\n self.assertEqual(len(results), 2) # one result per GameState\n self.assertEqual(len(results[0]), 361) # each one has 361 (move,prob) pairs\n\n def test_output_size(self):\n\n policy19 = CNNPolicy([\"board\", \"liberties\", \"sensibleness\", \"capture_size\"], board=19)\n output = policy19.forward(policy19.preprocessor.state_to_tensor(GameState()))\n self.assertEqual(output.shape, (1, 19 * 19))\n\n policy13 = CNNPolicy([\"board\", \"liberties\", \"sensibleness\", \"capture_size\"], board=13)\n output = policy13.forward(policy13.preprocessor.state_to_tensor(GameState(size=13)))\n self.assertEqual(output.shape, (1, 13 * 13))\n\n def test_save_load(self):\n\n policy = CNNPolicy([\"board\", \"liberties\", \"sensibleness\", \"capture_size\"])\n\n model_file = 'TESTPOLICY.json'\n weights_file = 'TESTWEIGHTS.h5'\n model_file2 = 'TESTPOLICY2.json'\n weights_file2 = 'TESTWEIGHTS2.h5'\n\n # test saving model/weights separately\n policy.save_model(model_file)\n policy.model.save_weights(weights_file, overwrite=True)\n # test saving them together\n policy.save_model(model_file2, weights_file2)\n\n copypolicy = CNNPolicy.load_model(model_file)\n copypolicy.model.load_weights(weights_file)\n\n copypolicy2 = CNNPolicy.load_model(model_file2)\n\n for w1, w2 in zip(copypolicy.model.get_weights(), copypolicy2.model.get_weights()):\n self.assertTrue(np.all(w1 == w2))\n\n os.remove(model_file)\n os.remove(weights_file)\n os.remove(model_file2)\n os.remove(weights_file2)\n\n\nclass TestResnetPolicy(unittest.TestCase):\n def test_default_policy(self):\n\n policy = ResnetPolicy([\"board\", \"liberties\", \"sensibleness\", \"capture_size\"])\n policy.eval_state(GameState())\n # just hope nothing breaks\n\n def test_batch_eval_state(self):\n\n policy = ResnetPolicy([\"board\", \"liberties\", \"sensibleness\", \"capture_size\"])\n results = policy.batch_eval_state([GameState(), GameState()])\n self.assertEqual(len(results), 2) # one result per GameState\n self.assertEqual(len(results[0]), 361) # each one has 361 (move,prob) pairs\n\n def test_save_load(self):\n \"\"\"\n Identical to above test_save_load\n \"\"\"\n\n policy = ResnetPolicy([\"board\", \"liberties\", \"sensibleness\", \"capture_size\"])\n\n model_file = 'TESTPOLICY.json'\n weights_file = 'TESTWEIGHTS.h5'\n model_file2 = 'TESTPOLICY2.json'\n weights_file2 = 'TESTWEIGHTS2.h5'\n\n # test saving model/weights separately\n policy.save_model(model_file)\n policy.model.save_weights(weights_file, overwrite=True)\n # test saving them together\n policy.save_model(model_file2, weights_file2)\n\n copypolicy = ResnetPolicy.load_model(model_file)\n copypolicy.model.load_weights(weights_file)\n\n copypolicy2 = ResnetPolicy.load_model(model_file2)\n\n for w1, w2 in zip(copypolicy.model.get_weights(), copypolicy2.model.get_weights()):\n self.assertTrue(np.all(w1 == w2))\n\n # check that save/load keeps the ResnetPolicy class\n 
self.assertTrue(type(policy) == type(copypolicy))\n\n os.remove(model_file)\n os.remove(weights_file)\n os.remove(model_file2)\n os.remove(weights_file2)\n\n\nclass TestPlayers(unittest.TestCase):\n\n def test_greedy_player(self):\n\n gs = GameState()\n policy = CNNPolicy([\"board\", \"ones\", \"turns_since\"])\n player = GreedyPolicyPlayer(policy)\n for _ in range(20):\n move = player.get_move(gs)\n self.assertNotEqual(move, go.PASS)\n gs.do_move(move)\n\n def test_probabilistic_player(self):\n\n gs = GameState()\n policy = CNNPolicy([\"board\", \"ones\", \"turns_since\"])\n player = ProbabilisticPolicyPlayer(policy)\n for _ in range(20):\n move = player.get_move(gs)\n self.assertNotEqual(move, go.PASS)\n gs.do_move(move)\n\n def test_sensible_probabilistic(self):\n\n gs = GameState()\n policy = CNNPolicy([\"board\", \"ones\", \"turns_since\"])\n player = ProbabilisticPolicyPlayer(policy)\n empty = (10, 10)\n for x in range(19):\n for y in range(19):\n if (x, y) != empty:\n gs.do_move((x, y), go.BLACK)\n gs.set_current_player(go.BLACK)\n self.assertEqual(player.get_move(gs), go.PASS)\n\n def test_sensible_greedy(self):\n\n gs = GameState()\n policy = CNNPolicy([\"board\", \"ones\", \"turns_since\"])\n player = GreedyPolicyPlayer(policy)\n empty = (10, 10)\n for x in range(19):\n for y in range(19):\n if (x, y) != empty:\n gs.do_move((x, y), go.BLACK)\n\n gs.set_current_player(go.BLACK)\n self.assertEqual(player.get_move(gs), go.PASS)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.all"
]
] |
DiceDn/XgbTest
|
[
"7e32a5adf9c27608518264caf93bc6b723ce0168"
] |
[
"test/venv/Lib/site-packages/sklearn/compose/_column_transformer.py"
] |
[
"\"\"\"\nThe :mod:`sklearn.compose._column_transformer` module implements utilities\nto work with heterogeneous data and to apply different transformers to\ndifferent columns.\n\"\"\"\n# Author: Andreas Mueller\n# Joris Van den Bossche\n# License: BSD\nfrom __future__ import division\n\nfrom itertools import chain\n\nimport numpy as np\nimport warnings\nfrom scipy import sparse\n\nfrom ..base import clone, TransformerMixin\nfrom ..utils._joblib import Parallel, delayed\nfrom ..externals import six\nfrom ..pipeline import _fit_transform_one, _transform_one, _name_estimators\nfrom ..preprocessing import FunctionTransformer\nfrom ..utils import Bunch\nfrom ..utils.metaestimators import _BaseComposition\nfrom ..utils.validation import check_array, check_is_fitted\n\n\n__all__ = ['ColumnTransformer', 'make_column_transformer']\n\n\n_ERR_MSG_1DCOLUMN = (\"1D data passed to a transformer that expects 2D data. \"\n \"Try to specify the column selection as a list of one \"\n \"item instead of a scalar.\")\n\n\nclass ColumnTransformer(_BaseComposition, TransformerMixin):\n \"\"\"Applies transformers to columns of an array or pandas DataFrame.\n\n EXPERIMENTAL: some behaviors may change between releases without\n deprecation.\n\n This estimator allows different columns or column subsets of the input\n to be transformed separately and the features generated by each transformer\n will be concatenated to form a single feature space.\n This is useful for heterogeneous or columnar data, to combine several\n feature extraction mechanisms or transformations into a single transformer.\n\n Read more in the :ref:`User Guide <column_transformer>`.\n\n .. versionadded:: 0.20\n\n Parameters\n ----------\n transformers : list of tuples\n List of (name, transformer, column(s)) tuples specifying the\n transformer objects to be applied to subsets of the data.\n\n name : string\n Like in Pipeline and FeatureUnion, this allows the transformer and\n its parameters to be set using ``set_params`` and searched in grid\n search.\n transformer : estimator or {'passthrough', 'drop'}\n Estimator must support `fit` and `transform`. Special-cased\n strings 'drop' and 'passthrough' are accepted as well, to\n indicate to drop the columns or to pass them through untransformed,\n respectively.\n column(s) : string or int, array-like of string or int, slice, \\\nboolean mask array or callable\n Indexes the data on its second axis. Integers are interpreted as\n positional columns, while strings can reference DataFrame columns\n by name. A scalar string or int should be used where\n ``transformer`` expects X to be a 1d array-like (vector),\n otherwise a 2d array will be passed to the transformer.\n A callable is passed the input data `X` and can return any of the\n above.\n\n remainder : {'drop', 'passthrough'} or estimator, default 'drop'\n By default, only the specified columns in `transformers` are\n transformed and combined in the output, and the non-specified\n columns are dropped. (default of ``'drop'``).\n By specifying ``remainder='passthrough'``, all remaining columns that\n were not specified in `transformers` will be automatically passed\n through. This subset of columns is concatenated with the output of\n the transformers.\n By setting ``remainder`` to be an estimator, the remaining\n non-specified columns will use the ``remainder`` estimator. 
The\n        estimator must support `fit` and `transform`.\n\n    sparse_threshold : float, default = 0.3\n        If the output of the different transformers contains sparse matrices,\n        these will be stacked as a sparse matrix if the overall density is\n        lower than this value. Use ``sparse_threshold=0`` to always return\n        dense. When the transformed output consists of all dense data, the\n        stacked result will be dense, and this keyword will be ignored.\n\n    n_jobs : int or None, optional (default=None)\n        Number of jobs to run in parallel.\n        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n        for more details.\n\n    transformer_weights : dict, optional\n        Multiplicative weights for features per transformer. The output of the\n        transformer is multiplied by these weights. Keys are transformer names,\n        values the weights.\n\n    Attributes\n    ----------\n    transformers_ : list\n        The collection of fitted transformers as tuples of\n        (name, fitted_transformer, column). `fitted_transformer` can be an\n        estimator, 'drop', or 'passthrough'. In case there were no columns\n        selected, this will be the unfitted transformer.\n        If there are remaining columns, the final element is a tuple of the\n        form:\n        ('remainder', transformer, remaining_columns) corresponding to the\n        ``remainder`` parameter. If there are remaining columns, then\n        ``len(transformers_)==len(transformers)+1``, otherwise\n        ``len(transformers_)==len(transformers)``.\n\n    named_transformers_ : Bunch object, a dictionary with attribute access\n        Read-only attribute to access any transformer by given name.\n        Keys are transformer names and values are the fitted transformer\n        objects.\n\n    sparse_output_ : boolean\n        Boolean flag indicating whether the output of ``transform`` is a\n        sparse matrix or a dense numpy array, which depends on the output\n        of the individual transformers and the `sparse_threshold` keyword.\n\n    Notes\n    -----\n    The order of the columns in the transformed feature matrix follows the\n    order of how the columns are specified in the `transformers` list.\n    Columns of the original feature matrix that are not specified are\n    dropped from the resulting transformed feature matrix, unless specified\n    in the `passthrough` keyword. Those columns specified with `passthrough`\n    are added at the right to the output of the transformers.\n\n    See also\n    --------\n    sklearn.compose.make_column_transformer : convenience function for\n        combining the outputs of multiple transformer objects applied to\n        column subsets of the original feature space.\n\n    Examples\n    --------\n    >>> from sklearn.compose import ColumnTransformer\n    >>> from sklearn.preprocessing import Normalizer\n    >>> ct = ColumnTransformer(\n    ...     [(\"norm1\", Normalizer(norm='l1'), [0, 1]),\n    ...      (\"norm2\", Normalizer(norm='l1'), slice(2, 4))])\n    >>> X = np.array([[0., 1., 2., 2.],\n    ...               [1., 1., 0., 1.]])\n    >>> # Normalizer scales each row of X to unit norm. A separate scaling\n    >>> # is applied for the two first and two last elements of each\n    >>> # row independently.\n    >>> ct.fit_transform(X) # doctest: +NORMALIZE_WHITESPACE\n    array([[0. , 1. , 0.5, 0.5],\n           [0.5, 0.5, 0. , 1. 
]])\n\n \"\"\"\n\n def __init__(self, transformers, remainder='drop', sparse_threshold=0.3,\n n_jobs=None, transformer_weights=None):\n self.transformers = transformers\n self.remainder = remainder\n self.sparse_threshold = sparse_threshold\n self.n_jobs = n_jobs\n self.transformer_weights = transformer_weights\n\n @property\n def _transformers(self):\n \"\"\"\n Internal list of transformer only containing the name and\n transformers, dropping the columns. This is for the implementation\n of get_params via BaseComposition._get_params which expects lists\n of tuples of len 2.\n \"\"\"\n return [(name, trans) for name, trans, _ in self.transformers]\n\n @_transformers.setter\n def _transformers(self, value):\n self.transformers = [\n (name, trans, col) for ((name, trans), (_, _, col))\n in zip(value, self.transformers)]\n\n def get_params(self, deep=True):\n \"\"\"Get parameters for this estimator.\n\n Parameters\n ----------\n deep : boolean, optional\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : mapping of string to any\n Parameter names mapped to their values.\n \"\"\"\n return self._get_params('_transformers', deep=deep)\n\n def set_params(self, **kwargs):\n \"\"\"Set the parameters of this estimator.\n\n Valid parameter keys can be listed with ``get_params()``.\n\n Returns\n -------\n self\n \"\"\"\n self._set_params('_transformers', **kwargs)\n return self\n\n def _iter(self, fitted=False, replace_strings=False):\n \"\"\"\n Generate (name, trans, column, weight) tuples.\n\n If fitted=True, use the fitted transformers, else use the\n user specified transformers updated with converted column names\n and potentially appended with transformer for remainder.\n\n \"\"\"\n if fitted:\n transformers = self.transformers_\n else:\n # interleave the validated column specifiers\n transformers = [\n (name, trans, column) for (name, trans, _), column\n in zip(self.transformers, self._columns)\n ]\n # add transformer tuple for remainder\n if self._remainder[2] is not None:\n transformers = chain(transformers, [self._remainder])\n get_weight = (self.transformer_weights or {}).get\n\n for name, trans, column in transformers:\n if replace_strings:\n # replace 'passthrough' with identity transformer and\n # skip in case of 'drop'\n if trans == 'passthrough':\n trans = FunctionTransformer(\n validate=False, accept_sparse=True,\n check_inverse=False)\n elif trans == 'drop':\n continue\n elif _is_empty_column_selection(column):\n continue\n\n yield (name, trans, column, get_weight(name))\n\n def _validate_transformers(self):\n if not self.transformers:\n return\n\n names, transformers, _ = zip(*self.transformers)\n\n # validate names\n self._validate_names(names)\n\n # validate estimators\n for t in transformers:\n if t in ('drop', 'passthrough'):\n continue\n if (not (hasattr(t, \"fit\") or hasattr(t, \"fit_transform\")) or not\n hasattr(t, \"transform\")):\n raise TypeError(\"All estimators should implement fit and \"\n \"transform, or can be 'drop' or 'passthrough' \"\n \"specifiers. 
'%s' (type %s) doesn't.\" %\n (t, type(t)))\n\n def _validate_column_callables(self, X):\n \"\"\"\n Converts callable column specifications.\n \"\"\"\n columns = []\n for _, _, column in self.transformers:\n if callable(column):\n column = column(X)\n columns.append(column)\n self._columns = columns\n\n def _validate_remainder(self, X):\n \"\"\"\n Validates ``remainder`` and defines ``_remainder`` targeting\n the remaining columns.\n \"\"\"\n is_transformer = ((hasattr(self.remainder, \"fit\")\n or hasattr(self.remainder, \"fit_transform\"))\n and hasattr(self.remainder, \"transform\"))\n if (self.remainder not in ('drop', 'passthrough')\n and not is_transformer):\n raise ValueError(\n \"The remainder keyword needs to be one of 'drop', \"\n \"'passthrough', or estimator. '%s' was passed instead\" %\n self.remainder)\n\n n_columns = X.shape[1]\n cols = []\n for columns in self._columns:\n cols.extend(_get_column_indices(X, columns))\n remaining_idx = sorted(list(set(range(n_columns)) - set(cols))) or None\n\n self._remainder = ('remainder', self.remainder, remaining_idx)\n\n @property\n def named_transformers_(self):\n \"\"\"Access the fitted transformer by name.\n\n Read-only attribute to access any transformer by given name.\n Keys are transformer names and values are the fitted transformer\n objects.\n\n \"\"\"\n # Use Bunch object to improve autocomplete\n return Bunch(**dict([(name, trans) for name, trans, _\n in self.transformers_]))\n\n def get_feature_names(self):\n \"\"\"Get feature names from all transformers.\n\n Returns\n -------\n feature_names : list of strings\n Names of the features produced by transform.\n \"\"\"\n check_is_fitted(self, 'transformers_')\n feature_names = []\n for name, trans, _, _ in self._iter(fitted=True):\n if trans == 'drop':\n continue\n elif trans == 'passthrough':\n raise NotImplementedError(\n \"get_feature_names is not yet supported when using \"\n \"a 'passthrough' transformer.\")\n elif not hasattr(trans, 'get_feature_names'):\n raise AttributeError(\"Transformer %s (type %s) does not \"\n \"provide get_feature_names.\"\n % (str(name), type(trans).__name__))\n feature_names.extend([name + \"__\" + f for f in\n trans.get_feature_names()])\n return feature_names\n\n def _update_fitted_transformers(self, transformers):\n # transformers are fitted; excludes 'drop' cases\n fitted_transformers = iter(transformers)\n transformers_ = []\n\n for name, old, column, _ in self._iter():\n if old == 'drop':\n trans = 'drop'\n elif old == 'passthrough':\n # FunctionTransformer is present in list of transformers,\n # so get next transformer, but save original string\n next(fitted_transformers)\n trans = 'passthrough'\n elif _is_empty_column_selection(column):\n trans = old\n else:\n trans = next(fitted_transformers)\n transformers_.append((name, trans, column))\n\n # sanity check that transformers is exhausted\n assert not list(fitted_transformers)\n self.transformers_ = transformers_\n\n def _validate_output(self, result):\n \"\"\"\n Ensure that the output of each transformer is 2D. 
Otherwise\n hstack can raise an error or produce incorrect results.\n \"\"\"\n names = [name for name, _, _, _ in self._iter(fitted=True,\n replace_strings=True)]\n for Xs, name in zip(result, names):\n if not getattr(Xs, 'ndim', 0) == 2:\n raise ValueError(\n \"The output of the '{0}' transformer should be 2D (scipy \"\n \"matrix, array, or pandas DataFrame).\".format(name))\n\n def _fit_transform(self, X, y, func, fitted=False):\n \"\"\"\n Private function to fit and/or transform on demand.\n\n Return value (transformers and/or transformed X data) depends\n on the passed function.\n ``fitted=True`` ensures the fitted transformers are used.\n \"\"\"\n try:\n return Parallel(n_jobs=self.n_jobs)(\n delayed(func)(clone(trans) if not fitted else trans,\n _get_column(X, column), y, weight)\n for _, trans, column, weight in self._iter(\n fitted=fitted, replace_strings=True))\n except ValueError as e:\n if \"Expected 2D array, got 1D array instead\" in str(e):\n raise ValueError(_ERR_MSG_1DCOLUMN)\n else:\n raise\n\n def fit(self, X, y=None):\n \"\"\"Fit all transformers using X.\n\n Parameters\n ----------\n X : array-like or DataFrame of shape [n_samples, n_features]\n Input data, of which specified subsets are used to fit the\n transformers.\n\n y : array-like, shape (n_samples, ...), optional\n Targets for supervised learning.\n\n Returns\n -------\n self : ColumnTransformer\n This estimator\n\n \"\"\"\n # we use fit_transform to make sure to set sparse_output_ (for which we\n # need the transformed data) to have consistent output type in predict\n self.fit_transform(X, y=y)\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit all transformers, transform the data and concatenate results.\n\n Parameters\n ----------\n X : array-like or DataFrame of shape [n_samples, n_features]\n Input data, of which specified subsets are used to fit the\n transformers.\n\n y : array-like, shape (n_samples, ...), optional\n Targets for supervised learning.\n\n Returns\n -------\n X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)\n hstack of results of transformers. sum_n_components is the\n sum of n_components (output dimension) over transformers. If\n any result is a sparse matrix, everything will be converted to\n sparse matrices.\n\n \"\"\"\n X = _check_X(X)\n self._validate_transformers()\n self._validate_column_callables(X)\n self._validate_remainder(X)\n\n result = self._fit_transform(X, y, _fit_transform_one)\n\n if not result:\n self._update_fitted_transformers([])\n # All transformers are None\n return np.zeros((X.shape[0], 0))\n\n Xs, transformers = zip(*result)\n\n # determine if concatenated output will be sparse or not\n if any(sparse.issparse(X) for X in Xs):\n nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs)\n total = sum(X.shape[0] * X.shape[1] if sparse.issparse(X)\n else X.size for X in Xs)\n density = nnz / total\n self.sparse_output_ = density < self.sparse_threshold\n else:\n self.sparse_output_ = False\n\n self._update_fitted_transformers(transformers)\n self._validate_output(Xs)\n\n return self._hstack(list(Xs))\n\n def transform(self, X):\n \"\"\"Transform X separately by each transformer, concatenate results.\n\n Parameters\n ----------\n X : array-like or DataFrame of shape [n_samples, n_features]\n The data to be transformed by subset.\n\n Returns\n -------\n X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)\n hstack of results of transformers. 
sum_n_components is the\n sum of n_components (output dimension) over transformers. If\n any result is a sparse matrix, everything will be converted to\n sparse matrices.\n\n \"\"\"\n check_is_fitted(self, 'transformers_')\n\n X = _check_X(X)\n Xs = self._fit_transform(X, None, _transform_one, fitted=True)\n self._validate_output(Xs)\n\n if not Xs:\n # All transformers are None\n return np.zeros((X.shape[0], 0))\n\n return self._hstack(list(Xs))\n\n def _hstack(self, Xs):\n \"\"\"Stacks Xs horizontally.\n\n This allows subclasses to control the stacking behavior, while reusing\n everything else from ColumnTransformer.\n\n Parameters\n ----------\n Xs : List of numpy arrays, sparse arrays, or DataFrames\n \"\"\"\n if self.sparse_output_:\n try:\n # since all columns should be numeric before stacking them\n # in a sparse matrix, `check_array` is used for the\n # dtype conversion if necessary.\n converted_Xs = [check_array(X,\n accept_sparse=True,\n force_all_finite=False)\n for X in Xs]\n except ValueError:\n raise ValueError(\"For a sparse output, all columns should\"\n \" be a numeric or convertible to a numeric.\")\n\n return sparse.hstack(converted_Xs).tocsr()\n else:\n Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]\n return np.hstack(Xs)\n\n\ndef _check_X(X):\n \"\"\"Use check_array only on lists and other non-array-likes / sparse\"\"\"\n if hasattr(X, '__array__') or sparse.issparse(X):\n return X\n return check_array(X, force_all_finite='allow-nan', dtype=np.object)\n\n\ndef _check_key_type(key, superclass):\n \"\"\"\n Check that scalar, list or slice is of a certain type.\n\n This is only used in _get_column and _get_column_indices to check\n if the `key` (column specification) is fully integer or fully string-like.\n\n Parameters\n ----------\n key : scalar, list, slice, array-like\n The column specification to check\n superclass : int or six.string_types\n The type for which to check the `key`\n\n \"\"\"\n if isinstance(key, superclass):\n return True\n if isinstance(key, slice):\n return (isinstance(key.start, (superclass, type(None))) and\n isinstance(key.stop, (superclass, type(None))))\n if isinstance(key, list):\n return all(isinstance(x, superclass) for x in key)\n if hasattr(key, 'dtype'):\n if superclass is int:\n return key.dtype.kind == 'i'\n else:\n # superclass = six.string_types\n return key.dtype.kind in ('O', 'U', 'S')\n return False\n\n\ndef _get_column(X, key):\n \"\"\"\n Get feature column(s) from input data X.\n\n Supported input types (X): numpy arrays, sparse arrays and DataFrames\n\n Supported key types (key):\n - scalar: output is 1D\n - lists, slices, boolean masks: output is 2D\n - callable that returns any of the above\n\n Supported key data types:\n\n - integer or boolean mask (positional):\n - supported for arrays, sparse matrices and dataframes\n - string (key-based):\n - only supported for dataframes\n - So no keys other than strings are allowed (while in principle you\n can use any hashable object as key).\n\n \"\"\"\n # check whether we have string column names or integers\n if _check_key_type(key, int):\n column_names = False\n elif _check_key_type(key, six.string_types):\n column_names = True\n elif hasattr(key, 'dtype') and np.issubdtype(key.dtype, np.bool_):\n # boolean mask\n column_names = False\n if hasattr(X, 'loc'):\n # pandas boolean masks don't work with iloc, so take loc path\n column_names = True\n else:\n raise ValueError(\"No valid specification of the columns. 
Only a \"\n \"scalar, list or slice of all integers or all \"\n \"strings, or boolean mask is allowed\")\n\n if column_names:\n if hasattr(X, 'loc'):\n # pandas dataframes\n return X.loc[:, key]\n else:\n raise ValueError(\"Specifying the columns using strings is only \"\n \"supported for pandas DataFrames\")\n else:\n if hasattr(X, 'iloc'):\n # pandas dataframes\n return X.iloc[:, key]\n else:\n # numpy arrays, sparse arrays\n return X[:, key]\n\n\ndef _get_column_indices(X, key):\n \"\"\"\n Get feature column indices for input data X and key.\n\n For accepted values of `key`, see the docstring of _get_column\n\n \"\"\"\n n_columns = X.shape[1]\n\n if _check_key_type(key, int):\n if isinstance(key, int):\n return [key]\n elif isinstance(key, slice):\n return list(range(n_columns)[key])\n else:\n return list(key)\n\n elif _check_key_type(key, six.string_types):\n try:\n all_columns = list(X.columns)\n except AttributeError:\n raise ValueError(\"Specifying the columns using strings is only \"\n \"supported for pandas DataFrames\")\n if isinstance(key, six.string_types):\n columns = [key]\n elif isinstance(key, slice):\n start, stop = key.start, key.stop\n if start is not None:\n start = all_columns.index(start)\n if stop is not None:\n # pandas indexing with strings is endpoint included\n stop = all_columns.index(stop) + 1\n else:\n stop = n_columns + 1\n return list(range(n_columns)[slice(start, stop)])\n else:\n columns = list(key)\n\n return [all_columns.index(col) for col in columns]\n\n elif hasattr(key, 'dtype') and np.issubdtype(key.dtype, np.bool_):\n # boolean mask\n return list(np.arange(n_columns)[key])\n else:\n raise ValueError(\"No valid specification of the columns. Only a \"\n \"scalar, list or slice of all integers or all \"\n \"strings, or boolean mask is allowed\")\n\n\ndef _is_empty_column_selection(column):\n \"\"\"\n Return True if the column selection is empty (empty list or all-False\n boolean array).\n\n \"\"\"\n if hasattr(column, 'dtype') and np.issubdtype(column.dtype, np.bool_):\n return not column.any()\n elif hasattr(column, '__len__'):\n return len(column) == 0\n else:\n return False\n\n\ndef _validate_transformers(transformers):\n \"\"\"Checks if given transformers are valid.\n\n This is a helper function to support the deprecated tuple order.\n XXX Remove in v0.22\n \"\"\"\n if not transformers:\n return True\n\n for t in transformers:\n if t in ('drop', 'passthrough'):\n continue\n if (not (hasattr(t, \"fit\") or hasattr(t, \"fit_transform\")) or not\n hasattr(t, \"transform\")):\n return False\n\n return True\n\n\ndef _is_deprecated_tuple_order(tuples):\n \"\"\"Checks if the input follows the deprecated tuple order.\n\n Returns\n -------\n Returns true if (transformer, columns) is not a valid assumption for the\n input, but (columns, transformer) is valid. The latter is deprecated and\n its support will stop in v0.22.\n\n XXX Remove in v0.22\n \"\"\"\n transformers, columns = zip(*tuples)\n if (not _validate_transformers(transformers)\n and _validate_transformers(columns)):\n return True\n\n return False\n\n\ndef _get_transformer_list(estimators):\n \"\"\"\n Construct (name, trans, column) tuples from list\n\n \"\"\"\n message = ('`make_column_transformer` now expects (transformer, columns) '\n 'as input tuples instead of (columns, transformer). This '\n 'has been introduced in v0.20.1. 
`make_column_transformer` '\n 'will stop accepting the deprecated (columns, transformer) '\n 'order in v0.22.')\n\n transformers, columns = zip(*estimators)\n\n # XXX Remove in v0.22\n if _is_deprecated_tuple_order(estimators):\n transformers, columns = columns, transformers\n warnings.warn(message, DeprecationWarning)\n\n names, _ = zip(*_name_estimators(transformers))\n\n transformer_list = list(zip(names, transformers, columns))\n return transformer_list\n\n\ndef make_column_transformer(*transformers, **kwargs):\n \"\"\"Construct a ColumnTransformer from the given transformers.\n\n This is a shorthand for the ColumnTransformer constructor; it does not\n require, and does not permit, naming the transformers. Instead, they will\n be given names automatically based on their types. It also does not allow\n weighting with ``transformer_weights``.\n\n Parameters\n ----------\n *transformers : tuples of transformers and column selections\n\n remainder : {'drop', 'passthrough'} or estimator, default 'drop'\n By default, only the specified columns in `transformers` are\n transformed and combined in the output, and the non-specified\n columns are dropped. (default of ``'drop'``).\n By specifying ``remainder='passthrough'``, all remaining columns that\n were not specified in `transformers` will be automatically passed\n through. This subset of columns is concatenated with the output of\n the transformers.\n By setting ``remainder`` to be an estimator, the remaining\n non-specified columns will use the ``remainder`` estimator. The\n estimator must support `fit` and `transform`.\n\n sparse_threshold : float, default = 0.3\n If the transformed output consists of a mix of sparse and dense data,\n it will be stacked as a sparse matrix if the density is lower than this\n value. Use ``sparse_threshold=0`` to always return dense.\n When the transformed output consists of all sparse or all dense data,\n the stacked result will be sparse or dense, respectively, and this\n keyword will be ignored.\n\n n_jobs : int or None, optional (default=None)\n Number of jobs to run in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n\n Returns\n -------\n ct : ColumnTransformer\n\n See also\n --------\n sklearn.compose.ColumnTransformer : Class that allows combining the\n outputs of multiple transformer objects used on column subsets\n of the data into a single feature space.\n\n Examples\n --------\n >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder\n >>> from sklearn.compose import make_column_transformer\n >>> make_column_transformer(\n ... (StandardScaler(), ['numerical_column']),\n ... (OneHotEncoder(), ['categorical_column']))\n ... 
# doctest: +NORMALIZE_WHITESPACE +ELLIPSIS\n ColumnTransformer(n_jobs=None, remainder='drop', sparse_threshold=0.3,\n transformer_weights=None,\n transformers=[('standardscaler',\n StandardScaler(...),\n ['numerical_column']),\n ('onehotencoder',\n OneHotEncoder(...),\n ['categorical_column'])])\n\n \"\"\"\n # transformer_weights keyword is not passed through because the user\n # would need to know the automatically generated names of the transformers\n n_jobs = kwargs.pop('n_jobs', None)\n remainder = kwargs.pop('remainder', 'drop')\n sparse_threshold = kwargs.pop('sparse_threshold', 0.3)\n if kwargs:\n raise TypeError('Unknown keyword arguments: \"{}\"'\n .format(list(kwargs.keys())[0]))\n transformer_list = _get_transformer_list(transformers)\n return ColumnTransformer(transformer_list, n_jobs=n_jobs,\n remainder=remainder,\n sparse_threshold=sparse_threshold)\n"
] |
[
[
"numpy.hstack",
"scipy.sparse.issparse",
"numpy.arange",
"numpy.issubdtype",
"scipy.sparse.hstack",
"numpy.zeros"
]
] |
charlesastaylor/qiskit-terra
|
[
"8670669171bae8d619b6eaee6d08f8630e99f012"
] |
[
"qiskit/quantum_info/states/_states.py"
] |
[
"# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# pylint: disable=invalid-name,anomalous-backslash-in-string\n\n\"\"\"\nA collection of useful quantum information functions for states.\n\n\n\"\"\"\n\nimport numpy as np\nfrom qiskit import QISKitError\n\n\ndef basis_state(str_state, num):\n \"\"\"\n Return a basis state ndarray.\n\n Args:\n str_state (string): a string representing the state.\n num (int): the number of qubits\n Returns:\n ndarray: state(2**num) a quantum state with basis basis state.\n Raises:\n QISKitError: if the dimensions is wrong\n \"\"\"\n n = int(str_state, 2)\n if num >= len(str_state):\n state = np.zeros(1 << num, dtype=complex)\n state[n] = 1\n return state\n else:\n raise QISKitError('size of bitstring is greater than num.')\n\n\ndef random_state(num):\n \"\"\"\n Return a random quantum state from the uniform (Haar) measure on\n state space.\n\n Args:\n num (int): the number of qubits\n Returns:\n ndarray: state(2**num) a random quantum state.\n \"\"\"\n x = np.random.random(1 << num)+0.00000001\n x = -np.log(x)\n sumx = sum(x)\n phases = np.random.random(1 << num)*2.0*np.pi\n return np.sqrt(x/sumx)*np.exp(1j*phases)\n\n\ndef projector(state, flatten=False):\n \"\"\"\n maps a pure state to a state matrix\n\n Args:\n state (ndarray): the number of qubits\n flatten (bool): determine if state matrix of column work\n Returns:\n ndarray: state_mat(2**num, 2**num) if flatten is false\n ndarray: state_mat(4**num) if flatten is true stacked on by the column\n \"\"\"\n density_matrix = np.outer(state.conjugate(), state)\n if flatten:\n return density_matrix.flatten(order='F')\n return density_matrix\n"
] |
[
[
"numpy.log",
"numpy.random.random",
"numpy.sqrt",
"numpy.exp",
"numpy.zeros"
]
] |
rosdyana/Efflux-ATP-Binding
|
[
"7ed591e00946301f900574faba6fda8b7ee812fc"
] |
[
"resnet18.py"
] |
[
"import tensorflow as tf # uncomment this for using GPU\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\n\n\nimport math\nimport json\nimport sys\n\nimport keras\nfrom keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Flatten, Activation, add\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras import initializers\nfrom keras.engine import Layer, InputSpec\nfrom keras import backend as K\nfrom keras.utils import np_utils\nfrom keras.optimizers import *\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix, classification_report, auc, roc_curve\nimport argparse\n\nimport time\nfrom datetime import timedelta, datetime\n\n\ndef changeLabel(label):\n for i in range(len(label)):\n if label[i] == 2:\n label[i] = 0\n\n\ndef labelToOneHot(label): # 0--> [1 0], 1 --> [0 1]\n label = label.reshape(len(label), 1)\n label = np.append(label, label, axis=1)\n label[:, 0] = label[:, 0] == 0\n return label\n\n\ndef dataPreprocessing(dataFile, windowsize):\n # data pre-processing\n data = pd.read_csv(dataFile, header=None)\n X = data.iloc[:, 0:windowsize * 20].values\n y = data.iloc[:, windowsize * 20].values\n X = X.reshape(len(X), windowsize, 20, 1)\n changeLabel(y)\n return X, y\n\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block):\n \"\"\"The identity block is the block that has no conv layer at shortcut.\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n # Returns\n Output tensor for the block.\n \"\"\"\n filters1, filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters2, kernel_size,\n padding='same', name=conv_name_base + '2b')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n x = Activation('relu')(x)\n\n x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)\n x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n x = add([x, input_tensor])\n x = Activation('relu')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\n \"\"\"A block that has a conv layer at shortcut.\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n strides: Strides for the first conv layer in the block.\n # Returns\n Output tensor for the block.\n Note that from stage 3,\n the first conv layer at main path is with strides=(2, 2)\n And the shortcut should have strides=(2, 2) as well\n \"\"\"\n filters1, filters2, filters3 = 
filters\n    if K.image_data_format() == 'channels_last':\n        bn_axis = 3\n    else:\n        bn_axis = 1\n    conv_name_base = 'res' + str(stage) + block + '_branch'\n    bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n    x = Conv2D(filters1, (1, 1), strides=strides,\n               name=conv_name_base + '2a')(input_tensor)\n    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)\n    x = Activation('relu')(x)\n\n    x = Conv2D(filters2, kernel_size, padding='same',\n               name=conv_name_base + '2b')(x)\n    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)\n    x = Activation('relu')(x)\n\n    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)\n    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)\n\n    shortcut = Conv2D(filters3, (1, 1), strides=strides,\n                      name=conv_name_base + '1')(input_tensor)\n    shortcut = BatchNormalization(\n        axis=bn_axis, name=bn_name_base + '1')(shortcut)\n\n    x = add([x, shortcut])\n    x = Activation('relu')(x)\n    return x\n\n\ndef build_model(SHAPE, nb_classes, bn_axis, seed=None):\n    if seed:\n        np.random.seed(seed)\n\n    input_layer = Input(shape=SHAPE)\n\n    x = ZeroPadding2D((3, 3))(input_layer)\n    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)\n    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)\n    x = Activation('relu')(x)\n    x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')\n    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')\n\n    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')\n    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')\n\n    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\n    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')\n    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')\n\n    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\n    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\n    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\n    # print(\"x nya {}\".format(x))\n    # x = AveragePooling2D((7, 7), name='avg_pool')(x)\n\n    x = Flatten()(x)\n    x = Dense(nb_classes, activation='softmax', name='fc10')(x)\n\n    model = Model(input_layer, x)\n\n    return model\n\n\ndef main():\n    start_time = time.monotonic()\n    parser = argparse.ArgumentParser(\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument('-d', '--dimension',\n                        help='an image dimension', type=int, default=20)\n    parser.add_argument('-c', '--channel',\n                        help='an image channel', type=int, default=1)\n    parser.add_argument('-e', '--epochs',\n                        help='num of epochs', type=int, default=100)\n    parser.add_argument('-b', '--batch_size',\n                        help='num of batch_size', type=int, default=64)\n    parser.add_argument('-p', '--dataset_prefix',\n                        help='Dataset prefix', default=\"20\")\n    parser.add_argument('-o', '--output',\n                        help='a result file', type=str, default=\"hasilnya18.txt\")\n    args = parser.parse_args()\n    # dimensions of our images.\n    img_width, img_height = args.dimension, args.dimension\n    channel = args.channel\n    epochs = args.epochs\n    batch_size = args.batch_size\n    SHAPE = (17, 20, channel)\n    bn_axis = 3 if K.image_dim_ordering() == 'tf' else 1\n\n    print(\"loading dataset\")\n    dataset_prefix = args.dataset_prefix\n    dataset_training = \"similar{}training.csv\".format(dataset_prefix)\n    dataset_validation = \"similar{}validation.csv\".format(dataset_prefix)\n    dataset_independent = 
\"similar{}independent.csv\".format(dataset_prefix)\n windowsize = 17\n X_train, Y_train = dataPreprocessing(dataset_training, windowsize)\n X_val, Y_val = dataPreprocessing(dataset_validation, windowsize)\n X_ind, Y_ind = dataPreprocessing(dataset_independent, windowsize)\n\n Y_train = labelToOneHot(Y_train)\n Y_val = labelToOneHot(Y_val)\n Y_ind = labelToOneHot(Y_ind)\n nb_classes = 2\n\n model = build_model(SHAPE, nb_classes, bn_axis)\n\n model.compile(optimizer=Adam(lr=1.0e-4),\n loss='categorical_crossentropy', metrics=['accuracy'])\n\n # Fit the model\n model.fit(X_train, Y_train, batch_size=batch_size,\n epochs=epochs, validation_data=(X_val, Y_val))\n\n # Save Model or creates a HDF5 file\n model.save('resnet18_model.h5', overwrite=True)\n # del model # deletes the existing model\n predicted = model.predict(X_ind)\n y_pred = np.argmax(predicted, axis=1)\n Y_ind = np.argmax(Y_ind, axis=1)\n cm = confusion_matrix(Y_ind, y_pred)\n report = classification_report(Y_ind, y_pred)\n tn = cm[0][0]\n fn = cm[1][0]\n tp = cm[1][1]\n fp = cm[0][1]\n if tp == 0:\n tp = 1\n if tn == 0:\n tn = 1\n if fp == 0:\n fp = 1\n if fn == 0:\n fn = 1\n TPR = float(tp) / (float(tp) + float(fn))\n FPR = float(fp) / (float(fp) + float(tn))\n _fpr, _tpr, _threshold = roc_curve(Y_ind, y_pred)\n AUC = auc(_fpr, _tpr)\n accuracy = round((float(tp) + float(tn)) / (float(tp) +\n float(fp) + float(fn) + float(tn)), 3)\n specitivity = round(float(tn) / (float(tn) + float(fp)), 3)\n sensitivity = round(float(tp) / (float(tp) + float(fn)), 3)\n mcc = round((float(tp) * float(tn) - float(fp) * float(fn)) / math.sqrt(\n (float(tp) + float(fp))\n * (float(tp) + float(fn))\n * (float(tn) + float(fp))\n * (float(tn) + float(fn))\n ), 3)\n\n f_output = open(args.output, 'a')\n f_output.write('=======\\n')\n f_output.write(\"{}\\n\".format(datetime.now))\n f_output.write('TN: {}\\n'.format(tn))\n f_output.write('FN: {}\\n'.format(fn))\n f_output.write('TP: {}\\n'.format(tp))\n f_output.write('FP: {}\\n'.format(fp))\n f_output.write('TPR: {}\\n'.format(TPR))\n f_output.write('FPR: {}\\n'.format(FPR))\n f_output.write('AUC: {}\\n'.format(AUC))\n f_output.write('accuracy: {}\\n'.format(accuracy))\n f_output.write('specitivity: {}\\n'.format(specitivity))\n f_output.write(\"sensitivity : {}\\n\".format(sensitivity))\n f_output.write(\"mcc : {}\\n\".format(mcc))\n f_output.write(\"{}\".format(report))\n f_output.write('=======\\n')\n f_output.close()\n end_time = time.monotonic()\n print(\"Duration : {}\".format(timedelta(seconds=end_time - start_time)))\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.read_csv",
"numpy.random.seed",
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.roc_curve",
"tensorflow.ConfigProto",
"numpy.append",
"numpy.argmax",
"tensorflow.Session",
"sklearn.metrics.auc",
"sklearn.metrics.classification_report"
]
] |
foamliu/Visual-Question-Answering
|
[
"f1714f7a2fd5dd66f6c9d5c4e51ece787112b9bc"
] |
[
"config.py"
] |
[
"import torch\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# Configure models\nim_size = 448\nhidden_size = 512\n\n# Configure training/optimization\nprint_freq = 100\nbatch_size = 64\nteacher_forcing_ratio = 0.5\nclip = 50.0\n\nPAD_token = 0\nEOS_token = 1\nSOS_token = 2\n\ntrain_folder = 'data/train2014'\nvalid_folder = 'data/val2014'\ntest_folder = 'data/test2015'\n\nqa_json = 'data/FM-CH-QA.json'\npickle_file = 'data/data.pkl'\n"
] |
[
[
"torch.cuda.is_available"
]
] |
SKFnordquist/permute
|
[
"e2b13ed7d5508c65af81d59f158929afbc5419bc"
] |
[
"permute/tests/test_irr.py"
] |
[
"from __future__ import (absolute_import, division,\n print_function, unicode_literals)\n\nfrom nose.plugins.attrib import attr\nfrom nose.tools import raises\n\nimport numpy as np\nfrom numpy.testing import (assert_equal,\n assert_almost_equal)\n\nfrom ..irr import (compute_ts,\n simulate_ts_dist,\n simulate_npc_dist)\n\nfrom ..data import nsgk\n\nR = 10\nNs = 35\n\nfrom numpy.random import RandomState\nRNG = RandomState(42)\nres = RNG.binomial(1, .5, (R, Ns))\n\n\ndef test_irr():\n rho_s = compute_ts(res)\n assert_almost_equal(rho_s, 0.51936507)\n #res = spt(group, condition, response, iterations=1000)\n #res1 = spt(group, condition, response, iterations=1000)\n #assert_less(res[1], 0.01)\n #assert_almost_equal(res[3], res1[3])\n\n\ndef test_simulate_ts_dist():\n expected_res1 = {'dist': None,\n 'geq': 624,\n 'obs_ts': 0.51936507936507936,\n 'pvalue': 0.0624,\n 'num_perm': 10000}\n res1 = simulate_ts_dist(res, seed=42)\n assert_equal(res1, expected_res1)\n expected_res2 = {'geq': 9457,\n 'obs_ts': 0.46285714285714286,\n 'num_perm': 10000}\n res2 = simulate_ts_dist(res[:5], seed=42, keep_dist=True)\n assert_equal(res2['geq'], expected_res2['geq'])\n assert_equal(res2['obs_ts'], expected_res2['obs_ts'])\n assert_equal(res2['num_perm'], expected_res2['num_perm'])\n assert_equal(res2['dist'].shape, (10000,))\n\n\n@attr('slow')\ndef test_with_naomi_data():\n \"\"\" Test irr functionality using Naomi data.\"\"\"\n x = nsgk()\n t = x[1]\n y = t[0]\n res = simulate_ts_dist(y, num_perm=10, keep_dist=True, seed=42)\n expected_res = {'dist': np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]),\n 'geq': 10,\n 'num_perm': 10,\n 'pvalue': 1,\n 'obs_ts': 1.0}\n assert_equal(res, expected_res)\n\n\nfreq = RNG.choice([0.2, 0.8], Ns)\nres2 = np.zeros((R, Ns))\n\nfor i in range(len(freq)):\n res2[:, i] = RNG.binomial(1, freq[i], R)\n\n\ndef test_irr_concordance():\n rho_s2 = compute_ts(res2)\n assert_almost_equal(rho_s2, 0.70476190476190481)\n\n\ndef test_simulate_ts_dist_concordance():\n expected_res_conc = {'dist': None,\n 'geq': 0,\n 'obs_ts': 0.70476190476190481,\n 'pvalue': 0.0,\n 'num_perm': 10000}\n res_conc = simulate_ts_dist(res2, seed=42)\n assert_equal(res_conc, expected_res_conc)\n\n\nres1 = simulate_ts_dist(res, keep_dist=True, seed=42)\nres_conc = simulate_ts_dist(res2, keep_dist=True, seed=42)\ntrue_pvalue = np.array(\n [res1['geq'] / res1['num_perm'], res_conc['geq'] / res_conc['num_perm']])\nrho_perm = np.transpose(np.vstack((res1['dist'], res_conc['dist'])))\n\n\ndef test_simulate_npc_dist():\n expected_npc_res = {'num_perm': 10000,\n 'obs_npc': -0.010547525099011886,\n 'pvalue': 0.0016}\n obs_npc_res = simulate_npc_dist(\n rho_perm, size=np.array([Ns, Ns]), pvalues=true_pvalue)\n assert_equal(obs_npc_res, expected_npc_res)\n\n\n@raises(ValueError)\ndef test_simulate_npc_error():\n simulate_npc_dist(rho_perm, size=np.array([Ns, Ns]))\n\n\ndef test_simulate_npc_perfect():\n mat1 = np.tile(np.array([1, 0, 1, 0, 0]), (5, 1))\n mat2 = np.tile(np.array([0, 1, 0]), (5, 1))\n videos = [mat1, mat2]\n time_stamps = np.array([5, 3])\n d = [] # list of the permutation distributions for each video\n tst = [] # list of test statistics for each video\n pval = []\n for j in range(len(videos)): # loop over videos\n res = simulate_ts_dist(videos[j], keep_dist=True, seed=5)\n d.append(res['dist'])\n tst.append(res['obs_ts'])\n pval.append(res['pvalue'])\n perm_distr = np.asarray(d).transpose()\n overall1 = simulate_npc_dist(\n perm_distr, size=time_stamps, pvalues=np.array(pval))\n overall2 = 
simulate_npc_dist(\n perm_distr, size=time_stamps, obs_ts=tst)\n expected_overall = {'num_perm': 10000,\n 'obs_npc': -0.0076080098859340932,\n 'pvalue': 0.0}\n assert_equal(overall1, expected_overall)\n assert_equal(overall2, expected_overall)\n"
] |
[
[
"numpy.testing.assert_equal",
"numpy.asarray",
"numpy.vstack",
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState"
]
] |
NathanHess6/molecool
|
[
"b65eeaee1ff085d2b4a0d42f6cbf868a70f10e2f"
] |
[
"molecool/measure.py"
] |
[
"# have to import because you need it!\nimport numpy as np\n\ndef calculate_distance(pointA, pointB):\n \"\"\"\n This function calculates the distance between two points.\n\n Parameters\n ----------\n pointA, pointB: np.ndarray\n The coordinates of each point.\n\n Returns\n -------\n distance : float\n The distance between two points.\n\n Examples\n --------\n >>> r1 = np.array([0,0,0])\n >>> r2 = np.array([3,0,0])\n >>> calculate_distance(r1,r2)\n 3.0\n \"\"\"\n \n dist_vec = (pointA - pointB)\n distance = np.linalg.norm(dist_vec)\n return distance\n\ndef calculate_angle(pointA, pointB, pointC, degrees = False):\n # Calculate the angle between three points. Answer is given in radians by default, but can be given in degrees\n # by setting degrees = True\n AB = pointB - pointA\n BC = pointB - pointC\n theta = np.arccos(np.dot(AB, BC) / (np.linalg.norm(AB) * np.linalg.norm(BC)))\n\n if degrees:\n return np.degrees(theta)\n else:\n return theta\n"
] |
[
[
"numpy.dot",
"numpy.degrees",
"numpy.linalg.norm"
]
] |
mryab/efficient-dl-systems
|
[
"847bc8fc73357d45fb653b40c3232a02778a55a2"
] |
[
"week09_experiment_tracking/example_project/compute_metrics.py"
] |
[
"import json\nfrom argparse import ArgumentParser\n\nimport torch\nimport torchvision.transforms as transforms\nimport wandb\nfrom torchvision.datasets import CIFAR10\nfrom torchvision.models import resnet18\n\nfrom hparams import config\n\n\ndef main(args):\n api = wandb.Api()\n run = api.run(f\"mryab/effdl_example/{args.run_id}\")\n\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))\n ])\n\n test_dataset = CIFAR10(root='CIFAR10/test',\n train=False,\n transform=transform,\n download=False,\n )\n\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=config[\"batch_size\"])\n\n device = torch.device(\"cuda\")\n\n model = resnet18(pretrained=False, num_classes=10)\n model.load_state_dict(torch.load(\"model.pt\"))\n model.to(device)\n\n correct = 0.0\n\n for test_images, test_labels in test_loader:\n test_images = test_images.to(device)\n test_labels = test_labels.to(device)\n\n with torch.inference_mode():\n outputs = model(test_images)\n preds = torch.argmax(outputs, 1)\n correct += (preds == test_labels).sum()\n\n accuracy = correct / len(test_dataset)\n\n run.summary[\"accuracy\"] = accuracy\n run.summary.update()\n\n with open(\"final_metrics.json\", \"w+\") as f:\n json.dump({\"accuracy\": accuracy.item()}, f)\n print(\"\\n\", file=f)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--run-id', required=True)\n args = parser.parse_args()\n main(args)\n"
] |
[
[
"torch.load",
"torch.utils.data.DataLoader",
"torch.inference_mode",
"torch.device",
"torch.argmax"
]
] |
tsvilans/tas
|
[
"725adfc525d4d896566841328b6b13adf1f28e5e"
] |
[
"python/tas/operators/bpy_merge_images_to_scan.py"
] |
[
"import bpy\nimport numpy as np\nimport math\nimport os\n\nfrom mathutils import Vector\nfrom .bpy_pcd_convert import pcdInterface\nfrom tas.util import flatten\n\n'''\nsrc_dir: source directory where images are stored\nposition_name: basename (with extension) of position image\ncolor_name: basename (with extension) of color image\nout_dir: directory to output to\nname: basename of output file (with '.pcd' extension)\n'''\n\ndef merge_to_scan(src_dir, position_name, color_name, out_dir, name, color_gain=1.0):\n print(\"Merging...\")\n\n scn = bpy.context.scene\n\n pos_path = os.path.join(src_dir, position_name)\n col_path = os.path.join(src_dir, color_name)\n\n if not os.path.exists(pos_path) or not os.path.exists(col_path):\n print (\"Failed to find images to merge.\")\n return\n\n if position_name not in bpy.data.images:\n bpy.data.images.load(pos_path)\n else:\n bpy.data.images[position_name].reload()\n\n if color_name not in bpy.data.images:\n bpy.data.images.load(col_path)\n else:\n bpy.data.images[color_name].reload()\n\n imgPos = bpy.data.images[position_name]\n imgCol = bpy.data.images[color_name]\n\n pxPosition = np.array(imgPos.pixels)\n pxColor = np.array(imgCol.pixels)\n\n w = imgPos.size[0]\n h = imgPos.size[1]\n\n imPosition = pxPosition.reshape(h, w, imgPos.channels)\n imColor = pxColor.reshape(h, w, imgCol.channels)\n\n imPosition = np.delete(imPosition, 3, 2)\n imColor = np.delete(imColor, 3, 2)\n imColor = imColor * color_gain\n imColor = imColor.clip(0,1.0)\n imColor = imColor * 255\n imColor = imColor.astype(np.uint8)\n\n\n if not name.endswith('.pcd'):\n name = name + '.pcd'\n\n pcd = pcdInterface()\n pcd.export_color_pointcloud(os.path.join(out_dir, name), imPosition, imColor)\n\n print(\"Merged %s and %s into %s\" % (position_name, color_name, name))\n\n\n\n\n"
] |
[
[
"numpy.delete",
"numpy.array"
]
] |
NLPrinceton/ALaCarte
|
[
"abe082f049633cc8dd958945e77bcd90fcda2946"
] |
[
"compute.py"
] |
[
"import sys\nfrom collections import Counter\nfrom collections import defaultdict\nfrom itertools import chain\nimport nltk\nimport numpy as np\nfrom scipy import sparse as sp\nfrom sklearn.linear_model import LinearRegression as LR\n\n\nFLOAT = np.float32\nINT = np.int32\n\n\ndef ngram_context(strdoc, intdoc, vocabulary, n=1, wndo2=5, unkgram=None):\n '''sliding window around n-grams in a document\n Args:\n strdoc: list of tokens (as strings)\n intdoc: list of indices (as ints); len(intdoc) == len(strdoc)\n vocabulary: n-gram vocabulary (set of n-grams or dict with n-grams as keys)\n n: n in n-gram\n wndo2: half the window size\n unkgram: map n-grams not in vocabulary to this n-gram; if None does not yield such n-grams\n Returns:\n (n-gram, int generator) generator over (n-gram, context window pairs)\n '''\n\n wndo2pn = wndo2+n\n unk = not unkgram is None\n for i, ngram in enumerate(nltk.ngrams(strdoc, n)):\n if ngram in vocabulary:\n yield ngram, chain(intdoc[max(i-wndo2, 0):i], intdoc[i+n:i+wndo2pn])\n elif unk:\n yield unkgram, chain(intdoc[max(i-wndo2, 0):i], intdoc[i+n:i+wndo2pn])\n\n\ndef counts2mat(featcoocs, featlist, shape, dtype):\n '''computes matrix from feature-word cooccurrence counts\n Args:\n featcoocs: dict mapping features to Counters\n featlist: list of features\n shape: matrix shape\n dtype: dtype of matrix\n Returns:\n sparse matrix in CSR format\n '''\n\n rows, cols, values = zip(*((i, j, count) for i, feat in enumerate(featlist) for j, count in featcoocs[feat].items()))\n return sp.coo_matrix((values, (rows, cols)), shape=shape, dtype=dtype).tocsr()\n\n\ndef cooc_matrix(corpus, featlist, wordlist, doc2wnd=ngram_context, unk=None, overlap=False, avg=False, wei=False, interval=1000000, verbose=False, comm=None, **kwargs):\n '''constructs feature, word cooccurrence matrix\n Args:\n corpus: iterable of lists of strings\n featlist: list of hashable features\n wordlist: list of strings\n doc2wnd: takes list of tokens, list of indices, and set of features and returns a (feature, index iterable) generator\n unk: map words not in wordlist to this token (must be in wordlist); if None excludes OOV words\n overlap: if True subtracts feature count from cooccurrence of feature with any word it contains; features must be iterable\n avg: uses average over window size rather than cooccurrence counts\n wei: weight co-occurring words by distance from window\n interval: number of documents between conversion to sparse matrix\n verbose: write context matrix construction progress\n comm: MPI Communicator; outputs are None for non-root processes\n kwargs: passed to doc2wnd\n Returns:\n cooccurrence matrix in CSR format, vector of feature counts, vector of word counts\n '''\n\n assert not (overlap and (avg or wei)), \"correcting for overlap not compatible with averaging or weighting\"\n\n featset = set(featlist)\n featcounts = Counter()\n F = len(featlist)\n unki = -1 if unk is None else wordlist.index(unk)\n word2index = {word: i for i, word in enumerate(wordlist)}\n wordcounts = Counter()\n V = len(wordlist)\n\n rank, size = (0, 1) if comm is None else (comm.rank, comm.size)\n write = lambda msg: sys.stdout.write(msg) and sys.stdout.flush()\n dtype = FLOAT if (avg or wei) else INT\n if not rank:\n matrix = sp.csr_matrix((F, V), dtype=dtype)\n featcoocs = defaultdict(lambda: Counter())\n\n for i, doc in enumerate(corpus):\n if i%size == rank:\n indices = [word2index.get(word, unki) for word in doc]\n wordcounts.update(indices)\n if avg:\n for feat, window in doc2wnd(doc, indices, featset, 
**kwargs):\n window = list(window)\n if window:\n increment = 1.0/len(window)\n cooccounts = featcoocs[feat]\n for index in window:\n cooccounts[index] += increment\n featcounts[feat] += 1\n elif wei:\n for feat, window in doc2wnd(doc, indices, featset, **kwargs):\n window = list(window)\n if window:\n length = len(window)\n half = int(length/2)\n recip = 1.0/length\n cooccounts = featcoocs[feat]\n for j, index in enumerate(window[:half]):\n cooccounts[index] += recip/(half-j)\n for j, index in enumerate(window[half:]):\n cooccounts[index] += recip/(j+1)\n featcounts[feat] += 1\n else:\n for feat, window in doc2wnd(doc, indices, featset, **kwargs):\n featcoocs[feat].update(window)\n featcounts[feat] += 1\n if not (i+1)%interval:\n if rank:\n comm.send(counts2mat(featcoocs, featlist, (F, V), dtype), dest=0)\n else:\n matrix += sum((comm.recv(source=j) for j in range(1, size)), counts2mat(featcoocs, featlist, (F, V), dtype))\n if verbose:\n write('\\rProcessed '+str(i+1)+' Documents; Sparsity: '+str(matrix.nnz)+'/'+str(F*V)+'; Coverage: '+str((matrix.sum(1)>0).sum())+'/'+str(F))\n featcoocs = defaultdict(lambda: Counter())\n\n if size > 1:\n featcounts = comm.reduce(featcounts, root=0)\n wordcounts = comm.reduce(wordcounts, root=0)\n if rank:\n comm.send(counts2mat(featcoocs, featlist, (F, V), dtype), dest=0)\n return 3*[None]\n matrix += sum((comm.recv(source=j) for j in range(1, size)), counts2mat(featcoocs, featlist, (F, V), dtype))\n\n if overlap:\n for feat, coocs in featcoocs.items():\n count = featcounts[feat]\n for word in feat:\n index = word2index.get(word)\n if not index is None:\n coocs[index] -= count\n if verbose:\n write('\\rProcessed '+str(i+1)+' Documents; Sparsity: '+str(matrix.nnz)+'/'+str(F*V)+'; Coverage: '+str((matrix.sum(1)>0).sum())+'/'+str(F)+'\\n')\n return matrix, np.array([featcounts[feat] for feat in featlist], dtype=INT), np.array([wordcounts[word2index[word]] for word in wordlist], dtype=INT)\n\n\ndef symmetric_cooc_matrix(corpus, wordlist, unk=None, **kwargs):\n '''constructs symmetric word, word cooccurrence matrix\n Args:\n corpus: iterable of lists of strings\n wordlist: list of strings\n unk: map words not in wordlist to this token (must be in wordlist); if None excludes OOV words\n kwargs: passed to cooc_matrix\n Returns:\n cooccurrence matrix in CSR format, vector of word counts\n '''\n\n unkgram = None if unk is None else (unk,)\n return cooc_matrix(corpus, [(word,) for word in wordlist], wordlist, unk=unk, n=1, unkgram=unkgram, **kwargs)[:2]\n\n\ndef linear_transform(cooc_matrix, word_embeddings, word_counts, Regression=LR, weights=None, **kwargs):\n '''learns linear transform from context vectors to original embeddings\n Args:\n cooc_matrix: cooccurrence matrix of size (V, V)\n word_embeddings: embedding matrix of size (V, d)\n word_counts: word count vector of length V\n Regression: regression class (from sklearn.linear_model)\n weights: sample weight vector of length V; ignored if None\n kwargs: passed to Regression\n Returns:\n fitted Regression object\n '''\n\n select = word_counts > 0\n if not weights is None:\n select *= weights > 0\n weights = weights[select]\n\n return Regression(**kwargs).fit(cooc_matrix[select].dot(word_embeddings) / word_counts[select,None], word_embeddings[select], weights)\n"
] |
[
[
"scipy.sparse.coo_matrix",
"numpy.array",
"scipy.sparse.csr_matrix"
]
] |
kienduynguyen/BoxeR
|
[
"a7d9456141e9fb4f6da53c961bda54886024ee75"
] |
[
"e2edet/module/box3d_transformer.py"
] |
[
"import math\n\nimport torch\nfrom torch import nn\n\nfrom .box_attention import Box3dAttention\nfrom e2edet.utils.general import (\n flatten_with_shape,\n inverse_sigmoid,\n get_clones,\n get_activation_fn,\n get_proposal_pos_embed,\n normalize_period,\n)\n\n\nclass Box3dTransformer(nn.Module):\n def __init__(\n self,\n d_model=256,\n nhead=8,\n nlevel=4,\n num_encoder_layers=6,\n num_decoder_layers=6,\n dim_feedforward=1024,\n dropout=0.1,\n activation=\"relu\",\n num_queries=300,\n ref_size=4,\n ):\n super().__init__()\n\n encoder_layer = Box3dTransformerEncoderLayer(\n d_model, nhead, nlevel, dim_feedforward, dropout, activation\n )\n\n self.encoder = Box3dTransformerEncoder(\n d_model, encoder_layer, num_encoder_layers, num_queries\n )\n\n decoder_layer = Box3dTransformerDecoderLayer(\n d_model, nhead, nlevel, dim_feedforward, dropout, activation\n )\n\n self.decoder = Box3dTransformerDecoder(\n d_model, decoder_layer, num_decoder_layers\n )\n\n self.ref_size = ref_size\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n for m in self.modules():\n if isinstance(m, Box3dAttention):\n m._reset_parameters()\n\n def _create_ref_windows(self, tensor_list):\n angle_ratio = torch.FloatTensor(\n [\n 0,\n 2 * math.pi / 3,\n -2 * math.pi / 3,\n 0,\n 2 * math.pi / 3,\n -2 * math.pi / 3,\n 0,\n 2 * math.pi / 2,\n ]\n ).to(tensor_list[0])\n angle_ratio = normalize_period(angle_ratio, offset=0.5, period=math.pi * 2)\n h_ratio = torch.FloatTensor([self.ref_size] * 8).to(tensor_list[0])\n w_ratio = torch.FloatTensor([self.ref_size] * 8).to(tensor_list[0])\n\n ref_windows = []\n\n for tensor in tensor_list:\n b, _, h, w = tensor.shape\n\n y_embed = torch.arange(h, device=tensor.device, dtype=tensor.dtype) + 0.5\n y_embed = y_embed / h\n x_embed = torch.arange(w, device=tensor.device, dtype=tensor.dtype) + 0.5\n x_embed = x_embed / w\n\n y_embed, x_embed = torch.meshgrid(y_embed, x_embed, indexing=\"ij\")\n\n y_embed = y_embed.unsqueeze(0).expand(b, -1, -1)\n x_embed = x_embed.unsqueeze(0).expand(b, -1, -1)\n\n h_embed = torch.ones_like(y_embed).unsqueeze(-1) * h_ratio / h\n w_embed = torch.ones_like(x_embed).unsqueeze(-1) * w_ratio / w\n angle_embed = torch.ones_like(x_embed).unsqueeze(-1) * angle_ratio\n\n x_embed = x_embed.unsqueeze(-1).expand_as(angle_embed)\n y_embed = y_embed.unsqueeze(-1).expand_as(angle_embed)\n\n ref_box = torch.stack(\n [x_embed, y_embed, w_embed, h_embed, angle_embed], dim=-1\n ).flatten(1, 2)\n\n ref_windows.append(ref_box)\n\n ref_windows = torch.cat(ref_windows, dim=1)\n\n return ref_windows\n\n def forward(self, src, pos):\n assert pos is not None, \"position encoding is required!\"\n src_pos = []\n src_ref_windows = self._create_ref_windows(src)\n src, _, src_shape = flatten_with_shape(src, None)\n\n for pe in pos:\n b, c = pe.shape[:2]\n pe = pe.view(b, c, -1).transpose(1, 2)\n src_pos.append(pe)\n src_pos = torch.cat(src_pos, dim=1)\n src_start_index = torch.cat(\n [src_shape.new_zeros(1), src_shape.prod(1).cumsum(0)[:-1]]\n )\n\n output = self.encoder(src, src_pos, src_shape, src_start_index, src_ref_windows)\n out_embed, dec_embed, dec_ref_windows, dec_pos = output\n\n hs = self.decoder(\n dec_embed, dec_pos, out_embed, src_shape, src_start_index, dec_ref_windows\n )\n\n return hs, dec_ref_windows, out_embed, src_ref_windows\n\n\nclass Box3dTransformerEncoder(nn.Module):\n def __init__(self, d_model, encoder_layer, num_layers, num_queries):\n super().__init__()\n self.layers = 
get_clones(encoder_layer, num_layers)\n\n self.detector = None\n self.num_queries = num_queries\n self.d_model = d_model\n self.enc_linear = nn.Sequential(\n nn.Linear(d_model, d_model), nn.LayerNorm(d_model)\n )\n\n def _get_enc_proposals(self, output, ref_windows):\n b, l = output.shape[:2]\n output_embed = output\n\n tmp_ref_windows = self.detector[0].bbox_embed(output_embed)\n num_references = self.detector[0].num_references\n\n tmp_ref_windows = tmp_ref_windows.view(b, l, num_references, 7)\n ref_windows = ref_windows[..., :num_references, :]\n\n tmp_ref_box, tmp_ref_height = tmp_ref_windows.split((5, 2), dim=-1)\n tmp_ref_box = tmp_ref_box + inverse_sigmoid(ref_windows)\n out_ref_windows = torch.cat([tmp_ref_box, tmp_ref_height], dim=-1).sigmoid()\n out_ref_windows = out_ref_windows.view(b, l * num_references, 7)\n\n ref_windows_valid = (\n (ref_windows[..., :2] > 0.001) & (ref_windows[..., :2] < 0.999)\n ).all(-1)\n src_mask = ~ref_windows_valid\n\n out_logits = (\n self.detector[0]\n .class_embed(output_embed)\n .view(b, l, num_references, -1)[..., 0]\n )\n out_logits = out_logits.masked_fill(src_mask, -65504.0)\n out_logits = out_logits.view(b, l * num_references)\n _, indexes = torch.topk(out_logits, self.num_queries, dim=1, sorted=False)\n\n indexes = indexes.unsqueeze(-1)\n out_ref_windows = torch.gather(\n out_ref_windows, 1, indexes.expand(-1, -1, out_ref_windows.shape[-1])\n )\n out_ref_windows = out_ref_windows.detach()\n\n pos = get_proposal_pos_embed(out_ref_windows[..., :2], self.d_model)\n size = get_proposal_pos_embed(out_ref_windows[..., 2:4], self.d_model)\n rad = get_proposal_pos_embed(out_ref_windows[..., [4, 4]], self.d_model)\n out_pos = pos + size + rad\n\n indexes = indexes.expand(-1, -1, output.shape[-1]).div(\n num_references, rounding_mode=\"floor\"\n )\n out_embed = torch.gather(output_embed, 1, indexes)\n out_embed = self.enc_linear(out_embed.detach())\n\n return out_embed, out_ref_windows, out_pos\n\n def forward(self, src, pos, src_shape, src_start_idx, ref_windows):\n output = src\n\n for layer in self.layers:\n output = layer(output, pos, src_shape, src_start_idx, ref_windows)\n\n out_embed, out_ref_windows, out_pos = self._get_enc_proposals(\n output, ref_windows\n )\n\n return output, out_embed, out_ref_windows, out_pos\n\n\nclass Box3dTransformerDecoder(nn.Module):\n def __init__(self, d_model, decoder_layer, num_layers):\n super().__init__()\n\n self.layers = get_clones(decoder_layer, num_layers)\n\n def forward(\n self, tgt, query_pos, memory, memory_shape, memory_start_idx, ref_windows\n ):\n output = tgt\n inter = []\n\n for layer in self.layers:\n output = layer(\n output, query_pos, memory, memory_shape, memory_start_idx, ref_windows\n )\n inter.append(output)\n\n return torch.stack(inter)\n\n\nclass Box3dTransformerEncoderLayer(nn.Module):\n def __init__(self, d_model, nhead, nlevel, dim_feedforward, dropout, activation):\n super().__init__()\n self.self_attn = Box3dAttention(d_model, nlevel, nhead, with_rotation=False)\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n self.activation = get_activation_fn(activation)\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward(self, src, pos, src_shape, src_start_idx, ref_windows):\n src2 = 
self.self_attn(\n self.with_pos_embed(src, pos),\n src,\n src_shape,\n None,\n src_start_idx,\n None,\n ref_windows,\n )\n src = src + self.dropout1(src2[0])\n src = self.norm1(src)\n\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n\n return src\n\n\nclass Box3dTransformerDecoderLayer(nn.Module):\n def __init__(self, d_model, nhead, nlevel, dim_feedforward, dropout, activation):\n super().__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n self.multihead_attn = Box3dAttention(d_model, nlevel, nhead, with_rotation=True)\n\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.norm3 = nn.LayerNorm(d_model)\n\n self.dropout = nn.Dropout(dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n self.activation = get_activation_fn(activation)\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward(\n self, tgt, query_pos, memory, memory_shape, memory_start_idx, ref_windows\n ):\n q = k = self.with_pos_embed(tgt, query_pos)\n q = q.transpose(0, 1)\n k = k.transpose(0, 1)\n v = tgt.transpose(0, 1)\n\n tgt2 = self.self_attn(q, k, v)[0]\n tgt2 = tgt2.transpose(0, 1)\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n\n tgt2 = self.multihead_attn(\n self.with_pos_embed(tgt, query_pos),\n memory,\n memory_shape,\n None,\n memory_start_idx,\n None,\n ref_windows,\n )[0]\n\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\n tgt = tgt + self.dropout3(tgt2)\n tgt = self.norm3(tgt)\n\n return tgt\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.MultiheadAttention",
"torch.cat",
"torch.gather",
"torch.arange",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.FloatTensor",
"torch.nn.init.xavier_uniform_",
"torch.topk",
"torch.meshgrid",
"torch.ones_like",
"torch.stack"
]
] |
UTHSCSA-NAL/shrinkage
|
[
"388c35e21fb7dfdccb272533712b97f8e01e4581"
] |
[
"OASchart.py"
] |
[
"import os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n################################################################################\r\n\r\n# compute OAS intensity, \"tr\" denoting the squared Frobenius norm of the matrix: Tr(S*S)\r\ndef shrinkage(n,p,tr):\r\n\treturn min(1.0, ( (1.0-2.0/p)*tr+p*p )/( (n+1-2.0/p)*(tr-p) ) )\r\n \r\n# project a covariance matrix to produce a correlation matrix\r\ndef tocorr(m):\r\n\treturn np.minimum(1,np.maximum(-1,m-np.diag(np.diag(m))+np.eye(m.shape[0]) ))\r\n\r\n# normalize a set of time series to zero mean and unit L2 norm \r\ndef normalization(si):\t\r\n\ton=np.ones((1,si.shape[1]))\r\n\teps=0.000000001\r\n\tsi=si-np.mean(si,axis=1,keepdims=True).dot(on)\r\n\tsi=np.divide(si,np.maximum(np.sqrt(np.sum(si*si,axis=1,keepdims=True)).dot(on),eps*on) )\r\n\treturn si\t\r\n\r\n# compute a Pearson correlation matrix from a set of time series\r\ndef ts2corr(ts):\r\n\tts=normalization(ts)\r\n\treturn tocorr(ts.dot(np.transpose(ts)))\r\n\r\n# compute correlation matrix density\t\r\ndef corr2density(cr):\r\n\tp=cr.shape[0]\r\n\tv=np.sum(cr*cr)\r\n\treturn (v-p)/(p*p-p)\t\r\n\r\ndef myfmt(x):\r\n s = str(x)\r\n while s.endswith(\"0\"):\r\n \ts=s[:-1]\r\n return s\r\n\r\n\r\ndef makeColors(n):\r\n\trr=[]\r\n\tfor i in range(0,n):\r\n\t\tif (float(i)/float(n))<=0.5:\r\n\t\t\tr=0\r\n\t\t\tg=0\r\n\t\t\tb=int( 255.0*float(i)/float(n)*2.0 )\r\n\t\t\the='#%02x%02x%02x' % (r,g,b)\r\n\t\t\trr.append( he.upper() )\r\n\t\telse:\r\n\t\t\ts=(float(i)/float(n)-0.5)*2.0\r\n\t\t\tr=int(225.0*s)\r\n\t\t\tg=int(125.0*s)\r\n\t\t\tb=int(255.0*(1.0-s))\r\n\t\t\the='#%02x%02x%02x' % (r,g,b)\r\n\t\t\trr.append( he.upper() )\t\r\n\treturn rr[::-1]\r\n\t\r\n\r\ndef makeChart(p,n_min,n_max,d_min,d_max,levels,files,output):\r\n\t# OAS intensity on the grid\r\n\tresx=np.linspace(np.log(float(n_min)),np.log(float(n_max)),501) \r\n\tresx=np.exp(resx)\r\n\tresy=np.linspace(np.log(d_min),np.log(d_max),501)\r\n\tresy=np.exp(resy)\r\n\txs=np.zeros((len(resx)))\r\n\tys=np.zeros((len(resy)))\r\n\tzs=np.zeros((len(resy),len(resx)))\r\n\tfor x in range(0,len(resx)):\r\n\t\tn=int(resx[x])\r\n\t\txs[x]=n\r\n\tfor y in range(0,len(resy)):\r\n\t\tys[y]=resy[y]\t\r\n\tfor x in range(0,len(resx)):\r\n\t\tn=int(resx[x])\r\n\t\tfor y in range(0,len(resy)): \r\n\t\t\ttr=p*(1.0-resy[y])+p*p*resy[y]\r\n\t\t\tzs[y,x]=shrinkage(n,p,tr)\r\n\r\n\t# plot the chart\t\r\n\tplt.title('OAS intensity (p='+str(p)+')',fontsize=22)\r\n\tplt.xlabel('number of time points n',fontsize=18)\r\n\tplt.ylabel('Density',fontsize=18)\r\n\tplt.xscale('log')\r\n\tplt.yscale('log')\r\n\t\r\n\ttmp=args.levels.split(',')\r\n\tls=[]\r\n\tfor s in tmp:\r\n\t\tls.append(float(s))\r\n\tls.sort()\r\n\t\r\n\t\r\n\t# colors\r\n\tcols=makeColors(len(ls))\r\n\t\r\n\t# contours\r\n\tcontours = plt.contour(xs,ys,zs,levels=ls,colors=cols)\r\n\r\n\t# connectomes\r\n\tif len(files)>0:\r\n\t\txxs=[]\r\n\t\tyys=[]\r\n\t\tfor i in range(0,len(files)):\r\n\t\t\tdat=normalization(np.loadtxt(files[i]))\r\n\t\t\tcr=tocorr(dat.dot(np.transpose(dat)))\r\n\t\t\txxs.append(dat.shape[1])\r\n\t\t\tyys.append(corr2density(cr))\r\n\t\tplt.scatter(xxs,yys,marker='o',c=[1.0,0,0],s=np.ones((len(xxs)))*5.0 )\r\n\r\n\t# contour labels\r\n\tlocs=[]\r\n\tcpt=0\r\n\tfor line in contours.collections:\r\n\t\tfor path in line.get_paths():\r\n\t\t\tidxs=np.argsort(path.vertices[:,1])\r\n\t\t\tidx=idxs[int( len(idxs)*0.5 )]\r\n\t\t\tlocs.append(path.vertices[idx,:])\r\n\t\tcpt=cpt+1\r\n\tplt.clabel(contours, inline=1, fontsize=18,fmt=myfmt,inline_spacing=3, 
rightside_up=True,manual=locs)\r\n\tplt.xlim([np.min(resx),np.max(resx)])\r\n\t\r\n\taxes= plt.axes()\r\n\tplt.xticks(fontsize=16)\r\n\tplt.yticks(fontsize=16)\r\n\tplt.savefig(output,bbox_inches='tight')\r\n\r\n\r\n\r\n\r\ndef checkFiles(lis):\r\n\tr=[]\r\n\tfi=open(lis,'r')\r\n\ttmp=fi.readlines()\r\n\tfi.close()\r\n\tfor s in tmp:\r\n\t\twhile s.endswith('\\n') or s.endswith('\\r'):\r\n\t\t\ts=s[:-1]\r\n\t\tif os.path.exists(s):\r\n\t\t\tr.append(s)\r\n\t\telse:\r\n\t\t\tprint('ERROR: file do not exist '+s)\r\n\treturn r\r\n\t\t\r\n################################################################################\r\nif __name__ == \"__main__\":\r\n\tfrom argparse import ArgumentParser, RawTextHelpFormatter\r\n\tparser = ArgumentParser(description=\"OAS intensity charts (see publication)\",formatter_class=RawTextHelpFormatter)\r\n\tparser.add_argument(\"-d\", \"--dimension\",help=\"Dimension of the correlation matrices.\", required=True, type=int)\r\n\tparser.add_argument(\"-li\", \"--list\",help=\"List of files containing time series (to add corresponding Pearson correlation matrices to the chart). Not required.\", required=False,default='')\r\n\tparser.add_argument(\"-le\", \"--levels\",help=\"OAS intenstiy level sets to display, comma separated without space (default is 0.9,0.5,0.25,0.1,0.05,0.025,0.01,0.005)\", required=False,default='0.9,0.5,0.25,0.1,0.05,0.025,0.01,0.005')\t\r\n\tparser.add_argument(\"-o\", \"--output\",help=\"Output plot (default is OAS_chart.png)\",required=False,default='OAS_chart.png')\r\n\t\r\n\tparser.add_argument(\"--n_min\",help=\"Smallest number of time points (default is 15, should be larger than 10).\",required=False,default='15',type=int)\r\n\tparser.add_argument(\"--n_max\",help=\"Largest number of time points (default is 1500, should be larger than two times n_min).\",required=False,default='1500',type=int)\r\n\tparser.add_argument(\"--d_min\",help=\"Smallest density (default is 0.01, should be larger than 0).\",required=False,default='0.01',type=float)\r\n\tparser.add_argument(\"--d_max\",help=\"Largest density (default is 1.0, should be smaller than 1).\",required=False,default='1.0',type=float)\r\n\t\r\n\t\r\n\targs = parser.parse_args()\r\n\t\r\n\tif len(args.list)>0:\r\n\t\tif os.path.exists(args.list):\r\n\t\t\tfiles=checkFiles(args.list)\r\n\t\t\tmakeChart(args.dimension,args.n_min,args.n_max,args.d_min,args.d_max,args.levels,files,args.output)\r\n\t\telse:\r\n\t\t\tprint('ERROR: file do not exist '+args.list)\r\n\telse:\r\n\t\tprint('Chart without data point')\r\n\t\tmakeChart(args.dimension,args.n_min,args.n_max,args.d_min,args.d_max,args.levels,[],args.output)\r\n\t\t\r\n"
] |
[
[
"numpy.diag",
"matplotlib.pyplot.axes",
"numpy.max",
"numpy.mean",
"numpy.exp",
"numpy.eye",
"numpy.log",
"numpy.min",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"numpy.transpose",
"numpy.argsort",
"numpy.sum",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.clabel",
"matplotlib.pyplot.yscale",
"numpy.ones",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.yticks",
"numpy.loadtxt"
]
] |
sbenthall/pycid
|
[
"114e1fdcd4bf97c99ca5718fe5a66bcef41a6baf"
] |
[
"pycid/examples/story_macids.py"
] |
[
"import numpy as np\nfrom pgmpy.factors.discrete import TabularCPD\n\nfrom pycid.core.cpd import noisy_copy\nfrom pycid.core.macid import MACID\n\n\ndef prisoners_dilemma() -> MACID:\n \"\"\"MACIM representation of the canonical prisoner's dilemma.\n\n The prisoner's dilemma is a simultaneous symmetric two-player game\n with payoffs corresponding to the following normal form game -\n the row player is agent 1 and the column player is agent 2:\n\n +----------+----------+----------+\n | |Cooperate | Defect |\n +----------+----------+----------+\n |Cooperate | -1, -1 | -3, 0 |\n +----------+----------+----------+\n | Defect | 0, -3 | -2, -2 |\n +----------+----------+----------+\n\n This game has one pure NE: (defect, defect)\n \"\"\"\n macid = MACID(\n [(\"D1\", \"U1\"), (\"D1\", \"U2\"), (\"D2\", \"U2\"), (\"D2\", \"U1\")],\n agent_decisions={1: [\"D1\"], 2: [\"D2\"]},\n agent_utilities={1: [\"U1\"], 2: [\"U2\"]},\n )\n\n d1_domain = [\"c\", \"d\"]\n d2_domain = [\"c\", \"d\"]\n agent1_payoff = np.array([[-1, -3], [0, -2]])\n agent2_payoff = np.transpose(agent1_payoff)\n\n macid.add_cpds(\n D1=d1_domain,\n D2=d2_domain,\n U1=lambda D1, D2: agent1_payoff[d1_domain.index(D1), d2_domain.index(D2)],\n U2=lambda D1, D2: agent2_payoff[d1_domain.index(D1), d2_domain.index(D2)],\n )\n return macid\n\n\ndef battle_of_the_sexes() -> MACID:\n \"\"\"MACIM representation of the battle of the sexes game.\n\n The battle of the sexes game (also known as Bach or Stravinsky)\n is a simultaneous symmetric two-player game with payoffs\n corresponding to the following normal form game -\n the row player is Female and the column player is Male:\n\n +----------+----------+----------+\n | |Opera | Football |\n +----------+----------+----------+\n | Opera | 3, 2 | 0, 0 |\n +----------+----------+----------+\n | Football | 0, 0 | 2, 3 |\n +----------+----------+----------+\n\n This game has two pure NE: (Opera, Football) and (Football, Opera)\n \"\"\"\n macid = MACID(\n [(\"D_F\", \"U_F\"), (\"D_F\", \"U_M\"), (\"D_M\", \"U_M\"), (\"D_M\", \"U_F\")],\n agent_decisions={\"M\": [\"D_F\"], \"F\": [\"D_M\"]},\n agent_utilities={\"M\": [\"U_F\"], \"F\": [\"U_M\"]},\n )\n\n d_f_domain = [\"O\", \"F\"]\n d_m_domain = [\"O\", \"F\"]\n agent_f_payoff = np.array([[3, 0], [0, 2]])\n agent_m_payoff = np.array([[2, 0], [0, 3]])\n\n macid.add_cpds(\n D_F=d_f_domain,\n D_M=d_m_domain,\n U_F=lambda D_F, D_M: agent_f_payoff[d_f_domain.index(D_F), d_m_domain.index(D_M)],\n U_M=lambda D_F, D_M: agent_m_payoff[d_f_domain.index(D_F), d_m_domain.index(D_M)],\n )\n return macid\n\n\ndef matching_pennies() -> MACID:\n \"\"\"MACIM representation of the matching pennies game.\n\n The matching pennies game is a symmetric two-player game\n with payoffs corresponding to the following normal form game -\n the row player is agent 1 and the column player is agent 2:\n\n +----------+----------+----------+\n | |Heads | Tails |\n +----------+----------+----------+\n | Heads | +1, -1 | -1, +1 |\n +----------+----------+----------+\n | Tails | -1, +1 | +1, -1 |\n +----------+----------+----------+\n\n This game has no pure NE, but has a mixed NE where\n each player chooses Heads or Tails with equal probability.\n \"\"\"\n macid = MACID(\n [(\"D1\", \"U1\"), (\"D1\", \"U2\"), (\"D2\", \"U2\"), (\"D2\", \"U1\")],\n agent_decisions={1: [\"D1\"], 2: [\"D2\"]},\n agent_utilities={1: [\"U1\"], 2: [\"U2\"]},\n )\n\n d1_domain = [\"H\", \"T\"]\n d2_domain = [\"H\", \"T\"]\n agent1_payoff = np.array([[1, -1], [-1, 1]])\n agent2_payoff = np.array([[-1, 1], [1, 
-1]])\n\n macid.add_cpds(\n D1=d1_domain,\n D2=d2_domain,\n U1=lambda D1, D2: agent1_payoff[d1_domain.index(D1), d2_domain.index(D2)],\n U2=lambda D1, D2: agent2_payoff[d1_domain.index(D1), d2_domain.index(D2)],\n )\n return macid\n\n\ndef taxi_competition() -> MACID:\n \"\"\"MACIM representation of the Taxi Competition game.\n\n \"Taxi Competition\" is an example introduced in\n \"Equilibrium Refinements for Multi-Agent Influence Diagrams: Theory and Practice\"\n by Hammond, Fox, Everitt, Abate & Wooldridge, 2021:\n\n D2\n +----------+----------+----------+\n | taxi 1 | expensive| cheap |\n +----------+----------+----------+\n |expensive | 2 | 5 |\n D1 +----------+----------+----------+\n | cheap | 3 | 1 |\n +----------+----------+----------+\n\n D2\n +----------+----------+----------+\n | taxi 2 | expensive| cheap |\n +----------+----------+----------+\n |expensive | 2 | 3 |\n D1 +----------+----------+----------+\n | cheap | 5 | 1 |\n +----------+----------+----------+\n\n There are 3 pure strategy NE and 1 pure SPE.\n \"\"\"\n macid = MACID(\n [(\"D1\", \"D2\"), (\"D1\", \"U1\"), (\"D1\", \"U2\"), (\"D2\", \"U2\"), (\"D2\", \"U1\")],\n agent_decisions={1: [\"D1\"], 2: [\"D2\"]},\n agent_utilities={1: [\"U1\"], 2: [\"U2\"]},\n )\n\n d1_domain = [\"e\", \"c\"]\n d2_domain = [\"e\", \"c\"]\n agent1_payoff = np.array([[2, 5], [3, 1]])\n agent2_payoff = agent1_payoff.T\n\n macid.add_cpds(\n D1=d1_domain,\n D2=d2_domain,\n U1=lambda D1, D2: agent1_payoff[d1_domain.index(D1), d2_domain.index(D2)],\n U2=lambda D1, D2: agent2_payoff[d1_domain.index(D1), d2_domain.index(D2)],\n )\n return macid\n\n\ndef modified_taxi_competition() -> MACID:\n \"\"\"Modifying the payoffs in the taxi competition example\n so that there is a tie break (if taxi 1 chooses to stop\n in front of the expensive hotel, taxi 2 is indifferent\n between their choices.)\n\n - There are now two SPNE\n\n D2\n +----------+----------+----------+\n | taxi 1 | expensive| cheap |\n +----------+----------+----------+\n |expensive | 2 | 5 |\n D1 +----------+----------+----------+\n | cheap | 3 | 1 |\n +----------+----------+----------+\n\n D2\n +----------+----------+----------+\n | taxi 2 | expensive| cheap |\n +----------+----------+----------+\n |expensive | 2 | 3 |\n D1 +----------+----------+----------+\n | cheap | 5 | 5 |\n +----------+----------+----------+\n\n \"\"\"\n macid = MACID(\n [(\"D1\", \"D2\"), (\"D1\", \"U1\"), (\"D1\", \"U2\"), (\"D2\", \"U2\"), (\"D2\", \"U1\")],\n agent_decisions={1: [\"D1\"], 2: [\"D2\"]},\n agent_utilities={1: [\"U1\"], 2: [\"U2\"]},\n )\n\n d1_domain = [\"e\", \"c\"]\n d2_domain = [\"e\", \"c\"]\n agent1_payoff = np.array([[2, 5], [3, 1]])\n agent2_payoff = np.array([[2, 3], [5, 5]])\n\n macid.add_cpds(\n D1=d1_domain,\n D2=d2_domain,\n U1=lambda D1, D2: agent1_payoff[d1_domain.index(D1), d2_domain.index(D2)],\n U2=lambda D1, D2: agent2_payoff[d1_domain.index(D1), d2_domain.index(D2)],\n )\n return macid\n\n\ndef robot_warehouse() -> MACID:\n r\"\"\"\n Implementation of AAMAS robot warehouse example\n\n - Robot 1 collects packages, and can choose to\n hurry or not (D1)\n - Hurrying can be quicker (Q) but lead to\n breakages (B)\n - Robot 2 tidies up, and can choose to repair\n (R) breakages or not (D2)\n - Conducting repairs can obstruct (O) robot 1\n - Robot 1 rewarded for speed and lack of\n breakages (U1), robot 2 is rewarded for things\n being in a state of repair (U2)\n\n \"\"\"\n macid = MACID(\n [\n (\"D1\", \"Q\"),\n (\"D1\", \"B\"),\n (\"Q\", \"U1\"),\n (\"B\", \"U1\"),\n (\"B\", 
\"R\"),\n (\"B\", \"D2\"),\n (\"D2\", \"R\"),\n (\"D2\", \"O\"),\n (\"O\", \"U1\"),\n (\"R\", \"U2\"),\n ],\n agent_decisions={\n 1: [\"D1\"],\n 2: [\"D2\"],\n },\n agent_utilities={\n 1: [\"U1\"],\n 2: [\"U2\"],\n },\n )\n\n macid.add_cpds(\n D1=[0, 1],\n D2=[0, 1],\n Q=lambda D1: noisy_copy(D1, domain=[0, 1]),\n B=lambda D1: noisy_copy(D1, probability=0.3, domain=[0, 1]),\n R=lambda B, D2: int(not B or D2),\n O=lambda D2: noisy_copy(D2, probability=0.6, domain=[0, 1]),\n U1=lambda Q, B, O: int(Q and not O) - int(B),\n U2=lambda R: R,\n )\n return macid\n\n\ndef tree_doctor() -> MACID:\n macid = MACID(\n [\n (\"PT\", \"E\"),\n (\"PT\", \"TS\"),\n (\"PT\", \"BP\"),\n (\"TS\", \"TDoc\"),\n (\"TS\", \"TDead\"),\n (\"TDead\", \"V\"),\n (\"TDead\", \"Tree\"),\n (\"TDoc\", \"TDead\"),\n (\"TDoc\", \"Cost\"),\n (\"TDoc\", \"BP\"),\n (\"BP\", \"V\"),\n ],\n agent_decisions={0: [\"PT\", \"BP\"], 1: [\"TDoc\"]},\n agent_utilities={0: [\"E\", \"V\"], 1: [\"Tree\", \"Cost\"]},\n )\n\n return macid\n\n\ndef forgetful_movie_star() -> MACID:\n macid = MACID(\n [\n (\"S\", \"D11\"),\n (\"S\", \"D12\"),\n (\"D2\", \"U2\"),\n (\"D2\", \"U11\"),\n (\"D11\", \"U2\"),\n (\"D11\", \"U11\"),\n (\"D11\", \"U12\"),\n (\"D12\", \"U12\"),\n ],\n agent_decisions={1: [\"D11\", \"D12\"], 2: [\"D2\"]},\n agent_utilities={1: [\"U11\", \"U12\"], 2: [\"U2\"]},\n )\n return macid\n\n\ndef subgame_difference() -> MACID:\n macid = MACID(\n [\n (\"N\", \"D1\"),\n (\"N\", \"U1_A\"),\n (\"N\", \"U2_A\"),\n (\"D1\", \"U1_A\"),\n (\"D1\", \"U2_A\"),\n (\"D1\", \"U1_B\"),\n (\"D1\", \"U2_B\"),\n (\"D1\", \"D2\"),\n (\"D2\", \"U1_B\"),\n (\"D2\", \"U2_B\"),\n ],\n agent_decisions={1: [\"D1\"], 2: [\"D2\"]},\n agent_utilities={1: [\"U1_A\", \"U1_B\"], 2: [\"U2_A\", \"U2_B\"]},\n )\n return macid\n\n\ndef road_example() -> MACID:\n macid = MACID(\n [\n (\"S1W\", \"B1W\"),\n (\"S1W\", \"U1W\"),\n (\"S1E\", \"B1E\"),\n (\"S1E\", \"U1E\"),\n (\"B1W\", \"U1W\"),\n (\"B1W\", \"U1E\"),\n (\"B1W\", \"B2E\"),\n (\"B1W\", \"U2W\"),\n (\"B1W\", \"B2W\"),\n (\"B1E\", \"U1E\"),\n (\"B1E\", \"U1W\"),\n (\"B1E\", \"B2E\"),\n (\"B1E\", \"U2E\"),\n (\"B1E\", \"B2W\"),\n (\"S2W\", \"B2W\"),\n (\"S2W\", \"U2W\"),\n (\"S2E\", \"B2E\"),\n (\"S2E\", \"U2E\"),\n (\"B2W\", \"U1W\"),\n (\"B2W\", \"U2W\"),\n (\"B2W\", \"U2E\"),\n (\"B2W\", \"B3E\"),\n (\"B2W\", \"U3W\"),\n (\"B2W\", \"B3W\"),\n (\"B2E\", \"U1E\"),\n (\"B2E\", \"U2E\"),\n (\"B2E\", \"U2W\"),\n (\"B2E\", \"B3E\"),\n (\"B2E\", \"U3E\"),\n (\"B2E\", \"B3W\"),\n (\"S3W\", \"B3W\"),\n (\"S3W\", \"U3W\"),\n (\"S3E\", \"B3E\"),\n (\"S3E\", \"U3E\"),\n (\"B3W\", \"U3W\"),\n (\"B3W\", \"U3E\"),\n (\"B3W\", \"U2W\"),\n (\"B3E\", \"U3E\"),\n (\"B3E\", \"U3W\"),\n (\"B3E\", \"U2E\"),\n ],\n agent_decisions={\n \"1W\": [\"B1W\"],\n \"1E\": [\"B1E\"],\n \"2W\": [\"B2W\"],\n \"2E\": [\"B2E\"],\n \"3W\": [\"B3W\"],\n \"3E\": [\"B3E\"],\n },\n agent_utilities={\n \"1W\": [\"U1W\"],\n \"1E\": [\"U1E\"],\n \"2W\": [\"U2W\"],\n \"2E\": [\"U2E\"],\n \"3W\": [\"U3W\"],\n \"3E\": [\"U3E\"],\n },\n )\n\n return macid\n\n\ndef politician() -> MACID:\n macid = MACID(\n [(\"D1\", \"I\"), (\"T\", \"I\"), (\"T\", \"U2\"), (\"I\", \"D2\"), (\"R\", \"D2\"), (\"D2\", \"U1\"), (\"D2\", \"U2\")],\n agent_decisions={1: [\"D1\"], 2: [\"D2\"]},\n agent_utilities={1: [\"U1\"], 2: [\"U2\"]},\n )\n return macid\n\n\ndef umbrella() -> MACID:\n macid = MACID(\n [(\"W\", \"F\"), (\"W\", \"A\"), (\"F\", \"UM\"), (\"UM\", \"A\")],\n agent_decisions={1: [\"UM\"]},\n agent_utilities={1: [\"A\"]},\n )\n\n cpd_w = TabularCPD(\"W\", 2, 
np.array([[0.6], [0.4]]))\n cpd_f = TabularCPD(\"F\", 2, np.array([[0.8, 0.3], [0.2, 0.7]]), evidence=[\"W\"], evidence_card=[2])\n cpd_a = TabularCPD(\n \"A\", 3, np.array([[0, 1, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]), evidence=[\"W\", \"UM\"], evidence_card=[2, 2]\n )\n macid.add_cpds(cpd_w, cpd_f, cpd_a, UM=[0, 1])\n return macid\n\n\ndef sequential() -> MACID:\n macid = MACID(\n [(\"D1\", \"U1\"), (\"D1\", \"U2\"), (\"D1\", \"D2\"), (\"D2\", \"U1\"), (\"D2\", \"U2\")],\n agent_decisions={0: [\"D1\"], 1: [\"D2\"]},\n agent_utilities={0: [\"U1\"], 1: [\"U2\"]},\n )\n return macid\n\n\ndef signal() -> MACID:\n macid = MACID(\n [(\"X\", \"D1\"), (\"X\", \"U2\"), (\"X\", \"U1\"), (\"D1\", \"U2\"), (\"D1\", \"U1\"), (\"D1\", \"D2\"), (\"D2\", \"U1\"), (\"D2\", \"U2\")],\n agent_decisions={0: [\"D1\"], 1: [\"D2\"]},\n agent_utilities={0: [\"U1\"], 1: [\"U2\"]},\n )\n cpd_x = TabularCPD(\"X\", 2, np.array([[0.5], [0.5]]))\n\n u1_cpd_array = np.array(\n [\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0],\n ]\n )\n\n u2_cpd_array = np.array(\n [\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 1, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0],\n ]\n )\n\n cpd_u1 = TabularCPD(\"U1\", 6, u1_cpd_array, evidence=[\"X\", \"D1\", \"D2\"], evidence_card=[2, 2, 2])\n cpd_u2 = TabularCPD(\"U2\", 6, u2_cpd_array, evidence=[\"X\", \"D1\", \"D2\"], evidence_card=[2, 2, 2])\n\n macid.add_cpds(cpd_x, cpd_u1, cpd_u2, D1=[0, 1], D2=[0, 1])\n\n return macid\n\n\ndef triage() -> MACID:\n macid = MACID(\n [\n (\"H1\", \"D1\"),\n (\"H1\", \"U1\"),\n (\"H2\", \"D2\"),\n (\"H2\", \"U2\"),\n (\"D1\", \"U1\"),\n (\"D1\", \"U2\"),\n (\"D1\", \"D3\"),\n (\"D1\", \"D4\"),\n (\"D1\", \"U3\"),\n (\"D1\", \"U4\"),\n (\"D2\", \"U1\"),\n (\"D2\", \"U2\"),\n (\"D2\", \"D4\"),\n (\"D2\", \"D3\"),\n (\"D2\", \"U3\"),\n (\"D2\", \"U4\"),\n (\"H3\", \"D3\"),\n (\"H3\", \"U3\"),\n (\"H4\", \"D4\"),\n (\"H4\", \"U4\"),\n (\"D3\", \"U3\"),\n (\"D3\", \"U4\"),\n (\"D3\", \"U1\"),\n (\"D3\", \"U2\"),\n (\"D4\", \"U3\"),\n (\"D4\", \"U4\"),\n (\"D4\", \"U1\"),\n (\"D4\", \"U2\"),\n (\"D3\", \"U5\"),\n (\"D3\", \"U6\"),\n (\"D4\", \"U5\"),\n (\"D4\", \"U6\"),\n (\"D1\", \"U5\"),\n (\"D1\", \"U6\"),\n (\"D2\", \"U5\"),\n (\"D2\", \"U6\"),\n (\"H5\", \"D5\"),\n (\"H5\", \"U5\"),\n (\"H6\", \"D6\"),\n (\"H6\", \"U6\"),\n (\"D1\", \"D5\"),\n (\"D1\", \"D6\"),\n (\"D2\", \"D5\"),\n (\"D2\", \"D6\"),\n (\"D3\", \"D5\"),\n (\"D3\", \"D6\"),\n (\"D4\", \"D5\"),\n (\"D4\", \"D6\"),\n (\"D5\", \"U3\"),\n (\"D5\", \"U4\"),\n (\"D5\", \"U1\"),\n (\"D5\", \"U2\"),\n (\"D5\", \"U5\"),\n (\"D5\", \"U6\"),\n (\"D6\", \"U3\"),\n (\"D6\", \"U4\"),\n (\"D6\", \"U1\"),\n (\"D6\", \"U2\"),\n (\"D6\", \"U5\"),\n (\"D6\", \"U6\"),\n ],\n agent_decisions={\n 1: [\"D1\"],\n 2: [\"D2\"],\n 3: [\"D3\"],\n 4: [\"D4\"],\n 5: [\"D5\"],\n 6: [\"D6\"],\n },\n agent_utilities={\n 1: [\"U1\"],\n 2: [\"U2\"],\n 3: [\"U3\"],\n 4: [\"U4\"],\n 5: [\"U5\"],\n 6: [\"U6\"],\n },\n )\n\n return macid\n"
] |
[
[
"numpy.array",
"numpy.transpose"
]
] |
lollcat/Aspen-RL
|
[
"0abefb9e7def7762e829ac4d621519d9d01592c0"
] |
[
"hydrocarbon_problem/env/env_test.py"
] |
[
"import numpy as np\nimport time\n\nfrom hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI\nfrom hydrocarbon_problem.env.env import AspenDistillation\n\n\ndef make_fake_agent(env: AspenDistillation):\n def fake_agent(obs):\n del(obs)\n discrete_spec, continuous_spec = env.action_spec()\n discrete_action = np.random.randint(0, discrete_spec.num_values, size=())\n continuous_action = np.random.uniform(low=continuous_spec.minimum,\n high=continuous_spec.maximum,\n size=continuous_spec.shape)\n return discrete_action, continuous_action\n return fake_agent\n\n\ndef test(n_episodes: int = 2500, use_fake_api: bool = True):\n \"\"\"This test runs multiple environment episodes, running some simple sanity\n checks along the way.\n \"\"\"\n # api = FakeDistillationAPI() # this can be changed to AspenAPI to test with Aspen\n if use_fake_api:\n from hydrocarbon_problem.api.fake_api import FakeDistillationAPI\n api = FakeDistillationAPI() # this can be changed to AspenAPI to test with Aspen\n else:\n from hydrocarbon_problem.api.aspen_api import AspenAPI\n api = AspenAPI(max_solve_iterations=100)\n env = AspenDistillation(flowsheet_api=api)\n agent = make_fake_agent(env)\n simulation_time = []\n episodic_time = []\n converged = []\n _return = []\n episode = 1\n\n for i in range(n_episodes):\n start = time.time()\n print(f\"Episode: {episode}\")\n timestep = env.reset()\n episode_return = 0\n n_streams = 1\n while not timestep.last():\n observation = timestep.observation.upcoming_state\n action = agent(observation)\n timestep, duration, run_converged = env.step(action)\n simulation_time.append(duration)\n converged.append(run_converged)\n print(timestep)\n episode_return += timestep.reward\n discrete_action = action[0]\n if discrete_action == 0: # choose not to seperate\n # if we don't seperate then the created states are black, 0 reward is given, and\n # the discount for the created states is zero\n assert timestep.reward == 0.0\n assert timestep.discount.created_states == (0, 0)\n assert (timestep.observation.created_states[1] == env._blank_state).all()\n assert (timestep.observation.created_states[0] == env._blank_state).all()\n else:\n n_streams += 2 # 2 new streams created\n # if we choose to seperate a stream, then the reward should be non-zero, the created state\n # discount's should both be 1, the created_states should have non-zero values.\n assert not timestep.reward == 0.0\n if env._stream_table[-2].is_product:\n # if tops is product, check discount is 0 else, check discount is 1\n assert timestep.discount.created_states[0] == 0\n else:\n assert timestep.discount.created_states[0] == 1\n if env._stream_table[-1].is_product:\n # if bots is product, check discount is 0 else, check discount is 1\n assert timestep.discount.created_states[1] == 0\n else:\n assert timestep.discount.created_states[1] == 1\n assert not (timestep.observation.created_states[1] == env._blank_state).all()\n assert not (timestep.observation.created_states[0] == env._blank_state).all()\n if not timestep.last():\n # if the episode is not done, then check that the upcoming observation has\n # non-zero values\n assert not (timestep.observation.upcoming_state == env._blank_state).all()\n\n # check the stream table has the correct number of streams\n assert len(env._stream_table) == n_streams\n episode_timer = time.time() - start\n print(f\"episode complete with return of {episode_return}\")\n _return.append(episode_return)\n episodic_time.append(episode_timer)\n episode += 1\n\n if use_fake_api is False:\n api: 
AspenAPI\n simulation = api._flowsheet\n # now if I want to I can acesss some variable saved in simulation\n print(simulation)\n return simulation_time, converged, _return, episodic_time\n\n\nif __name__ == '__main__':\n use_fake_api = True\n if use_fake_api:\n test(100)\n else:\n simulation_time, converged, _return, episodic_time = test()\n\n # Separate the convergence data\n unconverged_separations = [index for (index, item) in enumerate(converged) if item == False]\n iterations_without_separation = [index for (index, item) in enumerate(converged) if item == \"no separation\"]\n converged_separation = [index for (index, item) in enumerate(converged) if item == True]\n\n # Number of non-Aspen runs\n number_of_iterations_without_separation = len(iterations_without_separation)\n\n # Number of Aspen runs\n number_of_unconverged_separations = len(unconverged_separations)\n number_of_converged_separations = len(converged_separation)\n number_of_non_separations = len(iterations_without_separation)\n total_separations = number_of_unconverged_separations + number_of_converged_separations\n\n percent_unconverged_separations = 100 * number_of_unconverged_separations/total_separations\n percent_converged_separations = 100 * number_of_converged_separations/total_separations\n\n # Filter returns\n rl_returns = []\n filtered_return = [index for (index, item) in enumerate(_return) if item != 0]\n for i in filtered_return:\n j = _return[i]\n rl_returns.append(j)\n average_rl_returns = np.average(rl_returns)\n\n # Filter simulation times and calculate the average\n aspen_time = []\n sim_time = [index for (index, item) in enumerate(simulation_time) if item != \"no separation\"]\n for i in sim_time:\n j = simulation_time[i]\n aspen_time.append(j)\n aspen_time = np.average(aspen_time)\n\n if number_of_converged_separations == 0 and number_of_unconverged_separations == 0:\n print(\"no separations were performed\")\n print(f\"Number of iterations = {len(converged)}\")\n\n else:\n print(f\"Number of iterations: {len(converged)}\")\n print(f\"Number of unconverged separations: {number_of_unconverged_separations}, \"\n f\"{percent_unconverged_separations} %\")\n print(f\"Number of converged separations: {number_of_converged_separations}, \"\n f\"{percent_converged_separations} %\")\n print(f\"Number of non separations: {number_of_non_separations}\")\n\n # print(f\"Episodic returns: {_return}\")\n print(f\"Average return: {average_rl_returns}\")\n\n print(f\"Average Aspen time: {aspen_time}\")\n # print(f\"Total sim array {simulation_time}\")\n\n # print(f\"Episodic time: {episodic_time}\")\n print(f\"Average episodic time: {np.average(episodic_time)}\")\n\n\n\n"
] |
[
[
"numpy.random.uniform",
"numpy.average",
"numpy.random.randint"
]
] |
mykolakozyr/stacviewer
|
[
"a5d1029aec9c428a7ce7ae615621ea8915162824"
] |
[
"app.py"
] |
[
"import streamlit as st\n\nimport pandas as pd\nimport itertools\nimport geopandas as gpd\n\nfrom lib.streamlit_keplergl import keplergl_static\nfrom keplergl import KeplerGl\n\nfrom shapely.geometry import shape\nfrom pystac_client import Client, exceptions\nfrom pystac import catalog\n\nMAP_EMOJI_URL = \"https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/240/apple/285/books_1f4da.png\"\n\n\n# Set page title and favicon.\nst.set_page_config(\n page_title=\"STAC Discovery\", \n page_icon=MAP_EMOJI_URL,\n layout=\"wide\"\n)\n# Display header.\nst.markdown(\"<br>\", unsafe_allow_html=True)\nst.image(MAP_EMOJI_URL, width=80)\n\n\"\"\"\n# STAC Discovery\n\"\"\"\n\ndef collectCollectionsInfo(root_catalog):\n # Expensive function. Added cache for it.\n\n # Empty list that would be used for a dataframe to collect and visualize info about collections\n collections_list = []\n # Reading collections in the Catalog\n collections = list(root_catalog.get_collections())\n for collection in collections:\n id = collection.id\n title = collection.title or collection.id\n #bbox = collection.extent.spatial.bboxes # not in use for the first release\n #interval = collection.extent.temporal.intervals # not in use for the first release\n description = collection.description\n \n # creating a list of lists of values\n collections_list.append([id, title, description])\n return(collections_list)\n\ndef collectItemsInfo(collection, limit_items):\n # Empty list that would be used for a dataframe to collect and visualize info about collections\n items_list = []\n # Reading items in the collection\n iterable = collection.get_all_items()\n items = list(itertools.islice(iterable, limit_items)) #getting first 25000 items. To Do some smarter logic\n if len(items) == 0:\n st.warning('''\n ⚠️ Ooops, looks like this collection does not have items 🤷♂️. \n\n Feel free to ping me on [Twitter](https://twitter.com/MykolaKozyr) or [LinkedIn](https://www.linkedin.com/in/mykolakozyr/) to discuss details. It's very possible I missed something.\n ''')\n st.stop()\n else:\n # Iterating over items to collect main information\n for item in items:\n id = item.id\n geometry = shape(item.geometry)\n datetime = item.datetime or item.properties['datetime'] or item.properties['end_datetime'] or item.properties['start_datetime']\n links = item.links\n for link in links:\n if link.rel == 'self':\n self_url = link.target\n assets_list = []\n assets = item.assets\n for asset in assets:\n assets_list.append(asset)\n\n # creating a list of lists of values\n items_list.append([id, geometry, datetime, self_url, assets_list])\n \n items_df = gpd.GeoDataFrame(items_list)\n items_df.columns=['id', 'geometry', 'datetime', 'self_url', 'assets_list']\n\n items_gdf = items_df.set_geometry('geometry')\n items_gdf[\"datetime\"] = items_gdf[\"datetime\"].astype(str) #specifically for KeplerGL. See https://github.com/keplergl/kepler.gl/issues/602\n items_gdf[\"assets_list\"] = items_gdf[\"assets_list\"].astype(str) #specifically for KeplerGL. 
See https://github.com/keplergl/kepler.gl/issues/602\n return(items_gdf)\n\ndef selectCollection(df):\n # Showing Title while defining ID of the Collection\n CHOICES = dict(zip(df.id, df.title))\n option = st.sidebar.selectbox(\"Select the collection\", CHOICES.keys(), format_func=lambda x:CHOICES[ x ], index=0)\n return(option)\n\nconfig = {\n \"version\":\"v1\",\n \"config\":{\n \"visState\":{\n \"filters\": [\n {\n \"dataId\": [\n \"staccollection\"\n ],\n \"id\": \"selectedcollection\",\n \"name\": [\n \"datetime\"\n ],\n \"type\": \"timeRange\",\n \"enlarged\": True,\n \"plotType\": \"histogram\",\n \"animationWindow\": \"free\",\n \"speed\": 1\n }\n ],\n },\n \"mapState\":{\n \"bearing\": 0,\n \"latitude\": 0.0,\n \"longitude\": 0.0,\n \"pitch\": 0,\n \"zoom\": 1\n }\n }\n}\n\n# Link to the STAC Catalog\nstacurl = st.sidebar.text_input('Please insert the link to the STAC Catalog', value=\"https://planetarycomputer.microsoft.com/api/stac/v1\")\nwith st.sidebar.expander(\"See examples\"):\n st.write(\"\"\"\n - [Microsoft Planetary Computer STAC API](https://planetarycomputer.microsoft.com/api/stac/v1) - ⏳\n - [STAC SPOT Orthoimages of Canada 2005-2010](https://canada-spot-ortho.s3.amazonaws.com/catalog.json) - ⏳⏳⏳\n - [Planet Root STAC Catalog](https://www.planet.com/data/stac/catalog.json) - ⏳⏳\n - [Digital Earth Africa](https://explorer.digitalearth.africa/stac/) - ⏳⏳⏳⏳\n - [Digital Earth Australia](https://explorer.sandbox.dea.ga.gov.au/stac/) - ⏳⏳⏳⏳⏳\n - [Global land use/land cover (LULC) map for the year 2020 at 10 meter resolution.](https://esri-lulc-2020-stac.s3.amazonaws.com/catalog.json) - ⏳⏳⏳\n\n ⏳ - time to load\n \"\"\")\n\n\"\"\"\n[](https://www.twitter.com/mykolakozyr)\n [](https://www.linkedin.com/in/mykolakozyr/)\n [](https://www.buymeacoffee.com/mykolakozyr)\n## Details\nSTAC Discovery tool enables discovering data in a given collection. The idea is to move from the approach of providing geometry and dates to discovering the whole catalog.\nThe main idea is to showcase the value of using **[STAC specification](http://stacspec.org/)** in describing geospatial information. Just imagine how great would it be to have a single interface to browse geospatial data. \n\nPlease note, it is not the best tool to discover data from large data catalogs. Use tools that designed to return first N items for a given area of interest.\n\nThis STAC Discovery tool is based on the [pystac-client](https://pystac-client.readthedocs.io/en/latest/) v.0.3.1. \n### Known Limitations\n- :warning: It requires STAC spec v1.0.1\n- :warning: Max number of items to return is limited to 25000\n---\n\"\"\"\n\nst.warning('''\n ⚠️ It usually takes some time to collect information from Catalogs. \n\n Please be patient. In most cases, it worths it 😉\n ''')\nst.markdown(\"---\")\n\n# Reading generic info about the Catalog\ntry:\n root_catalog = Client.open(stacurl)\nexcept exceptions.APIError:\n st.warning('⚠️ Ooops, something went wrong. The PySTAC-client library returned APIError.')\n st.stop()\nexcept catalog.STACTypeError:\n st.warning('⚠️ Ooops, something went wrong. The PySTAC library returned STACTypeError.')\n st.stop()\nexcept:\n st.error('''\n ⛔️ Ooops, something went wrong that is not covered by PySTAC-client library. \n\n Most likely the problem based on not compatible STAC version.\n Feel free to ping me on [Twitter](https://twitter.com/MykolaKozyr) or [LinkedIn](https://www.linkedin.com/in/mykolakozyr/) to discuss details. 
It's very possible I missed something.\n ''')\n st.stop()\n\nif root_catalog:\n st.sidebar.write(f\"ID: {root_catalog.id}\")\n st.sidebar.write(f\"Title: {root_catalog.title or 'N/A'}\")\n st.sidebar.write(f\"Description: {root_catalog.description or 'N/A'}\")\n\n # Creating a collections dataframe\n try:\n collections_df = collectCollectionsInfo(root_catalog)\n except:\n st.error('''\n ⛔️ Ooops, something went wrong that is not covered by PySTAC-client library. \n\n Most likely the problem based on not compatible STAC version.\n Feel free to ping me on [Twitter](https://twitter.com/MykolaKozyr) or [LinkedIn](https://www.linkedin.com/in/mykolakozyr/) to discuss details. It's very possible I missed something.\n ''')\n st.stop()\n\n if len(collections_df) == 0:\n st.warning('''\n ⚠️ Ooops, looks like catalog does not have collections 🤷♂️. \n\n Feel free to ping me on [Twitter](https://twitter.com/MykolaKozyr) or [LinkedIn](https://www.linkedin.com/in/mykolakozyr/) to discuss details. It's very possible I missed something.\n ''')\n st.stop()\n collections_df = pd.DataFrame(collections_df)\n st.sidebar.write(f\"Number of collections: {collections_df.shape[0]}\")\n st.sidebar.markdown(\"---\")\n collections_df.columns=['id', 'title', 'description']\n\n # Selecting specific collection\n option = selectCollection(collections_df)\n if option:\n collection = root_catalog.get_child(option)\n\n # Creating items dataframe\n limit_items = st.sidebar.slider('Please specify number of STAC items to return.', min_value=1, max_value=25000, value=1000,)\n w1 = KeplerGl(height=800)\n if st.sidebar.button('Visualize the STAC Collection!'):\n items_gdf = collectItemsInfo(collection, limit_items)\n st.sidebar.write(f\"Number of items: {items_gdf.shape[0]}\")\n\n # Creating KeplerGL map\n w1.add_data(data=items_gdf, name='staccollection')\n w1.config = config\n\n # Visualize in Streamlit\n keplergl_static(w1)\n else:\n st.stop()\nelse:\n st.stop()\n"
] |
[
[
"pandas.DataFrame"
]
] |
amoodie/rain-table_jeffskwang
|
[
"a378a24f2c239e5b8076a864e7f924c3c6397185"
] |
[
"rain_table/utils.py"
] |
[
"\nfrom matplotlib.colors import LinearSegmentedColormap\n\ndef terrain_cmap():\n cdict = {'red': ((0, 0.2, 0.2),\n (0.125, 0.50588, 0.50588),\n (0.25, 1, 1),\n (0.5, 0.95686, 0.95686),\n (0.625, 0.4, 0.4),\n (0.75, 0.4, 0.4),\n (1, 1, 1)),\n 'green': ((0, 0.4, 0.4),\n (0.125, 0.76471, 0.76471),\n (0.25, 1, 1),\n (0.5, 0.74118, 0.74118),\n (0.625, 0.2, 0.2),\n (0.75, 0.2, 0.2),\n (1, 1, 1)),\n 'blue': ((0, 0, 0),\n (0.125, 0.12157, 0.12157),\n (0.25, 0.8, 0.8),\n (0.5, 0.27059, 0.27059),\n (0.625, 0.047059, 0.047059),\n (0.75, 0, 0),\n (1, 1, 1))\n }\n\n return LinearSegmentedColormap('terrain2', cdict)\n\n\nclass Config: \n \"\"\"\n dummy config class for storing info during generation of GUI\n \"\"\"\n\n pass\n\n"
] |
[
[
"matplotlib.colors.LinearSegmentedColormap"
]
] |
TheMightyDotkey/vibhat
|
[
"80beaabfefd2059021eb4bb6abfd0a83e1b71e68"
] |
[
"_pytorch/ML_Test.py"
] |
[
"\nprint('test')\n\nfrom io import StringIO\nfrom os.path import dirname, join as pjoin\nimport numpy as np\nimport scipy.io as sp\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef datasetmaker(offset, matfilename):\n \"\"\"offset is multiple of 256 matfile name is output name\"\"\"\n\n #Computes FFT of dataset\n Fs = 50000\n T = 1/Fs\n L = 50000\n t = np.arange(0, L) * T\n a = 0 + 50000 * offset\n b = 49999 + 50000 * offset\n matfilenamedat = \"{}{}.csv\" .format(matfilename,offset) #Makes dataset filename\n matfilenamedat = pjoin(data_dir + '\\\\Sets', matfilenamedat)\n #vectorname = \"fftdata{}\" .format(matfilename)\n S = data[a:b]\n\n f = Fs * np.arange(0, L//2 + 1) / L\n\n Y = np.fft.fft(S, axis = 0)\n P2 = abs(Y/L)\n P1 = P2[0:L//2+1]\n P1[1:len(P1)-1] = 2*P1[1:len(P1)-1]\n #P1 = P1/max(P1) Standardizes the data. Was removed to use the scaler function later on\n P1[0] = 0\n np.savetxt(matfilenamedat, P1, delimiter=',')\n #sp.savemat(matfilenamedat, {vectorname:P1})\n #sp.savemat('timefile.mat', {'time':f})\n\n plt.plot(f,P1)\n plt.show()\n\nmatname = 'Good'\n\ndata_dir = 'C:\\\\Users\\\\Nick\\\\Desktop\\\\SeDesgn-master\\\\SeDesgn-master\\\\ML_Test\\\\NickRealData'\nmat_fname = pjoin(data_dir, matname + '.mat')\n\n\n\ndf = pd.read_csv('C:\\\\Users\\\\Nick\\\\source\\\\repos\\\\vibhat\\\\vibhat\\\\_pytorch\\\\2020_March_03_065029.csv')\n\n\nbearingdata = np.array(df)\nbearingdata = bearingdata[:,1]\n#sorted(bearingdata.keys())\nprint(bearingdata)\n\ndata = bearingdata\nprint(data)\n\ndataname = matname + 'set'\n\ndatasetmaker(0, dataname)\ndatasetmaker(1, dataname)\ndatasetmaker(2, dataname)\ndatasetmaker(3, dataname)\n\n\n\n\n#N = 1000\n#T = 1 / 800\n#x = np.linspace(0, N*T, N-1)\n#y = 2*np.sin(20*x*2*np.pi) + 6*np.sin(150*x*2*np.pi)\n#plt.plot(y)\n#ffty = np.fft.fft(y)\n#Spec = abs(ffty/N)\n#Dat = Spec[1:N//2+1]\n#Dat[2:len(Dat)-1] = 2 * Dat[2:len(Dat)-1]\n#f = 800 * np.linspace(0, 1, N//2) / N\n#plt.plot(f, Dat)\n\n\n#Fs = 1000\n#T = 1/Fs\n#L = 1500\n#t = np.arange(0, L) * T\n\n#S = 0.7 * np.sin(2 * np.pi * 50 * t) + np.sin(2 * np.pi * 120 * t)\n\n#f = Fs * np.arange(0, L//2 + 1) / L\n\n#Y = np.fft.fft(S)\n#P2 = abs(Y/L)\n#P1 = P2[0:L//2+1]\n#P1[1:len(P1)-1] = 2*P1[1:len(P1)-1]\n\n#plt.plot(f,P1)\n\n\n#plt.plot(fftx, np.abs(ffty[0:N//2]) * 2 / len(ffty))\n\n#plt.show()\n\n#Fs = 12000\n#T = 1/Fs\n#L = 256\n#t = np.arange(0, L) * T\n\n#S = data[0:256]\n\n#f = Fs * np.arange(0, L//2 + 1) / L\n\n#Y = np.fft.fft(S, axis = 0)\n#P2 = abs(Y/L)\n#P1 = P2[0:L//2+1]\n#P1[1:len(P1)-1] = 2*P1[1:len(P1)-1]\n\n#sp.savemat('normaldata1.mat', {'fftdata':P1})\n\n\n#plt.plot(f, P1)\n#plt.show()\n\n\n\n#plt.plot(data[0:100])\n\n#plt.show()\n\n#fftdata = np.fft.fft(data)\n\n#plt.plot(fftdata[0:100])\n\n#plt.show()\n"
] |
[
[
"pandas.read_csv",
"numpy.fft.fft",
"numpy.arange",
"matplotlib.pyplot.plot",
"numpy.savetxt",
"numpy.array",
"matplotlib.pyplot.show"
]
] |
kasramvd/Rexy
|
[
"1e50dd877cf254984c34cb0f7e98fa663143d13b"
] |
[
"Rexy/Administration/visualizer.py"
] |
[
"\"\"\"Contains the respective visualization tasks.\"\"\"\nimport mpld3\nimport matplotlib.pyplot as plt\n\n\nclass Visualizer:\n def __init__(self, *args, **kwargs):\n pass\n\n def pie_plot(self, **kwargs):\n\n fig1, ax1 = plt.subplots()\n ax1.pie(kwargs['sizes'],\n explode=kwargs['explode'],\n labels=kwargs['labels'],\n autopct='%1.1f%%',\n shadow=True,\n startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\n return mpld3.fig_to_html(fig1)\n"
] |
[
[
"matplotlib.pyplot.subplots"
]
] |
arunsechergy/AgentVision2
|
[
"f954811a19d5b9e4b4c94b4c1f9d7b3a38c6934e"
] |
[
"src/agentvision/midas/midas/midas_net.py"
] |
[
"\"\"\"MidashNet: Network for monocular depth estimation trained by mixing several datasets.\nThis file contains code that is adapted from\nhttps://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py\n\"\"\"\nimport torch\nimport torch.nn as nn\n\nfrom .base_model import BaseModel\nfrom .blocks import FeatureFusionBlock, Interpolate, _make_encoder\n\n\nclass MidasNet(BaseModel):\n \"\"\"Network for monocular depth estimation.\n \"\"\"\n\n def __init__(self, path=None, features=256, non_negative=True):\n \"\"\"Init.\n\n Args:\n path (str, optional): Path to saved model. Defaults to None.\n features (int, optional): Number of features. Defaults to 256.\n backbone (str, optional): Backbone network for encoder. Defaults to resnet50\n \"\"\"\n print(\"Loading weights: \", path)\n\n super(MidasNet, self).__init__()\n\n use_pretrained = False if path is None else True\n\n self.pretrained, self.scratch = _make_encoder(features, use_pretrained)\n\n self.scratch.refinenet4 = FeatureFusionBlock(features)\n self.scratch.refinenet3 = FeatureFusionBlock(features)\n self.scratch.refinenet2 = FeatureFusionBlock(features)\n self.scratch.refinenet1 = FeatureFusionBlock(features)\n\n self.scratch.output_conv = nn.Sequential(\n nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),\n Interpolate(scale_factor=2, mode=\"bilinear\"),\n nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),\n nn.ReLU(True),\n nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),\n nn.ReLU(True) if non_negative else nn.Identity(),\n )\n\n if path:\n self.load(path)\n\n def forward(self, x):\n \"\"\"Forward pass.\n\n Args:\n x (tensor): input data (image)\n\n Returns:\n tensor: depth\n \"\"\"\n # Encoder\n layer_1 = self.pretrained.layer1(x)\n layer_2 = self.pretrained.layer2(layer_1)\n layer_3 = self.pretrained.layer3(layer_2)\n layer_4 = self.pretrained.layer4(layer_3)\n\n # Encoder\n layer_1_rn = self.scratch.layer1_rn(layer_1)\n layer_2_rn = self.scratch.layer2_rn(layer_2)\n layer_3_rn = self.scratch.layer3_rn(layer_3)\n layer_4_rn = self.scratch.layer4_rn(layer_4)\n\n # Decoder\n path_4 = self.scratch.refinenet4(layer_4_rn)\n path_3 = self.scratch.refinenet3(path_4, layer_3_rn)\n path_2 = self.scratch.refinenet2(path_3, layer_2_rn)\n path_1 = self.scratch.refinenet1(path_2, layer_1_rn)\n\n out = self.scratch.output_conv(path_1)\n\n return torch.squeeze(out, dim=1)\n\n"
] |
[
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Identity",
"torch.squeeze"
]
] |
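MidasNet above only defines the network; a shape-level sketch of a forward pass follows. The import is hypothetical (the file uses relative imports, so it must be loaded as part of its package), and 384x384 is just a commonly used MiDaS input size:

import torch
from midas.midas_net import MidasNet  # hypothetical package path

model = MidasNet(path=None, features=256)  # no checkpoint, so random weights
model.eval()
x = torch.randn(1, 3, 384, 384)            # NCHW RGB batch
with torch.no_grad():
    depth = model(x)                       # torch.squeeze(out, dim=1) -> (N, H, W)
print(depth.shape)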
Dazz993/AlphaPose
|
[
"d4b9a3af5f590fa21bd033b4a19e98b5748ae683"
] |
[
"alphapose/utils/writer.py"
] |
[
"import os\nimport time\nfrom threading import Thread\nfrom queue import Queue\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.multiprocessing as mp\n\nfrom alphapose.utils.transforms import get_func_heatmap_to_coord\nfrom alphapose.utils.pPose_nms import pose_nms\n\nDEFAULT_VIDEO_SAVE_OPT = {\n 'savepath': 'examples/res/1.mp4',\n 'fourcc': cv2.VideoWriter_fourcc(*'mp4v'),\n 'fps': 25,\n 'frameSize': (640, 480)\n}\n\nclass DataWriter():\n def __init__(self, cfg, opt, save_video=False,\n video_save_opt=DEFAULT_VIDEO_SAVE_OPT,\n queueSize=1024):\n self.cfg = cfg\n self.opt = opt\n self.video_save_opt = video_save_opt\n\n self.eval_joints = list(range(cfg.DATA_PRESET.NUM_JOINTS))\n self.save_video = save_video\n self.final_result = []\n self.heatmap_to_coord = get_func_heatmap_to_coord(cfg)\n # initialize the queue used to store frames read from\n # the video file\n if opt.sp:\n self.result_queue = Queue(maxsize=queueSize)\n self.final_result_queue = Queue(maxsize=queueSize)\n else:\n self.result_queue = mp.Queue(maxsize=queueSize)\n self.final_result_queue = mp.Queue(maxsize=queueSize)\n\n if opt.save_img:\n if not os.path.exists(opt.outputpath + '/vis'):\n os.mkdir(opt.outputpath + '/vis')\n\n if opt.pose_track:\n from PoseFlow.poseflow_infer import PoseFlowWrapper\n self.pose_flow_wrapper = PoseFlowWrapper(save_path=os.path.join(opt.outputpath, 'poseflow'))\n\n def start_worker(self, target):\n if self.opt.sp:\n p = Thread(target=target, args=())\n else:\n p = mp.Process(target=target, args=())\n # p.daemon = True\n p.start()\n return p\n\n def start(self):\n # start a thread to read pose estimation results per frame\n self.result_worker = self.start_worker(self.update)\n return self\n\n def update(self):\n if self.save_video:\n # initialize the file video stream, adapt ouput video resolution to original video\n stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])\n if not stream.isOpened():\n print(\"Try to use other video encoders...\")\n ext = self.video_save_opt['savepath'].split('.')[-1]\n fourcc, _ext = self.recognize_video_ext(ext)\n self.video_save_opt['fourcc'] = fourcc\n self.video_save_opt['savepath'] = self.video_save_opt['savepath'][:-4] + _ext\n stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])\n assert stream.isOpened(), 'Cannot open video for writing'\n # keep looping infinitelyd\n while True:\n # ensure the queue is not empty and get item\n (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name) = self.wait_and_get(self.result_queue)\n if orig_img is None:\n # if the thread indicator variable is set (img is None), stop the thread\n self.wait_and_put(self.final_result_queue, None)\n if self.save_video:\n stream.release()\n return\n # image channel RGB->BGR\n orig_img = np.array(orig_img, dtype=np.uint8)[:, :, ::-1]\n if boxes is None:\n if self.opt.save_img or self.save_video or self.opt.vis:\n self.write_image(orig_img, im_name, stream=stream if self.save_video else None)\n else:\n # location prediction (n, kp, 2) | score prediction (n, kp, 1)\n pred = hm_data.cpu().data.numpy()\n assert pred.ndim == 4\n\n if hm_data.size()[1] == 49:\n self.eval_joints = [*range(0,49)]\n pose_coords = []\n pose_scores = []\n for i in range(hm_data.shape[0]):\n bbox = cropped_boxes[i].tolist()\n pose_coord, pose_score = self.heatmap_to_coord(pred[i][self.eval_joints], bbox)\n pose_coords.append(torch.from_numpy(pose_coord).unsqueeze(0))\n 
pose_scores.append(torch.from_numpy(pose_score).unsqueeze(0))\n preds_img = torch.cat(pose_coords)\n preds_scores = torch.cat(pose_scores)\n result = pose_nms(boxes, scores, ids, preds_img, preds_scores, self.opt.min_box_area)\n result = {\n 'imgname': im_name,\n 'result': result\n }\n if self.opt.pose_track:\n poseflow_result = self.pose_flow_wrapper.step(orig_img, result)\n for i in range(len(poseflow_result)):\n result['result'][i]['idx'] = poseflow_result[i]['idx']\n self.wait_and_put(self.final_result_queue, result)\n if self.opt.save_img or self.save_video or self.opt.vis:\n if hm_data.size()[1] == 49:\n from alphapose.utils.vis import vis_frame_dense as vis_frame\n elif self.opt.vis_fast:\n from alphapose.utils.vis import vis_frame_fast as vis_frame\n else:\n from alphapose.utils.vis import vis_frame\n img = vis_frame(orig_img, result, add_bbox=(self.opt.pose_track | self.opt.tracking))\n self.write_image(img, im_name, stream=stream if self.save_video else None)\n\n def write_image(self, img, im_name, stream=None):\n if self.opt.vis:\n cv2.imshow(\"AlphaPose Demo\", img)\n cv2.waitKey(30)\n if self.opt.save_img:\n cv2.imwrite(os.path.join(self.opt.outputpath, 'vis', im_name), img)\n if self.save_video:\n stream.write(img)\n\n def wait_and_put(self, queue, item):\n queue.put(item)\n\n def wait_and_get(self, queue):\n return queue.get()\n\n def save(self, boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name):\n self.commit()\n # save next frame in the queue\n self.wait_and_put(self.result_queue, (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name))\n\n def running(self):\n # indicate that the thread is still running\n time.sleep(0.2)\n self.commit()\n return not self.result_queue.empty()\n\n def count(self):\n # indicate the remaining images\n return self.result_queue.qsize()\n\n def stop(self):\n # indicate that the thread should be stopped\n self.save(None, None, None, None, None, None, None)\n while True:\n final_res = self.wait_and_get(self.final_result_queue)\n if final_res:\n self.final_result.append(final_res)\n else:\n break\n self.result_worker.join()\n\n def clear_queues(self):\n self.clear(self.result_queue)\n self.clear(self.final_result_queue)\n \n def clear(self, queue):\n while not queue.empty():\n queue.get()\n\n def commit(self):\n # commit finished final results to main process\n while not self.final_result_queue.empty():\n self.final_result.append(self.wait_and_get(self.final_result_queue))\n\n def results(self):\n # return final result\n return self.final_result\n\n def recognize_video_ext(self, ext=''):\n if ext == 'mp4':\n return cv2.VideoWriter_fourcc(*'mp4v'), '.' + ext\n elif ext == 'avi':\n return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext\n elif ext == 'mov':\n return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext\n else:\n print(\"Unknow video format {}, will use .mp4 instead of it\".format(ext))\n return cv2.VideoWriter_fourcc(*'mp4v'), '.mp4'\n\n"
] |
[
[
"torch.multiprocessing.Queue",
"torch.cat",
"torch.from_numpy",
"numpy.array",
"torch.multiprocessing.Process"
]
] |
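DataWriter above is organized around a worker thread that drains a result queue until it sees a sentinel. A stripped-down, stdlib-only sketch of that pattern (the names are illustrative, not AlphaPose APIs):

from queue import Queue
from threading import Thread

results = Queue(maxsize=1024)

def consumer():
    while True:
        item = results.get()
        if item is None:       # sentinel: the producer is done (cf. DataWriter.stop())
            break
        print('processed frame', item)

worker = Thread(target=consumer)
worker.start()
for frame in range(3):
    results.put(frame)         # cf. DataWriter.save() enqueueing per-frame results
results.put(None)              # poison pill ends the worker loop
worker.join()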
bruinxiong/mesh-1
|
[
"a1259b927dc34eb16251b33a62bc9fed112893f9"
] |
[
"mesh_tensorflow/simd_mesh_impl.py"
] |
[
"# coding=utf-8\n# Copyright 2019 The Mesh TensorFlow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"SIMD Mesh implementation (for TPU/XLA).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\n\nfrom mesh_tensorflow import ops_with_redefined_builtins as mtf\nfrom mesh_tensorflow import tpu_variables\nfrom mesh_tensorflow import utils\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow.python.tpu.ops import tpu_ops # pylint: disable=g-direct-tensorflow-import\n\n\nclass SimdMeshImpl(mtf.MeshImpl):\n \"\"\"Mesh implementation for TPU using SIMD and MPI operations.\"\"\"\n\n def __init__(self,\n shape,\n layout,\n devices=None,\n device_assignment=None,\n logical_to_physical=None,\n allreduce_in_bfloat16_max_group_size=32,\n ):\n \"\"\"Create a SimdMeshImpl.\n\n Args:\n shape: an input to mtf.convert_to_shape()\n layout: an input to mtf.convert_to_layout_rules()\n devices: deprecated\n device_assignment: a tf.tpu.experimental.DeviceAssignment -\n devices must be asssigned in lexicographic order\n logical_to_physical: an optional permutation representing the mapping\n from logical cores to \"physical\" cores, where the physical cores are\n listed in lexicographic order in the physical mesh, and the logical\n cores are listed in lexicographic order in the logical mesh.\n Default is lexicographic order.\n allreduce_in_bfloat16_max_group_size: an integer. 
Allreduces of bfloat16\n tensors are done in float32 if the group size exceeds this value.\n \"\"\"\n super(SimdMeshImpl, self).__init__(shape, layout)\n if devices is not None:\n tf.logging.warning(\"SimdMeshImpl ignoring devices %s\" % devices)\n self._device_assignment = device_assignment\n tf.logging.info(\"SimdMeshImpl init: {0} {1}\".format(shape, layout))\n tf.logging.info(\"Device Assignment: {0}\".format(device_assignment))\n if logical_to_physical is None:\n # TODO(noam): maybe use auto_logical_to_physical_tpu() here\n logical_to_physical = list(range(self.size))\n if sorted(logical_to_physical) != list(range(self.size)):\n raise ValueError(\n \"logical_to_physical must be a permutation on range(shape.size)\"\n \" shape=%s logical_to_physical=%s\" % (shape, logical_to_physical))\n self._logical_to_physical = logical_to_physical\n self._physical_to_logical = [None] * self.size\n for logical, physical in enumerate(self._logical_to_physical):\n self._physical_to_logical[physical] = logical\n self._pnum_tensor = None\n self.graph_device_function_stacks = []\n self.copy_master_to_slice_ops = []\n self._allreduce_in_bfloat16_max_group_size = (\n allreduce_in_bfloat16_max_group_size)\n\n @property\n def pnum_tensor(self):\n if self._pnum_tensor is not None:\n return self._pnum_tensor\n with utils.outside_all_rewrites():\n tf.logging.info(\"Create pnum_tensor\")\n self._pnum_tensor = tpu_ops.tpu_replicated_input(\n self._physical_to_logical, name=\"pnum_constants\")\n return self._pnum_tensor\n\n def l2p(self, logical_pnum):\n return self._logical_to_physical[logical_pnum]\n\n def p2l(self, physical_pnum):\n return self._physical_to_logical[physical_pnum]\n\n class LaidOutTensor(object):\n \"\"\"One Slice.\"\"\"\n\n def __init__(self, tensor_list):\n assert isinstance(tensor_list, list)\n self._tensor_list = tensor_list\n\n def __repr__(self):\n return \"[\" + \",\".join([str(t) for t in self._tensor_list]) + \"]\"\n\n @property\n def tensor_list(self):\n return self._tensor_list\n\n @property\n def one_slice(self):\n return self._tensor_list[0]\n\n @classmethod\n def from_tensor_list(cls, tensor_list):\n return cls(tensor_list)\n\n @property\n def all_slices(self):\n return self._tensor_list\n\n @property\n def slice_shape(self):\n return self.one_slice.shape.as_list()\n\n def to_laid_out_tensor(self):\n return self\n\n class LaidOutVariable(object):\n \"\"\"Maintains slice-variables and copy operations.\"\"\"\n\n def __init__(self, variable, mesh_impl):\n \"\"\"Create a LaidOutVariable.\n\n Args:\n variable: a Variable (Operation)\n mesh_impl: a MeshImpl\n \"\"\"\n self._variable = variable\n self._mesh_impl = mesh_impl\n shape = variable.outputs[0].shape\n slice_shape = mesh_impl.slice_shape(shape)\n base_name = variable.name\n slices = []\n slices_with_master_dtype = []\n with tf.device(variable.master_device), utils.outside_all_rewrites():\n zero_tensor = tf.zeros(slice_shape, dtype=variable.slice_dtype)\n\n # pylint: disable=protected-access\n init_device_stack = tf.get_default_graph()._device_function_stack\n\n if not mesh_impl.graph_device_function_stacks:\n for pnum in xrange(mesh_impl.size):\n tpu_device = mesh_impl.device_assignment.tpu_device(replica=pnum)\n with tf.device(tpu_device):\n mesh_impl.graph_device_function_stacks.append(\n tf.get_default_graph()._device_function_stack.copy())\n\n for physical_pnum in xrange(mesh_impl.size):\n slice_var_name = base_name + \"_slice_%d\" % physical_pnum\n # Use tf.Variable instead of tf.get_variable since latter adds lots of\n # 
useless operations to the TF graph. Use tf.get_variable only if\n # in a AUTO_REUSE scope.\n # Note: Repeatedly 'with tf.device():' slows down the graph\n # construction. Therefore we directly use the cached device_stack here.\n tf.get_default_graph()._device_function_stack = (\n mesh_impl.graph_device_function_stacks[physical_pnum])\n\n if tf.get_variable_scope().reuse == tf.AUTO_REUSE:\n slice_var = tf.get_variable(\n initializer=zero_tensor,\n trainable=self._variable.trainable,\n collections=[\"TPU_VAR\"],\n dtype=variable.slice_dtype,\n name=slice_var_name)\n else:\n slice_var = tf.Variable(\n initial_value=zero_tensor,\n trainable=self._variable.trainable,\n collections=[\"TPU_VAR\"],\n dtype=variable.slice_dtype,\n name=slice_var_name,\n expected_shape=slice_shape)\n\n slices.append(slice_var)\n\n # Restore the initial stack\n tf.get_default_graph()._device_function_stack = init_device_stack\n # pylint: enable=protected-access\n\n self._laid_out_tensor = mesh_impl.LaidOutTensor(\n [tpu_variables.ReplicatedVariable(base_name, slices)])\n with tf.device(variable.master_device), utils.outside_all_rewrites():\n if os.environ.get(\"MTF_SEQUENCE_MODE\", \"\") == \"1\":\n if mesh_impl.copy_master_to_slice_ops:\n with tf.control_dependencies(\n [mesh_impl.copy_master_to_slice_ops[-1]]):\n self._copy_master_to_slices = self._gen_copy_master_to_slices_op(\n variable.get_master(), shape, slices, slice_shape)\n else:\n self._copy_master_to_slices = self._gen_copy_master_to_slices_op(\n variable.get_master(), shape, slices, slice_shape)\n\n mesh_impl.copy_master_to_slice_ops.append(self._copy_master_to_slices)\n else:\n self._copy_master_to_slices = self._gen_copy_master_to_slices_op(\n variable.get_master(), shape, slices, slice_shape)\n slices_with_master_dtype = [\n tf.cast(s, variable.master_dtype) for s in slices]\n slices_with_master_dtype = [\n slices_with_master_dtype[mesh_impl.l2p(logical_pnum)]\n for logical_pnum in range(mesh_impl.size)]\n self._copy_slices_to_master = variable.assign_to_master(\n mesh_impl.combine_slices(slices_with_master_dtype, shape,\n device=variable.master_device))\n\n def _gen_copy_master_to_slices_op(self, master_variable, master_shape,\n slices, slice_shape):\n \"\"\"Generate ops which slices master and assign to slices.\n\n Args:\n master_variable: The master variable.\n master_shape: The shape of master variable.\n slices: The list of slice-variables in physical order.\n slice_shape: The shape of the slice variable.\n Returns:\n A grouped tf.assign ops.\n \"\"\"\n mesh_impl = self._mesh_impl\n master_layout = mesh_impl.tensor_layout(master_shape)\n # For handling case: master is float32 and slices are bfloat16.\n if master_variable.dtype != slices[0].dtype:\n master_variable = tf.cast(master_variable, slices[0].dtype)\n assign_ops = []\n if master_layout.is_fully_replicated:\n assign_ops = [tf.assign(t, master_variable) for t in slices]\n else:\n slice_dict = {}\n for logical_pnum in xrange(len(slices)):\n slice_begin = mesh_impl.slice_begin(master_shape, logical_pnum)\n slice_begin_tuple = tuple(slice_begin)\n # Reuse the same slice if slice_begin doesn't change.\n if slice_begin_tuple not in slice_dict:\n slice_dict[slice_begin_tuple] = tf.slice(master_variable,\n slice_begin, slice_shape)\n physical_pnum = mesh_impl.l2p(logical_pnum)\n assign_ops.append(\n tf.assign(slices[physical_pnum], slice_dict[slice_begin_tuple]))\n return tf.group(assign_ops)\n\n def assign_to_slices(self, assign_fn, values, assign_to_tensor_list=None):\n \"\"\"Assign to the slice 
variables.\n\n Args:\n assign_fn: a function from\n (mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation\n values: a list of tf.Tensor\n assign_to_tensor_list: an optional list of tf.Variable\n\n Returns:\n a tf.operation\n \"\"\"\n if assign_to_tensor_list is None:\n assign_to_tensor_list = self._laid_out_tensor.all_slices\n # Handle both N -> 1 and N -> N cases.\n num_slices = min(len(assign_to_tensor_list), len(values))\n devices = [\"\"] * num_slices\n return tf.group(\n mtf.parallel(devices, assign_fn,\n [self._variable] * len(devices),\n assign_to_tensor_list[:num_slices],\n values[:num_slices]))\n\n @property\n def laid_out_tensor(self):\n return self._laid_out_tensor\n\n @property\n def copy_master_to_slices(self):\n return self._copy_master_to_slices\n\n @property\n def copy_slices_to_master(self):\n return self._copy_slices_to_master\n\n def laid_out_pnum(self):\n \"\"\"Returns a LaidOutTensor containing the logical processor number.\n\n Returns:\n a LaidOutTensor where each slice is an integer scalar\n \"\"\"\n return self.LaidOutTensor([self.pnum_tensor])\n\n def _create_group_assignment(self, mesh_axes):\n \"\"\"Create group assignment for XLA cross replica ops (physical pnums).\"\"\"\n\n partitioning = {}\n for logical_pnum in xrange(self.size):\n group = mtf.pnum_to_group(self.shape, mesh_axes, logical_pnum)\n if group not in partitioning:\n partitioning[group] = []\n partitioning[group].append(self.l2p(logical_pnum))\n group_assignment = []\n for group, physical_pnums in partitioning.items():\n group_assignment.append(physical_pnums)\n return group_assignment\n\n def allreduce(self, x, mesh_axes, reduction_fn_string):\n \"\"\"Grouped allreduce, (summed across the given dimensions).\n\n Args:\n x: a LaidOutTensor\n mesh_axes: a list of integers\n reduction_fn_string: \"SUM\"\n Returns:\n a LaidOutTensor\n Raises:\n ValueError: if the reduction is not yet implemented.\n \"\"\"\n if not mesh_axes:\n return x\n x = x.to_laid_out_tensor()\n if reduction_fn_string == \"SUM\":\n group_assignment = self._create_group_assignment(mesh_axes)\n group_size = len(group_assignment[0])\n tf_in = x.one_slice\n dtype = tf_in.dtype\n if dtype == tf.float32:\n cast_to_float32 = False\n elif dtype == tf.bfloat16:\n cast_to_float32 = (\n group_size > self._allreduce_in_bfloat16_max_group_size)\n else:\n tf.logging.info(\"Casting %s to float32 for allreduce\" % tf_in.dtype)\n cast_to_float32 = True\n if cast_to_float32:\n tf_in = tf.cast(tf_in, tf.float32)\n tf_out = tpu_ops.cross_replica_sum(tf_in, group_assignment)\n if cast_to_float32:\n tf_out = tf.cast(tf_out, dtype)\n return self.LaidOutTensor([tf_out])\n else:\n for axis in mesh_axes:\n x = self.allconcat(x, axis, 0, stack=True)\n x = self.LaidOutTensor(\n [mtf.reduction_fn(reduction_fn_string)(x.one_slice, 0)])\n return x\n\n def allconcat(self, x, mesh_axis, concat_axis, stack=False):\n \"\"\"Grouped allconcat (like MPI allgather followed by concat).\n\n TODO(noam): inefficient - replace with a XLA allconcat when available\n\n Args:\n x: a LaidOutTensor\n mesh_axis: an integer - the mesh axis along which to group\n concat_axis: an integer (the Tensor axis along which to concatenate)\n stack: a boolean - whether to stack instead of concat\n Returns:\n a LaidOutTensor\n \"\"\"\n x = x.to_laid_out_tensor()\n coord = self.laid_out_pcoord(mesh_axis)\n t = x.one_slice\n old_shape = t.shape.as_list()\n num_parts = self.shape[mesh_axis].size\n t = tf.expand_dims(t, concat_axis)\n t *= tf.reshape(\n tf.one_hot(coord.one_slice, num_parts, 
dtype=t.dtype),\n [num_parts if i == concat_axis else 1\n for i in xrange(len(old_shape) + 1)])\n if not stack:\n new_shape = old_shape[:]\n new_shape[concat_axis] *= num_parts\n t = tf.reshape(t, new_shape)\n return self.allreduce(self.LaidOutTensor([t]), [mesh_axis], \"SUM\")\n\n def alltoall(self, x, mesh_axis, split_axis, concat_axis):\n \"\"\"Grouped alltoall (like MPI alltoall with splitting and concatenation).\n\n Args:\n x: a LaidOutTensor\n mesh_axis: an integer the mesh axis along which to group\n split_axis: an integer (the Tensor axis along which to split)\n concat_axis: an integer (the Tensor axis along which to concatenate)\n Returns:\n a LaidOutTensor\n \"\"\"\n x = x.to_laid_out_tensor()\n t = x.one_slice\n group_assignment = self._create_group_assignment([mesh_axis])\n dtype = t.dtype\n if dtype == tf.float32:\n # There seems to be a bug with float32 alltoall.\n # Do it in bfloat16 until the bug is fixed.\n # TODO(noam): file a bug\n t = tf.to_bfloat16(t)\n t = tpu_ops.all_to_all(\n t,\n concat_dimension=concat_axis,\n split_dimension=split_axis,\n split_count=len(group_assignment[0]),\n group_assignment=group_assignment)\n t = tf.cast(t, dtype)\n x = self.LaidOutTensor([t])\n return x\n\n def receive(self, x, mesh_axis, source_pcoord):\n \"\"\"Collective receive in groups.\n\n Each group contains the processors that differ only in mesh_axis.\n\n ```python\n group_size = self.shape[mesh_axis].size\n ```\n\n Args:\n x: a LaidOutTensor\n mesh_axis: an integer\n source_pcoord: a list of optional integers. Each element is either None\n or an integer in [0, group_size). If source_pcoord[k] is None, then the\n output for the k-th processor in each group is a zero tensor. If\n source_pcoord[k] is not None, then the output for the k-th processor in\n each group is equal to the input for the source_pcoord[k]-th processor\n in that group.\n\n Returns:\n a LaidOutTensor\n \"\"\"\n x = x.to_laid_out_tensor()\n t = x.one_slice\n source_target_pairs = []\n\n for pnum in xrange(self.size):\n coord = mtf.pnum_to_processor_coordinates(self.shape, pnum)\n k = coord[mesh_axis]\n if source_pcoord[k] is not None:\n coord[mesh_axis] = source_pcoord[k]\n source_pnum = mtf.processor_coordinates_to_pnum(self.shape, coord)\n source_target_pairs.append(\n [self.l2p(source_pnum),\n self.l2p(pnum)])\n\n if not source_target_pairs:\n ret = tf.zeros_like(t, t.dtype)\n elif t.dtype in [tf.float32, tf.bfloat16, tf.int32]:\n ret = tpu_ops.collective_permute(t, source_target_pairs)\n else:\n # If t is not one of the allowed types, cast and cast back.\n ret = tf.cast(tpu_ops.collective_permute(\n tf.cast(t, tf.float32), source_target_pairs), t.dtype)\n\n return self.LaidOutTensor([ret])\n\n def slice(self, tf_tensor, tensor_shape):\n \"\"\"\"Slice out the corresponding part of tensor given the pnum variable.\"\"\"\n tensor_layout = self.tensor_layout(tensor_shape)\n\n if tensor_layout.is_fully_replicated:\n return self.LaidOutTensor([tf_tensor])\n else:\n slice_shape = self.slice_shape(tensor_shape)\n slice_begins = [\n self.slice_begin(tensor_shape, pnum) for pnum in xrange(self.size)\n ]\n slice_begins_tensor = tf.stack(slice_begins)\n # slice on source device\n selected_slice_begin = tf.gather(slice_begins_tensor, self.pnum_tensor)\n return self.LaidOutTensor(\n [tf.slice(tf_tensor, selected_slice_begin, slice_shape)])\n\n def slicewise(self, fn, *inputs):\n \"\"\"Execute a function in parallel on all slices.\n\n Args:\n fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.\n *inputs: a 
list of inputs. Each input is either a LaidOutTensor or\n is convertible to a tf.Tensor.\n Returns:\n a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.\n \"\"\"\n if fn == tf.add:\n assert len(inputs) == 2\n if isinstance(inputs[0], mtf.LazyAllreduceSum):\n # sum of LazyAllreduceSum (keep delaying the allreduce)\n return inputs[0] + inputs[1]\n # convert all inputs to LaidOutTensor where possible\n inputs = mtf.convert_args_to_laid_out_tensors(inputs)\n ret = fn(*[\n x.one_slice if isinstance(x, self.LaidOutTensor) else x\n for x in inputs])\n if isinstance(ret, tuple):\n return tuple([self.LaidOutTensor([t]) for t in ret])\n else:\n return self.LaidOutTensor([ret])\n\n @property\n def device_assignment(self):\n return self._device_assignment\n\n @property\n def devices(self):\n return self._devices\n\n def random(self, shape, tf_fn, kwargs):\n \"\"\"Call a random tf operation (e.g. random_uniform).\n\n Args:\n shape: a Shape\n tf_fn: a function such as tf.random.uniform\n kwargs: kwargs to pass to tf_fn, except for seed\n\n Returns:\n a LaidOutTensor\n \"\"\"\n # TODO(noam): can we make things better with stateless_random?\n slice_shape = self.slice_shape(shape)\n x = tf_fn(slice_shape, **kwargs)\n # TPU does not have seeds enabled. Sync up the\n # random choices by zeroing out all but the first core per group of\n # identical slices, then allreducing by group.\n layout = self.tensor_layout(shape)\n # we need to sync across these axes.\n mesh_axes = [i for i in xrange(self.ndims)\n if i not in layout.tensor_axis_to_mesh_axis]\n multiplier = 1.0\n for axis in mesh_axes:\n multiplier *= tf.cast(\n tf.equal(self.laid_out_pcoord(axis).one_slice, 0), x.dtype)\n x *= multiplier\n x = self.LaidOutTensor([x])\n x = self.allreduce(x, mesh_axes, \"SUM\")\n return x\n\n def export_to_tf_tensor(self, x, laid_out_x):\n \"\"\"Turn a Tensor into a tf.Tensor.\n\n Args:\n x: a Tensor\n laid_out_x: a LaidOutTensor\n Returns:\n a tf.Tensor\n \"\"\"\n tensor_layout = self.tensor_layout(x.shape)\n if not tensor_layout.is_fully_replicated:\n raise NotImplementedError(\n \"SimdMeshImpl only supports export_to_tf_tensor of fully-replicated \"\n \"Tensors. Try reshaping to new dimension names. 
\"\n \" x.shape = %s tensor_layout=%s\"\n % (x.shape, tensor_layout))\n return laid_out_x.one_slice\n\n def import_tf_tensor(self, x, tf_x):\n \"\"\"Import a tf.Tensor, producing a LaidOutTensor.\n\n Args:\n x: a Tensor\n tf_x: a tf.Tensor\n Returns:\n a LaidOutTensor\n \"\"\"\n return self.slice(tf_x, x.shape)\n\n @property\n def supports_control_dependencies(self):\n return False\n\n def einsum(self, equation, *slices):\n \"\"\"Override this for custom einsum implementation.\n\n Args:\n equation: a string\n *slices: a list of tf.Tensor\n Returns:\n a tf.Tensor\n \"\"\"\n return tf.einsum(equation, *slices)\n\n\ndef _ring_2d(m, n):\n \"\"\"Ring-order of a mxn mesh.\n\n If m and n are both even, then we generate a ring like this:\n\n 0 -- 1 -- 2 -- 3\n | | | |\n 15-- 6 -- 5 -- 4\n | | | |\n 14-- 7 -- 8 -- 9\n | | | |\n 13-- 12-- 11-- 10\n\n Args:\n m: an integer\n n: an integer\n Returns:\n a list of mxn pairs\n \"\"\"\n if m == 1:\n return [(0, i) for i in range(n)]\n if n == 1:\n return [(i, 0) for i in range(m)]\n if m % 2 != 0:\n tf.logging.warning(\"Odd dimension\")\n return [(i % m, i // m) for i in range(n * m)]\n ret = [(0, 0)]\n for i in range(m // 2):\n for j in range(1, n):\n ret.append((2 * i, j))\n for j in range(n-1, 0, -1):\n ret.append((2 * i + 1, j))\n for i in range(m-1, 0, -1):\n ret.append((i, 0))\n return ret\n\n\ndef tile_2d(physical_shape, tile_shape,\n outer_name=\"outer\",\n inner_name=\"inner\",\n cores_name=None):\n \"\"\"2D tiling of a 3d physical mesh.\n\n The \"outer\" mesh dimension corresponds to which tile.\n The \"inner\" mesh dimension corresponds to the position within a tile\n of processors.\n\n Optionally, if cores_name is specified, then a 3 dimensional logical mesh\n is returned, with the third dimension representing the two different\n cores within a chip. If cores_name is not specified, then the\n cores-in-a-chip dimension is folded into the inner dimension.\n\n TODO(noam): explain this better.\n\n Example:\n\n tile_2d(physical_shape=[8, 16, 2], tile_shape=[4, 4])\n\n The \"inner\" dimension has size 4x4x2=32 and corresponds to the position\n within a 4x4 tile of processors.\n\n The \"outer\" dimension has size 8/4 * 16/4 = 8, and corresponds to the 8\n tiles in the mesh.\n\n Args:\n physical_shape: a triple of integers [X, Y, cores]\n tile_shape: a pair\n outer_name: a string\n inner_name: a string\n cores_name: an optional string\n\n Returns:\n mesh_shape: a mtf.Shape\n logical_to_physical: a list\n \"\"\"\n logical_to_physical = []\n p0, p1, p2 = physical_shape\n t0, t1 = tile_shape\n tile_ring = _ring_2d(t0, t1)\n tiles_ring = _ring_2d(p0 // t0, p1 // t1)\n for logical_pnum in range(p0 * p1 * p2):\n logical_tile_num = logical_pnum // (t0 * t1 * p2)\n if p2 == 2 and not cores_name:\n # Go through all chips using core 0, then go through all chips\n # backwards using core 1. 
This is better in the case where\n # one of the tile dimensions is 1, so the last chip is not adjacent\n # to the first chip.\n core_in_tile = logical_pnum % (t0 * t1 * p2)\n core_on_chip = core_in_tile // (t0 * t1)\n if core_on_chip == 0:\n logical_pos_in_tile = core_in_tile\n else:\n logical_pos_in_tile = t0 * t1 * p2 - 1 - core_in_tile\n else:\n # Go through all chips once, using both cores on each chip.\n core_on_chip = logical_pnum % p2\n logical_chip_num = logical_pnum // p2\n logical_pos_in_tile = logical_chip_num % (t0 * t1)\n tile_i, tile_j = tile_ring[logical_pos_in_tile]\n tiles_i, tiles_j = tiles_ring[logical_tile_num]\n physical_pnum = core_on_chip + p2 * (\n tile_i * p1 + tile_j +\n tiles_i * p1 * t0 + tiles_j * t1)\n logical_to_physical.append(physical_pnum)\n assert sorted(logical_to_physical) == list(range(p0 * p1 * p2))\n tile_size = t0 * t1 * p2\n num_tiles = p0 * p1 // (t0 * t1)\n if cores_name:\n mesh_shape = mtf.Shape(\n [mtf.Dimension(outer_name, int(num_tiles)),\n mtf.Dimension(inner_name, int(t0 * t1)),\n mtf.Dimension(cores_name, int(p2))])\n else:\n mesh_shape = mtf.Shape(\n [mtf.Dimension(outer_name, int(num_tiles)),\n mtf.Dimension(inner_name, int(tile_size))])\n return mesh_shape, logical_to_physical\n\n\ndef auto_logical_to_physical_tpu(logical_shape,\n physical_shape,\n return_coordinates=False):\n \"\"\"Set up a mapping from logical to physical cores for TPU.\n\n We will try to set up a mapping so that allreduce operations are relatively\n fast, prioritizing the later dimensions in the mesh_shape.\n\n Example:\n\n auto_logical_to_physical_tpu(logical_shape=[16, 8], physical_shape=[8, 8, 2])\n\n Heuristics in this function subject to change.\n\n Args:\n logical_shape: a list of integers\n physical_shape: a list of integers - typically [X, Y, cores]\n return_coordinates: a boolean - return a list of integer lists (coordinates)\n instead of a list of processor indices\n\n Returns:\n logical_to_physical: a permutation of range(product(physical_shape)))\n \"\"\"\n if mtf.list_product(logical_shape) != mtf.list_product(physical_shape):\n raise ValueError(\n \"physical and logical shapes must have the same product \"\n \"physical_shape=%s logical_shape=%s\" % (physical_shape, logical_shape))\n # drop logical dimensions of size 1\n logical_shape = [i for i in logical_shape if i != 1]\n num_cores = mtf.list_product(logical_shape)\n # For physical shapes different from what we are used to [2^a, 2^b, 2],\n # return a simple default value (a lexicographic ordering)\n def _default_value():\n default = list(range(num_cores))\n if return_coordinates:\n default = [mtf.pnum_to_processor_coordinates(i) for i in default]\n return default\n if len(physical_shape) != 3:\n return _default_value()\n p0, p1, p2 = physical_shape\n if p2 != 2:\n return _default_value\n for dimsize in [p0, p1]:\n # if dimsize not a power of 2, give up\n if dimsize & (dimsize - 1):\n return _default_value()\n # At this point, the physical shape has at least 1x1x2=2 cores, so there\n # must be at least one logical dimension.\n assert logical_shape\n if len(logical_shape) == 1:\n # ring of p0 x p1 chips\n ring = _ring_2d(p0, p1)\n logical_to_physical = []\n for logical_pnum in range(num_cores):\n # Go through all chips using core 0, then go through all chips\n # backwards using core 1. 
This is better in the case where\n # one of the tile dimensions is 1, so the last chip is not adjacent\n # to the first chip.\n core_on_chip = logical_pnum // (p0 * p1)\n if core_on_chip == 0:\n chip_num = logical_pnum\n else:\n chip_num = num_cores - 1 - logical_pnum\n i, j = ring[chip_num]\n logical_to_physical.append((i, j, core_on_chip))\n else:\n # We have a p0 x p1 rectangle of chips, which we will tile with rectangular\n # tiles. The first logical dimension correspond to the number of tiles,\n # and the other logical dimensions will correspond to position within a\n # tile.\n num_tiles = logical_shape[0]\n tile_chips = num_cores // num_tiles // p2\n # If we can, we make each tile occupy exactly one row or column of chips.\n # Otherwise, we make each tile approximately square.\n if len(logical_shape) == 2 and tile_chips == p0:\n t0, t1 = [tile_chips, 1]\n elif len(logical_shape) == 2 and tile_chips == p1:\n t0, t1 = [1, tile_chips]\n else:\n # try to make the tile approximately square\n lg_tile_chips = int(math.log(tile_chips, 2))\n t0 = 2 ** (lg_tile_chips // 2)\n # make sure that the tile fits in the mesh - i.e.\n # t0 <= p0\n # t1 == tile_chips // t0 <= p1\n t0 = min(t0, p0)\n t0 = max(t0, tile_chips // p1)\n t1 = tile_chips // t0\n # recursive call to find mapping for one tile\n tile_logical_to_physical = auto_logical_to_physical_tpu(\n logical_shape[1:], [t0, t1, p2], return_coordinates=True)\n tiles_ring = _ring_2d(p0 // t0, p1 // t1)\n logical_to_physical = []\n for logical_pnum in range(num_cores):\n logical_tile_num = logical_pnum // (t0 * t1 * p2)\n logical_pos_in_tile = logical_pnum % (t0 * t1 * p2)\n logical_to_physical.append((\n tiles_ring[logical_tile_num][0] * t0 +\n tile_logical_to_physical[logical_pos_in_tile][0],\n tiles_ring[logical_tile_num][1] * t1 +\n tile_logical_to_physical[logical_pos_in_tile][1],\n tile_logical_to_physical[logical_pos_in_tile][2]))\n if return_coordinates:\n return logical_to_physical\n else:\n return [mtf.processor_coordinates_to_pnum(physical_shape, coord)\n for coord in logical_to_physical]\n"
] |
[
[
"tensorflow.compat.v1.logging.warning",
"tensorflow.compat.v1.group",
"tensorflow.python.tpu.ops.tpu_ops.cross_replica_sum",
"tensorflow.compat.v1.to_bfloat16",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.einsum",
"tensorflow.compat.v1.one_hot",
"tensorflow.python.tpu.ops.tpu_ops.collective_permute",
"tensorflow.compat.v1.zeros_like",
"tensorflow.python.tpu.ops.tpu_ops.tpu_replicated_input",
"tensorflow.compat.v1.get_variable_scope",
"tensorflow.compat.v1.assign",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.Variable",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.gather",
"tensorflow.compat.v1.slice",
"tensorflow.compat.v1.logging.info"
]
] |
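The _ring_2d helper above drives both tile_2d and auto_logical_to_physical_tpu. Restated standalone for the even-m case, it snakes through the mesh and returns along the first column, so consecutive logical cores stay physically adjacent:

def ring_2d(m, n):
    # even-m branch of _ring_2d from the file above
    ret = [(0, 0)]
    for i in range(m // 2):
        for j in range(1, n):
            ret.append((2 * i, j))
        for j in range(n - 1, 0, -1):
            ret.append((2 * i + 1, j))
    for i in range(m - 1, 0, -1):
        ret.append((i, 0))
    return ret

print(ring_2d(4, 4))
# [(0, 0), (0, 1), (0, 2), (0, 3), (1, 3), (1, 2), (1, 1), (2, 1),
#  (2, 2), (2, 3), (3, 3), (3, 2), (3, 1), (3, 0), (2, 0), (1, 0)]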
Laetus/autonomous-drive-simulator
|
[
"4fbb10bfe393914e2d1dcee437f34c20c57ae34b"
] |
[
"util.py"
] |
[
"#!/usr/bin/env python3.5\n\"\"\"Simple autonomous drive simulator\"\"\"\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nplt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'\n\n\ndef kmh2ms(v_in_kmph):\n \" convert velocity from km/h to m/s \"\n return v_in_kmph / 3.6\n\n\ndef get_time_values_from_setup(setup):\n \"\"\" read time settings from dict \"\"\"\n setup_t = setup.get('time')\n t_max = setup_t.get('t_max')\n t = setup_t.get('t')\n delta_t = setup_t.get('delta_t')\n n_steps = int((t_max - t) / delta_t + 1)\n return t, delta_t, n_steps, t_max\n\n\ndef animate_result(vehicles, setup):\n \"\"\" visualize the control of th road \"\"\"\n fig = plt.figure()\n lines = []\n t, delta_t, n_steps, t_max = get_time_values_from_setup(setup)\n ax = plt.axes(xlim=(-10, setup.get('lanes').get('lane_length') + 10),\n ylim=(-1, setup.get('lanes').get('n_lanes')))\n\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=15, metadata=dict(\n artist='Philipp Froehlich'), bitrate=1800)\n\n for i in range(len(vehicles)):\n lobj = ax.plot([], [], linewidth=4)[0]\n lines.append(lobj)\n\n def init():\n for line in lines:\n line.set_data([], [])\n return lines\n\n def animate(frame_number):\n for lnum, line in enumerate(lines):\n act = np.zeros([2, 2])\n # position\n act[:, 0] = vehicles[lnum].position_archive[frame_number, 0]\n # lane\n act[:, 1] = vehicles[lnum].position_archive[frame_number, 1]\n # add saftey_distance\n act[1, 0] += vehicles[lnum].position_archive[frame_number, 2]\n # print(act)\n line.set_data(np.transpose(act))\n return tuple(lines)\n\n plt.xlabel('position [m]')\n\n plt.ylabel(' lane number')\n\n plt.title('animation of the safety bouding box of every car')\n\n anim = animation.FuncAnimation(\n fig, animate, init_func=init, frames=n_steps, interval=50, blit=True)\n\n plt.show()\n print('please wait for plot to save')\n anim.save('result/latest.mp4', writer=writer)\n\n\ndef plot_durations(durations, setup):\n \"\"\" plot durations \"\"\"\n t, delta_t, n_steps, t_max = get_time_values_from_setup(setup)\n fig = plt.figure()\n\n plt.xlabel('Step number ')\n plt.ylabel('Time [s]')\n plt.title('Calculation Times')\n\n ax = plt.axes(xlim=(0, n_steps),\n ylim=(0, max(durations)[0] * 1.1))\n\n x = np.linspace(0, n_steps - 1, n_steps)\n ax.plot(x, durations, 'ro--', linewidth=1,)\n fig.savefig('result/timings.png')\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.title",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.animation.FuncAnimation",
"numpy.transpose",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.ylabel"
]
] |
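animate_result above follows matplotlib's standard init/animate callback pattern. A minimal runnable sketch of that pattern, with a moving sine wave in place of the vehicle boxes (saving to mp4 would additionally require ffmpeg, as configured above):

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig, ax = plt.subplots()
ax.set(xlim=(0, 2 * np.pi), ylim=(-1.2, 1.2))
line, = ax.plot([], [], linewidth=2)
x = np.linspace(0, 2 * np.pi, 200)

def init():
    line.set_data([], [])   # blank first frame, required for blitting
    return line,

def animate(frame_number):
    line.set_data(x, np.sin(x - 0.1 * frame_number))  # shift the wave each frame
    return line,

anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=100, interval=50, blit=True)
plt.show()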
MinhTran0311/imgaug_Minh
|
[
"0266ef52d61a3a892009ae5718fd5d41d8156d6a",
"0266ef52d61a3a892009ae5718fd5d41d8156d6a"
] |
[
"imgaug/augmenters/weather.py",
"test/augmenters/test_geometric.py"
] |
[
"\"\"\"\nAugmenters that create weather effects.\n\nList of augmenters:\n\n * :class:`FastSnowyLandscape`\n * :class:`CloudLayer`\n * :class:`Clouds`\n * :class:`Fog`\n * :class:`SnowflakesLayer`\n * :class:`Snowflakes`\n * :class:`RainLayer`\n * :class:`Rain`\n\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport numpy as np\n\nimport imgaug as ia\nfrom . import meta, arithmetic, blur, contrast, color as colorlib\nfrom .. import parameters as iap\nfrom .. import dtypes as iadt\n\n\nclass FastSnowyLandscape(meta.Augmenter):\n \"\"\"Convert non-snowy landscapes to snowy ones.\n\n This augmenter expects to get an image that roughly shows a landscape.\n\n This augmenter is based on the method proposed in\n https://medium.freecodecamp.org/image-augmentation-make-it-rain-make-it-snow-how-to-modify-a-photo-with-machine-learning-163c0cb3843f?gi=bca4a13e634c\n\n **Supported dtypes**:\n\n * ``uint8``: yes; fully tested\n * ``uint16``: no (1)\n * ``uint32``: no (1)\n * ``uint64``: no (1)\n * ``int8``: no (1)\n * ``int16``: no (1)\n * ``int32``: no (1)\n * ``int64``: no (1)\n * ``float16``: no (1)\n * ``float32``: no (1)\n * ``float64``: no (1)\n * ``float128``: no (1)\n * ``bool``: no (1)\n\n - (1) This augmenter is based on a colorspace conversion to HLS.\n Hence, only RGB ``uint8`` inputs are sensible.\n\n Parameters\n ----------\n lightness_threshold : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n All pixels with lightness in HLS colorspace that is below this value\n will have their lightness increased by `lightness_multiplier`.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the discrete interval ``[a..b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n lightness_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n Multiplier for pixel's lightness value in HLS colorspace.\n Affects all pixels selected via `lightness_threshold`.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the discrete interval ``[a..b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n from_colorspace : str, optional\n The source colorspace of the input images.\n See :func:`~imgaug.augmenters.color.ChangeColorspace.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n Old name for parameter `seed`.\n Its usage will not yet cause a deprecation warning,\n but it is still recommended to use `seed` now.\n Outdated since 0.4.0.\n\n deterministic : bool, optional\n Deprecated since 0.4.0.\n See method ``to_deterministic()`` for an alternative and for\n details about what the 
\"deterministic mode\" actually does.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.FastSnowyLandscape(\n >>> lightness_threshold=140,\n >>> lightness_multiplier=2.5\n >>> )\n\n Search for all pixels in the image with a lightness value in HLS\n colorspace of less than ``140`` and increase their lightness by a factor\n of ``2.5``.\n\n >>> aug = iaa.FastSnowyLandscape(\n >>> lightness_threshold=[128, 200],\n >>> lightness_multiplier=(1.5, 3.5)\n >>> )\n\n Search for all pixels in the image with a lightness value in HLS\n colorspace of less than ``128`` or less than ``200`` (one of these\n values is picked per image) and multiply their lightness by a factor\n of ``x`` with ``x`` being sampled from ``uniform(1.5, 3.5)`` (once per\n image).\n\n >>> aug = iaa.FastSnowyLandscape(\n >>> lightness_threshold=(100, 255),\n >>> lightness_multiplier=(1.0, 4.0)\n >>> )\n\n Similar to the previous example, but the lightness threshold is sampled\n from ``uniform(100, 255)`` (per image) and the multiplier\n from ``uniform(1.0, 4.0)`` (per image). This seems to produce good and\n varied results.\n\n \"\"\"\n\n def __init__(self, lightness_threshold=(100, 255),\n lightness_multiplier=(1.0, 4.0),\n from_colorspace=colorlib.CSPACE_RGB,\n seed=None, name=None,\n random_state=\"deprecated\", deterministic=\"deprecated\"):\n super(FastSnowyLandscape, self).__init__(\n seed=seed, name=name,\n random_state=random_state, deterministic=deterministic)\n\n self.lightness_threshold = iap.handle_continuous_param(\n lightness_threshold, \"lightness_threshold\",\n value_range=(0, 255), tuple_to_uniform=True, list_to_choice=True)\n self.lightness_multiplier = iap.handle_continuous_param(\n lightness_multiplier, \"lightness_multiplier\",\n value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)\n self.from_colorspace = from_colorspace\n\n def _draw_samples(self, augmentables, random_state):\n nb_augmentables = len(augmentables)\n rss = random_state.duplicate(2)\n thresh_samples = self.lightness_threshold.draw_samples(\n (nb_augmentables,), rss[1])\n lmul_samples = self.lightness_multiplier.draw_samples(\n (nb_augmentables,), rss[0])\n return thresh_samples, lmul_samples\n\n # Added in 0.4.0.\n def _augment_batch_(self, batch, random_state, parents, hooks):\n if batch.images is None:\n return batch\n\n images = batch.images\n\n thresh_samples, lmul_samples = self._draw_samples(images, random_state)\n\n gen = enumerate(zip(images, thresh_samples, lmul_samples))\n for i, (image, thresh, lmul) in gen:\n image_hls = colorlib.change_colorspace_(\n image, colorlib.CSPACE_HLS, self.from_colorspace)\n cvt_dtype = image_hls.dtype\n image_hls = image_hls.astype(np.float64)\n lightness = image_hls[..., 1]\n\n lightness[lightness < thresh] *= lmul\n\n image_hls = iadt.restore_dtypes_(image_hls, cvt_dtype)\n image_rgb = colorlib.change_colorspace_(\n image_hls, self.from_colorspace, colorlib.CSPACE_HLS)\n\n batch.images[i] = image_rgb\n\n return batch\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.lightness_threshold, self.lightness_multiplier]\n\n\n# TODO add examples and add these to the overview docs\n# TODO add perspective transform to each cloud layer to make them look more\n# distant?\n# TODO alpha_mean and density overlap - remove one of them\nclass CloudLayer(meta.Augmenter):\n \"\"\"Add a single layer of clouds to an image.\n\n **Supported dtypes**:\n\n * ``uint8``: yes; indirectly tested (1)\n * ``uint16``: no\n * 
``uint32``: no\n * ``uint64``: no\n * ``int8``: no\n * ``int16``: no\n * ``int32``: no\n * ``int64``: no\n * ``float16``: yes; not tested\n * ``float32``: yes; not tested\n * ``float64``: yes; not tested\n * ``float128``: yes; not tested (2)\n * ``bool``: no\n\n - (1) Indirectly tested via tests for :class:`Clouds`` and :class:`Fog`\n - (2) Note that random values are usually sampled as ``int64`` or\n ``float64``, which ``float128`` images would exceed. Note also\n that random values might have to upscaled, which is done\n via :func:`~imgaug.imgaug.imresize_many_images` and has its own\n limited dtype support (includes however floats up to ``64bit``).\n\n Parameters\n ----------\n intensity_mean : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Mean intensity of the clouds (i.e. mean color).\n Recommended to be in the interval ``[190, 255]``.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly\n sampled per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n intensity_freq_exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Exponent of the frequency noise used to add fine intensity to the\n mean intensity.\n Recommended to be in the interval ``[-2.5, -1.5]``.\n See :func:`~imgaug.parameters.FrequencyNoise.__init__` for details.\n\n intensity_coarse_scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Standard deviation of the gaussian distribution used to add more\n localized intensity to the mean intensity. Sampled in low resolution\n space, i.e. affects final intensity on a coarse level.\n Recommended to be in the interval ``(0, 10]``.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n alpha_min : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Minimum alpha when blending cloud noise with the image.\n High values will lead to clouds being \"everywhere\".\n Recommended to usually be at around ``0.0`` for clouds and ``>0`` for\n fog.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n alpha_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Multiplier for the sampled alpha values. 
High values will lead to\n denser clouds wherever they are visible.\n Recommended to be in the interval ``[0.3, 1.0]``.\n Note that this parameter currently overlaps with `density_multiplier`,\n which is applied a bit later to the alpha mask.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n alpha_size_px_max : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Controls the image size at which the alpha mask is sampled.\n Lower values will lead to coarser alpha masks and hence larger\n clouds (and empty areas).\n See :func:`~imgaug.parameters.FrequencyNoise.__init__` for details.\n\n alpha_freq_exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Exponent of the frequency noise used to sample the alpha mask.\n Similarly to `alpha_size_max_px`, lower values will lead to coarser\n alpha patterns.\n Recommended to be in the interval ``[-4.0, -1.5]``.\n See :func:`~imgaug.parameters.FrequencyNoise.__init__` for details.\n\n sparsity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Exponent applied late to the alpha mask. Lower values will lead to\n coarser cloud patterns, higher values to finer patterns.\n Recommended to be somewhere around ``1.0``.\n Do not deviate far from that value, otherwise the alpha mask might\n get weird patterns with sudden fall-offs to zero that look very\n unnatural.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n density_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Late multiplier for the alpha mask, similar to `alpha_multiplier`.\n Set this higher to get \"denser\" clouds wherever they are visible.\n Recommended to be around ``[0.5, 1.5]``.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n Old name for parameter `seed`.\n Its usage will not yet cause a deprecation warning,\n but it is still recommended to use `seed` now.\n Outdated since 0.4.0.\n\n deterministic : bool, optional\n Deprecated since 0.4.0.\n See method ``to_deterministic()`` for an alternative and for\n details about what the 
\"deterministic mode\" actually does.\n\n \"\"\"\n\n def __init__(self, intensity_mean, intensity_freq_exponent,\n intensity_coarse_scale, alpha_min, alpha_multiplier,\n alpha_size_px_max, alpha_freq_exponent, sparsity,\n density_multiplier,\n seed=None, name=None,\n random_state=\"deprecated\", deterministic=\"deprecated\"):\n super(CloudLayer, self).__init__(\n seed=seed, name=name,\n random_state=random_state, deterministic=deterministic)\n self.intensity_mean = iap.handle_continuous_param(\n intensity_mean, \"intensity_mean\")\n self.intensity_freq_exponent = intensity_freq_exponent\n self.intensity_coarse_scale = intensity_coarse_scale\n self.alpha_min = iap.handle_continuous_param(alpha_min, \"alpha_min\")\n self.alpha_multiplier = iap.handle_continuous_param(\n alpha_multiplier, \"alpha_multiplier\")\n self.alpha_size_px_max = alpha_size_px_max\n self.alpha_freq_exponent = alpha_freq_exponent\n self.sparsity = iap.handle_continuous_param(sparsity, \"sparsity\")\n self.density_multiplier = iap.handle_continuous_param(\n density_multiplier, \"density_multiplier\")\n\n # Added in 0.4.0.\n def _augment_batch_(self, batch, random_state, parents, hooks):\n if batch.images is None:\n return batch\n\n images = batch.images\n\n rss = random_state.duplicate(len(images))\n for i, (image, rs) in enumerate(zip(images, rss)):\n batch.images[i] = self.draw_on_image(image, rs)\n return batch\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.intensity_mean,\n self.alpha_min,\n self.alpha_multiplier,\n self.alpha_size_px_max,\n self.alpha_freq_exponent,\n self.intensity_freq_exponent,\n self.sparsity,\n self.density_multiplier,\n self.intensity_coarse_scale]\n\n def draw_on_image(self, image, random_state):\n iadt.gate_dtypes_strs(\n image,\n allowed=\"uint8 float16 float32 float64 float128\",\n disallowed=\"bool uint16 uint32 uint64 int8 int16 int32 int64\",\n augmenter=self\n )\n\n alpha, intensity = self.generate_maps(image, random_state)\n alpha = alpha[..., np.newaxis]\n intensity = intensity[..., np.newaxis]\n\n if image.dtype.kind == \"f\":\n intensity = intensity.astype(image.dtype)\n return (1 - alpha) * image + alpha * intensity\n\n intensity = np.clip(intensity, 0, 255)\n # TODO use blend_alpha_() here\n return np.clip(\n (1 - alpha) * image.astype(alpha.dtype)\n + alpha * intensity.astype(alpha.dtype),\n 0,\n 255\n ).astype(np.uint8)\n\n def generate_maps(self, image, random_state):\n intensity_mean_sample = self.intensity_mean.draw_sample(random_state)\n alpha_min_sample = self.alpha_min.draw_sample(random_state)\n alpha_multiplier_sample = \\\n self.alpha_multiplier.draw_sample(random_state)\n alpha_size_px_max = self.alpha_size_px_max\n intensity_freq_exponent = self.intensity_freq_exponent\n alpha_freq_exponent = self.alpha_freq_exponent\n sparsity_sample = self.sparsity.draw_sample(random_state)\n density_multiplier_sample = \\\n self.density_multiplier.draw_sample(random_state)\n\n height, width = image.shape[0:2]\n rss_alpha, rss_intensity = random_state.duplicate(2)\n\n intensity_coarse = self._generate_intensity_map_coarse(\n height, width, intensity_mean_sample,\n iap.Normal(0, scale=self.intensity_coarse_scale),\n rss_intensity\n )\n intensity_fine = self._generate_intensity_map_fine(\n height, width, intensity_mean_sample, intensity_freq_exponent,\n rss_intensity)\n intensity = intensity_coarse + intensity_fine\n\n alpha = self._generate_alpha_mask(\n height, width, alpha_min_sample, 
alpha_multiplier_sample,\n alpha_freq_exponent, alpha_size_px_max, sparsity_sample,\n density_multiplier_sample, rss_alpha)\n\n return alpha, intensity\n\n @classmethod\n def _generate_intensity_map_coarse(cls, height, width, intensity_mean,\n intensity_local_offset, random_state):\n # TODO (8, 8) might be too simplistic for some image sizes\n height_intensity, width_intensity = (8, 8)\n intensity = (\n intensity_mean\n + intensity_local_offset.draw_samples(\n (height_intensity, width_intensity), random_state)\n )\n intensity = ia.imresize_single_image(\n intensity, (height, width), interpolation=\"cubic\")\n\n return intensity\n\n @classmethod\n def _generate_intensity_map_fine(cls, height, width, intensity_mean,\n exponent, random_state):\n intensity_details_generator = iap.FrequencyNoise(\n exponent=exponent,\n size_px_max=max(height, width, 1), # 1 here for case H, W being 0\n upscale_method=\"cubic\"\n )\n intensity_details = intensity_details_generator.draw_samples(\n (height, width), random_state)\n return intensity_mean * ((2*intensity_details - 1.0)/5.0)\n\n @classmethod\n def _generate_alpha_mask(cls, height, width, alpha_min, alpha_multiplier,\n exponent, alpha_size_px_max, sparsity,\n density_multiplier, random_state):\n alpha_generator = iap.FrequencyNoise(\n exponent=exponent,\n size_px_max=alpha_size_px_max,\n upscale_method=\"cubic\"\n )\n alpha_local = alpha_generator.draw_samples(\n (height, width), random_state)\n alpha = alpha_min + (alpha_multiplier * alpha_local)\n alpha = (alpha ** sparsity) * density_multiplier\n alpha = np.clip(alpha, 0.0, 1.0)\n\n return alpha\n\n\n# TODO add vertical gradient alpha to have clouds only at skylevel/groundlevel\n# TODO add configurable parameters\nclass Clouds(meta.SomeOf):\n \"\"\"\n Add clouds to images.\n\n This is a wrapper around :class:`~imgaug.augmenters.weather.CloudLayer`.\n It executes 1 to 2 layers per image, leading to varying densities and\n frequency patterns of clouds.\n\n This augmenter seems to be fairly robust w.r.t. the image size. Tested\n with ``96x128``, ``192x256`` and ``960x1280``.\n\n **Supported dtypes**:\n\n * ``uint8``: yes; tested\n * ``uint16``: no (1)\n * ``uint32``: no (1)\n * ``uint64``: no (1)\n * ``int8``: no (1)\n * ``int16``: no (1)\n * ``int32``: no (1)\n * ``int64``: no (1)\n * ``float16``: no (1)\n * ``float32``: no (1)\n * ``float64``: no (1)\n * ``float128``: no (1)\n * ``bool``: no (1)\n\n - (1) Parameters of this augmenter are optimized for the value range\n of ``uint8``. 
While other dtypes may be accepted, they will lead\n to images augmented in ways inappropriate for the respective\n dtype.\n\n Parameters\n ----------\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n Old name for parameter `seed`.\n Its usage will not yet cause a deprecation warning,\n but it is still recommended to use `seed` now.\n Outdated since 0.4.0.\n\n deterministic : bool, optional\n Deprecated since 0.4.0.\n See method ``to_deterministic()`` for an alternative and for\n details about what the \"deterministic mode\" actually does.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.Clouds()\n\n Create an augmenter that adds clouds to images.\n\n \"\"\"\n\n def __init__(self,\n seed=None, name=None,\n random_state=\"deprecated\", deterministic=\"deprecated\"):\n layers = [\n CloudLayer(\n intensity_mean=(196, 255),\n intensity_freq_exponent=(-2.5, -2.0),\n intensity_coarse_scale=10,\n alpha_min=0,\n alpha_multiplier=(0.25, 0.75),\n alpha_size_px_max=(2, 8),\n alpha_freq_exponent=(-2.5, -2.0),\n sparsity=(0.8, 1.0),\n density_multiplier=(0.5, 1.0),\n seed=seed,\n random_state=random_state,\n deterministic=deterministic\n ),\n CloudLayer(\n intensity_mean=(196, 255),\n intensity_freq_exponent=(-2.0, -1.0),\n intensity_coarse_scale=10,\n alpha_min=0,\n alpha_multiplier=(0.5, 1.0),\n alpha_size_px_max=(64, 128),\n alpha_freq_exponent=(-2.0, -1.0),\n sparsity=(1.0, 1.4),\n density_multiplier=(0.8, 1.5),\n seed=seed,\n random_state=random_state,\n deterministic=deterministic\n )\n ]\n\n super(Clouds, self).__init__(\n (1, 2),\n children=layers,\n random_order=False,\n seed=seed, name=name,\n random_state=random_state, deterministic=deterministic)\n\n\n# TODO add vertical gradient alpha to have fog only at skylevel/groundlevel\n# TODO add configurable parameters\nclass Fog(CloudLayer):\n \"\"\"Add fog to images.\n\n This is a wrapper around :class:`~imgaug.augmenters.weather.CloudLayer`.\n It executes a single layer per image with a configuration leading to\n fairly dense clouds with low-frequency patterns.\n\n This augmenter seems to be fairly robust w.r.t. the image size. Tested\n with ``96x128``, ``192x256`` and ``960x1280``.\n\n **Supported dtypes**:\n\n * ``uint8``: yes; tested\n * ``uint16``: no (1)\n * ``uint32``: no (1)\n * ``uint64``: no (1)\n * ``int8``: no (1)\n * ``int16``: no (1)\n * ``int32``: no (1)\n * ``int64``: no (1)\n * ``float16``: no (1)\n * ``float32``: no (1)\n * ``float64``: no (1)\n * ``float128``: no (1)\n * ``bool``: no (1)\n\n - (1) Parameters of this augmenter are optimized for the value range\n of ``uint8``. 
While other dtypes may be accepted, they will lead\n to images augmented in ways inappropriate for the respective\n dtype.\n\n Parameters\n ----------\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n Old name for parameter `seed`.\n Its usage will not yet cause a deprecation warning,\n but it is still recommended to use `seed` now.\n Outdated since 0.4.0.\n\n deterministic : bool, optional\n Deprecated since 0.4.0.\n See method ``to_deterministic()`` for an alternative and for\n details about what the \"deterministic mode\" actually does.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.Fog()\n\n Create an augmenter that adds fog to images.\n\n \"\"\"\n\n def __init__(self,\n intensity_mean=(220, 255),\n alpha_min=(0.7, 0.9),\n alpha_multiplier=0.3,\n density_multiplier=(0.4, 0.9),\n seed=None, name=None,\n random_state=\"deprecated\", deterministic=\"deprecated\"):\n super(Fog, self).__init__(\n intensity_mean=intensity_mean,\n intensity_freq_exponent=(-2.0, -1.5),\n intensity_coarse_scale=2,\n alpha_min=alpha_min,\n alpha_multiplier=alpha_multiplier,\n alpha_size_px_max=(2, 8),\n alpha_freq_exponent=(-4.0, -2.0),\n sparsity=0.9,\n density_multiplier=density_multiplier,\n seed=seed, name=name,\n random_state=random_state, deterministic=deterministic)\n\n\n# TODO add examples and add these to the overview docs\n# TODO snowflakes are all almost 100% white, add some grayish tones and\n# maybe color to them\nclass SnowflakesLayer(meta.Augmenter):\n \"\"\"Add a single layer of falling snowflakes to images.\n\n **Supported dtypes**:\n\n * ``uint8``: yes; indirectly tested (1)\n * ``uint16``: no\n * ``uint32``: no\n * ``uint64``: no\n * ``int8``: no\n * ``int16``: no\n * ``int32``: no\n * ``int64``: no\n * ``float16``: no\n * ``float32``: no\n * ``float64``: no\n * ``float128``: no\n * ``bool``: no\n\n - (1) indirectly tested via tests for :class:`Snowflakes`\n\n Parameters\n ----------\n density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Density of the snowflake layer, as a probability of each pixel in\n low resolution space to be a snowflake.\n Valid values are in the interval ``[0.0, 1.0]``.\n Recommended to be in the interval ``[0.01, 0.075]``.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size uniformity of the snowflakes. 
Higher values denote more\n similarly sized snowflakes.\n Valid values are in the interval ``[0.0, 1.0]``.\n Recommended to be around ``0.5``.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size of the snowflakes. This parameter controls the resolution at\n which snowflakes are sampled. Higher values mean that the resolution\n is closer to the input image's resolution and hence each sampled\n snowflake will be smaller (because of the smaller pixel size).\n\n Valid values are in the interval ``(0.0, 1.0]``.\n Recommended values:\n\n * On 96x128 a value of ``(0.1, 0.4)`` worked well.\n * On 192x256 a value of ``(0.2, 0.7)`` worked well.\n * On 960x1280 a value of ``(0.7, 0.95)`` worked well.\n\n Datatype behaviour:\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Controls the size uniformity of the snowflakes. Higher values mean\n that the snowflakes are more similarly sized.\n Valid values are in the interval ``[0.0, 1.0]``.\n Recommended to be around ``0.5``.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Angle in degrees of motion blur applied to the snowflakes, where\n ``0.0`` is motion blur that points straight upwards.\n Recommended to be in the interval ``[-30, 30]``.\n See also :func:`~imgaug.augmenters.blur.MotionBlur.__init__`.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Perceived falling speed of the snowflakes. This parameter controls the\n motion blur's kernel size. It follows roughly the form\n ``kernel_size = image_size * speed``. 
Hence, values around ``1.0``\n denote that the motion blur should \"stretch\" each snowflake over the\n whole image.\n\n Valid values are in the interval ``[0.0, 1.0]``.\n Recommended values:\n\n * On 96x128 a value of ``(0.01, 0.05)`` worked well.\n * On 192x256 a value of ``(0.007, 0.03)`` worked well.\n * On 960x1280 a value of ``(0.001, 0.03)`` worked well.\n\n Datatype behaviour:\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n blur_sigma_fraction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Standard deviation (as a fraction of the image size) of gaussian blur\n applied to the snowflakes.\n Valid values are in the interval ``[0.0, 1.0]``.\n Recommended to be in the interval ``[0.0001, 0.001]``. May still\n require tinkering based on image size.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n blur_sigma_limits : tuple of float, optional\n Controls allowed min and max values of `blur_sigma_fraction`\n after(!) multiplication with the image size. First value is the\n minimum, second value is the maximum. Values outside of that range\n will be clipped to be within that range. This prevents extreme\n values for very small or large images.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n Old name for parameter `seed`.\n Its usage will not yet cause a deprecation warning,\n but it is still recommended to use `seed` now.\n Outdated since 0.4.0.\n\n deterministic : bool, optional\n Deprecated since 0.4.0.\n See method ``to_deterministic()`` for an alternative and for\n details about what the \"deterministic mode\" actually does.\n\n \"\"\"\n\n def __init__(self, density, density_uniformity, flake_size,\n flake_size_uniformity, angle, speed, blur_sigma_fraction,\n blur_sigma_limits=(0.5, 3.75),\n seed=None, name=None,\n random_state=\"deprecated\", deterministic=\"deprecated\"):\n super(SnowflakesLayer, self).__init__(\n seed=seed, name=name,\n random_state=random_state, deterministic=deterministic)\n self.density = density\n self.density_uniformity = iap.handle_continuous_param(\n density_uniformity, \"density_uniformity\", value_range=(0.0, 1.0))\n self.flake_size = iap.handle_continuous_param(\n flake_size, \"flake_size\", value_range=(0.0+1e-4, 1.0))\n self.flake_size_uniformity = iap.handle_continuous_param(\n flake_size_uniformity, \"flake_size_uniformity\",\n value_range=(0.0, 1.0))\n self.angle = iap.handle_continuous_param(angle, \"angle\")\n self.speed = iap.handle_continuous_param(\n speed, 
\"speed\", value_range=(0.0, 1.0))\n self.blur_sigma_fraction = iap.handle_continuous_param(\n blur_sigma_fraction, \"blur_sigma_fraction\", value_range=(0.0, 1.0))\n\n # (min, max), same for all images\n self.blur_sigma_limits = blur_sigma_limits\n\n # (height, width), same for all images\n self.gate_noise_size = (8, 8)\n\n # Added in 0.4.0.\n def _augment_batch_(self, batch, random_state, parents, hooks):\n if batch.images is None:\n return batch\n\n images = batch.images\n\n rss = random_state.duplicate(len(images))\n for i, (image, rs) in enumerate(zip(images, rss)):\n batch.images[i] = self.draw_on_image(image, rs)\n return batch\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.density,\n self.density_uniformity,\n self.flake_size,\n self.flake_size_uniformity,\n self.angle,\n self.speed,\n self.blur_sigma_fraction,\n self.blur_sigma_limits,\n self.gate_noise_size]\n\n def draw_on_image(self, image, random_state):\n assert image.ndim == 3, (\n \"Expected input image to be three-dimensional, \"\n \"got %d dimensions.\" % (image.ndim,))\n assert image.shape[2] in [1, 3], (\n \"Expected to get image with a channel axis of size 1 or 3, \"\n \"got %d (shape: %s)\" % (image.shape[2], image.shape))\n\n rss = random_state.duplicate(2)\n\n flake_size_sample = self.flake_size.draw_sample(random_state)\n flake_size_uniformity_sample = self.flake_size_uniformity.draw_sample(\n random_state)\n angle_sample = self.angle.draw_sample(random_state)\n speed_sample = self.speed.draw_sample(random_state)\n blur_sigma_fraction_sample = self.blur_sigma_fraction.draw_sample(\n random_state)\n\n height, width, nb_channels = image.shape\n downscale_factor = np.clip(1.0 - flake_size_sample, 0.001, 1.0)\n height_down = max(1, int(height*downscale_factor))\n width_down = max(1, int(width*downscale_factor))\n noise = self._generate_noise(\n height_down,\n width_down,\n self.density,\n rss[0]\n )\n\n # gate the sampled noise via noise in range [0.0, 1.0]\n # this leads to less flakes in some areas of the image and more in\n # other areas\n gate_noise = iap.Beta(1.0, 1.0 - self.density_uniformity)\n noise = self._gate(noise, gate_noise, self.gate_noise_size, rss[1])\n noise = ia.imresize_single_image(noise, (height, width),\n interpolation=\"cubic\")\n\n # apply a bit of gaussian blur and then motion blur according to\n # angle and speed\n sigma = max(height, width) * blur_sigma_fraction_sample\n sigma = np.clip(sigma,\n self.blur_sigma_limits[0], self.blur_sigma_limits[1])\n noise_small_blur = self._blur(noise, sigma)\n noise_small_blur = self._motion_blur(noise_small_blur,\n angle=angle_sample,\n speed=speed_sample,\n random_state=random_state)\n\n noise_small_blur_rgb = self._postprocess_noise(\n noise_small_blur, flake_size_uniformity_sample, nb_channels)\n\n return self._blend(image, speed_sample, noise_small_blur_rgb)\n\n @classmethod\n def _generate_noise(cls, height, width, density, random_state):\n noise = arithmetic.Salt(p=density, random_state=random_state)\n return noise.augment_image(np.zeros((height, width), dtype=np.uint8))\n\n @classmethod\n def _gate(cls, noise, gate_noise, gate_size, random_state):\n # the beta distribution here has most of its weight around 1.0 and\n # will only rarely sample values around 0.0 the average of the\n # sampled values seems to be at around 0.6-0.75\n gate_noise = gate_noise.draw_samples(gate_size, random_state)\n gate_noise_up = ia.imresize_single_image(gate_noise, noise.shape[0:2],\n 
interpolation=\"cubic\")\n gate_noise_up = np.clip(gate_noise_up, 0.0, 1.0)\n return np.clip(\n noise.astype(np.float32) * gate_noise_up, 0, 255\n ).astype(np.uint8)\n\n @classmethod\n def _blur(cls, noise, sigma):\n return blur.blur_gaussian_(noise, sigma=sigma)\n\n @classmethod\n def _motion_blur(cls, noise, angle, speed, random_state):\n size = max(noise.shape[0:2])\n k = int(speed * size)\n if k <= 1:\n return noise\n\n # we use max(k, 3) here because MotionBlur errors for anything less\n # than 3\n blurer = blur.MotionBlur(\n k=max(k, 3), angle=angle, direction=1.0, random_state=random_state)\n return blurer.augment_image(noise)\n\n # Added in 0.4.0.\n @classmethod\n def _postprocess_noise(cls, noise_small_blur,\n flake_size_uniformity_sample, nb_channels):\n # use contrast adjustment of noise to make the flake size a bit less\n # uniform then readjust the noise values to make them more visible\n # again\n gain = 1.0 + 2*(1 - flake_size_uniformity_sample)\n gain_adj = 1.0 + 5*(1 - flake_size_uniformity_sample)\n noise_small_blur = contrast.GammaContrast(gain).augment_image(\n noise_small_blur)\n noise_small_blur = noise_small_blur.astype(np.float32) * gain_adj\n noise_small_blur_rgb = np.tile(\n noise_small_blur[..., np.newaxis], (1, 1, nb_channels))\n return noise_small_blur_rgb\n\n # Added in 0.4.0.\n @classmethod\n def _blend(cls, image, speed_sample, noise_small_blur_rgb):\n # blend:\n # sum for a bit of glowy, hardly visible flakes\n # max for the main flakes\n image_f32 = image.astype(np.float32)\n image_f32 = cls._blend_by_sum(\n image_f32, (0.1 + 20*speed_sample) * noise_small_blur_rgb)\n image_f32 = cls._blend_by_max(\n image_f32, (1.0 + 20*speed_sample) * noise_small_blur_rgb)\n return image_f32\n\n # TODO replace this by a function from module blend.py\n @classmethod\n def _blend_by_sum(cls, image_f32, noise_small_blur_rgb):\n image_f32 = image_f32 + noise_small_blur_rgb\n return np.clip(image_f32, 0, 255).astype(np.uint8)\n\n # TODO replace this by a function from module blend.py\n @classmethod\n def _blend_by_max(cls, image_f32, noise_small_blur_rgb):\n image_f32 = np.maximum(image_f32, noise_small_blur_rgb)\n return np.clip(image_f32, 0, 255).astype(np.uint8)\n\n\nclass Snowflakes(meta.SomeOf):\n \"\"\"Add falling snowflakes to images.\n\n This is a wrapper around\n :class:`~imgaug.augmenters.weather.SnowflakesLayer`. It executes 1 to 3\n layers per image.\n\n **Supported dtypes**:\n\n * ``uint8``: yes; tested\n * ``uint16``: no (1)\n * ``uint32``: no (1)\n * ``uint64``: no (1)\n * ``int8``: no (1)\n * ``int16``: no (1)\n * ``int32``: no (1)\n * ``int64``: no (1)\n * ``float16``: no (1)\n * ``float32``: no (1)\n * ``float64``: no (1)\n * ``float128``: no (1)\n * ``bool``: no (1)\n\n - (1) Parameters of this augmenter are optimized for the value range\n of ``uint8``. 
While other dtypes may be accepted, they will lead\n to images augmented in ways inappropriate for the respective\n dtype.\n\n Parameters\n ----------\n density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Density of the snowflake layer, as a probability of each pixel in\n low resolution space to be a snowflake.\n Valid values are in the interval ``[0.0, 1.0]``.\n Recommended to be in the interval ``[0.01, 0.075]``.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size uniformity of the snowflakes. Higher values denote more\n similarly sized snowflakes.\n Valid values are in the interval ``[0.0, 1.0]``.\n Recommended to be around ``0.5``.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size of the snowflakes. This parameter controls the resolution at\n which snowflakes are sampled. Higher values mean that the resolution\n is closer to the input image's resolution and hence each sampled\n snowflake will be smaller (because of the smaller pixel size).\n\n Valid values are in the interval ``(0.0, 1.0]``.\n Recommended values:\n\n * On ``96x128`` a value of ``(0.1, 0.4)`` worked well.\n * On ``192x256`` a value of ``(0.2, 0.7)`` worked well.\n * On ``960x1280`` a value of ``(0.7, 0.95)`` worked well.\n\n Datatype behaviour:\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Controls the size uniformity of the snowflakes. 
Higher values mean\n that the snowflakes are more similarly sized.\n Valid values are in the interval ``[0.0, 1.0]``.\n Recommended to be around ``0.5``.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Angle in degrees of motion blur applied to the snowflakes, where\n ``0.0`` is motion blur that points straight upwards.\n Recommended to be in the interval ``[-30, 30]``.\n See also :func:`~imgaug.augmenters.blur.MotionBlur.__init__`.\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Perceived falling speed of the snowflakes. This parameter controls the\n motion blur's kernel size. It follows roughly the form\n ``kernel_size = image_size * speed``. Hence, values around ``1.0``\n denote that the motion blur should \"stretch\" each snowflake over\n the whole image.\n\n Valid values are in the interval ``[0.0, 1.0]``.\n Recommended values:\n\n * On ``96x128`` a value of ``(0.01, 0.05)`` worked well.\n * On ``192x256`` a value of ``(0.007, 0.03)`` worked well.\n * On ``960x1280`` a value of ``(0.001, 0.03)`` worked well.\n\n Datatype behaviour:\n\n * If a ``number``, then that value will always be used.\n * If a ``tuple`` ``(a, b)``, then a value will be uniformly sampled\n per image from the interval ``[a, b]``.\n * If a ``list``, then a random value will be sampled from that\n ``list`` per image.\n * If a ``StochasticParameter``, then a value will be sampled\n per image from that parameter.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n Old name for parameter `seed`.\n Its usage will not yet cause a deprecation warning,\n but it is still recommended to use `seed` now.\n Outdated since 0.4.0.\n\n deterministic : bool, optional\n Deprecated since 0.4.0.\n See method ``to_deterministic()`` for an alternative and for\n details about what the \"deterministic mode\" actually does.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05))\n\n Add snowflakes to small images (around ``96x128``).\n\n >>> aug = iaa.Snowflakes(flake_size=(0.2, 0.7), speed=(0.007, 0.03))\n\n Add snowflakes to medium-sized images (around ``192x256``).\n\n >>> aug = iaa.Snowflakes(flake_size=(0.7, 0.95), speed=(0.001, 0.03))\n\n Add snowflakes to large images (around ``960x1280``).\n\n \"\"\"\n\n def __init__(self, 
density=(0.005, 0.075), density_uniformity=(0.3, 0.9),\n flake_size=(0.2, 0.7), flake_size_uniformity=(0.4, 0.8),\n angle=(-30, 30), speed=(0.007, 0.03),\n seed=None, name=None,\n random_state=\"deprecated\", deterministic=\"deprecated\"):\n layer = SnowflakesLayer(\n density=density,\n density_uniformity=density_uniformity,\n flake_size=flake_size,\n flake_size_uniformity=flake_size_uniformity,\n angle=angle,\n speed=speed,\n blur_sigma_fraction=(0.0001, 0.001),\n seed=seed,\n random_state=random_state,\n deterministic=deterministic\n )\n\n super(Snowflakes, self).__init__(\n (1, 3),\n children=[layer.deepcopy() for _ in range(3)],\n random_order=False,\n seed=seed, name=name,\n random_state=random_state, deterministic=deterministic)\n\n\nclass RainLayer(SnowflakesLayer):\n \"\"\"Add a single layer of falling raindrops to images.\n\n Added in 0.4.0.\n\n **Supported dtypes**:\n\n * ``uint8``: yes; indirectly tested (1)\n * ``uint16``: no\n * ``uint32``: no\n * ``uint64``: no\n * ``int8``: no\n * ``int16``: no\n * ``int32``: no\n * ``int64``: no\n * ``float16``: no\n * ``float32``: no\n * ``float64``: no\n * ``float128``: no\n * ``bool``: no\n\n - (1) indirectly tested via tests for :class:`Rain`\n\n Parameters\n ----------\n density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.\n\n density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.\n\n drop_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Same as `flake_size` in\n :class:`~imgaug.augmenters.weather.SnowflakesLayer`.\n\n drop_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Same as `flake_size_uniformity` in\n :class:`~imgaug.augmenters.weather.SnowflakesLayer`.\n\n angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.\n\n speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.\n\n blur_sigma_fraction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.\n\n blur_sigma_limits : tuple of float, optional\n Same as in :class:`~imgaug.augmenters.weather.SnowflakesLayer`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n Old name for parameter `seed`.\n Its usage will not yet cause a deprecation warning,\n but it is still recommended to use `seed` now.\n Outdated since 0.4.0.\n\n deterministic : bool, optional\n Deprecated since 0.4.0.\n See method ``to_deterministic()`` for an alternative and for\n details about what the \"deterministic mode\" actually does.\n\n \"\"\"\n\n # Added in 0.4.0.\n def __init__(self, density, density_uniformity, drop_size,\n drop_size_uniformity, angle, speed, 
blur_sigma_fraction,\n blur_sigma_limits=(0.5, 3.75),\n seed=None, name=None,\n random_state=\"deprecated\", deterministic=\"deprecated\"):\n super(RainLayer, self).__init__(\n density, density_uniformity, drop_size,\n drop_size_uniformity, angle, speed, blur_sigma_fraction,\n blur_sigma_limits=blur_sigma_limits,\n seed=seed, name=name,\n random_state=random_state, deterministic=deterministic)\n\n # Added in 0.4.0.\n @classmethod\n def _blur(cls, noise, sigma):\n return noise\n\n # Added in 0.4.0.\n @classmethod\n def _postprocess_noise(cls, noise_small_blur,\n flake_size_uniformity_sample, nb_channels):\n noise_small_blur_rgb = np.tile(\n noise_small_blur[..., np.newaxis], (1, 1, nb_channels))\n return noise_small_blur_rgb\n\n # Added in 0.4.0.\n @classmethod\n def _blend(cls, image, speed_sample, noise_small_blur_rgb):\n # We set the mean color based on the noise here. That's a pseudo-random\n # approach that saves us from adding the random state as a parameter.\n # Note that the sum of noise_small_blur_rgb can be 0 when at least one\n # image axis size is 0.\n noise_sum = np.sum(noise_small_blur_rgb.flat[0:1000])\n noise_sum = noise_sum if noise_sum > 0 else 1\n drop_mean_color = 110 + (240 - 110) % noise_sum\n noise_small_blur_rgb = noise_small_blur_rgb / 255.0\n # The 1.3 multiplier increases the visibility of drops a bit.\n noise_small_blur_rgb = np.clip(1.3 * noise_small_blur_rgb, 0, 1.0)\n image_f32 = image.astype(np.float32)\n image_f32 = (\n (1 - noise_small_blur_rgb) * image_f32\n + noise_small_blur_rgb * drop_mean_color\n )\n return np.clip(image_f32, 0, 255).astype(np.uint8)\n\n\nclass Rain(meta.SomeOf):\n \"\"\"Add falling raindrops to images.\n\n This is a wrapper around\n :class:`~imgaug.augmenters.weather.RainLayer`. It executes 1 to 3\n layers per image.\n\n .. note::\n\n This augmenter currently seems to work best for medium-sized images\n around ``192x256``. For smaller images, you may want to increase the\n `speed` value to e.g. ``(0.1, 0.3)``, otherwise the drops tend to\n look like snowflakes. For larger images, you may want to increase\n the `drop_size` to e.g. ``(0.10, 0.20)``.\n\n Added in 0.4.0.\n\n **Supported dtypes**:\n\n * ``uint8``: yes; tested\n * ``uint16``: no (1)\n * ``uint32``: no (1)\n * ``uint64``: no (1)\n * ``int8``: no (1)\n * ``int16``: no (1)\n * ``int32``: no (1)\n * ``int64``: no (1)\n * ``float16``: no (1)\n * ``float32``: no (1)\n * ``float64``: no (1)\n * ``float128``: no (1)\n * ``bool``: no (1)\n\n - (1) Parameters of this augmenter are optimized for the value range\n of ``uint8``. 
While other dtypes may be accepted, they will lead\n to images augmented in ways inappropriate for the respective\n dtype.\n\n Parameters\n ----------\n drop_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n See :class:`~imgaug.augmenters.weather.RainLayer`.\n\n speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n See :class:`~imgaug.augmenters.weather.RainLayer`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n Old name for parameter `seed`.\n Its usage will not yet cause a deprecation warning,\n but it is still recommended to use `seed` now.\n Outdated since 0.4.0.\n\n deterministic : bool, optional\n Deprecated since 0.4.0.\n See method ``to_deterministic()`` for an alternative and for\n details about what the \"deterministic mode\" actually does.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.Rain(speed=(0.1, 0.3))\n\n Add rain to small images (around ``96x128``).\n\n >>> aug = iaa.Rain()\n\n Add rain to medium sized images (around ``192x256``).\n\n >>> aug = iaa.Rain(drop_size=(0.10, 0.20))\n\n Add rain to large images (around ``960x1280``).\n\n \"\"\"\n\n # Added in 0.4.0.\n def __init__(self, nb_iterations=(1, 3),\n density=(0.03, 0.14),\n density_uniformity=(0.8, 1.0),\n drop_size_uniformity=(0.2, 0.5),\n angle=(-15, 15),\n blur_sigma_fraction=(0.001, 0.001),\n drop_size=(0.01, 0.02),\n speed=(0.04, 0.20),\n seed=None, name=None,\n random_state=\"deprecated\", deterministic=\"deprecated\"):\n layer = RainLayer(\n density=density,\n density_uniformity=density_uniformity,\n drop_size=drop_size,\n drop_size_uniformity=drop_size_uniformity,\n angle=angle,\n speed=speed,\n blur_sigma_fraction=blur_sigma_fraction,\n seed=seed,\n random_state=random_state,\n deterministic=deterministic\n )\n\n super(Rain, self).__init__(\n nb_iterations,\n children=[layer.deepcopy() for _ in range(3)],\n random_order=False,\n seed=seed, name=name,\n random_state=random_state, deterministic=deterministic)\n",
"from __future__ import print_function, division, absolute_import\n\nimport itertools\nimport warnings\nimport sys\n# unittest only added in 3.4 self.subTest()\nif sys.version_info[0] < 3 or sys.version_info[1] < 4:\n import unittest2 as unittest\nelse:\n import unittest\n# unittest.mock is not available in 2.7 (though unittest2 might contain it?)\ntry:\n import unittest.mock as mock\nexcept ImportError:\n import mock\n\nimport numpy as np\nimport six.moves as sm\nimport skimage.morphology\nimport cv2\n\nimport imgaug as ia\nfrom imgaug import random as iarandom\nfrom imgaug import augmenters as iaa\nfrom imgaug import parameters as iap\nfrom imgaug import dtypes as iadt\nfrom imgaug.testutils import (\n array_equal_lists, keypoints_equal, reseed, assert_cbaois_equal,\n runtest_pickleable_uint8_img, assertWarns, is_parameter_instance)\nfrom imgaug.augmentables.heatmaps import HeatmapsOnImage\nfrom imgaug.augmentables.segmaps import SegmentationMapsOnImage\nimport imgaug.augmenters.geometric as geometriclib\n\n\ndef _assert_same_min_max(observed, actual):\n assert np.isclose(observed.min_value, actual.min_value, rtol=0, atol=1e-6)\n assert np.isclose(observed.max_value, actual.max_value, rtol=0, atol=1e-6)\n\n\ndef _assert_same_shape(observed, actual):\n assert observed.shape == actual.shape\n\n# TODO add more tests for Affine .mode\n# TODO add more tests for Affine shear\n\n\nclass TestAffine(unittest.TestCase):\n def test_get_parameters(self):\n aug = iaa.Affine(scale=1, translate_px=2, rotate=3, shear=4,\n order=1, cval=0, mode=\"constant\", backend=\"cv2\",\n fit_output=True)\n\n params = aug.get_parameters()\n\n assert is_parameter_instance(params[0], iap.Deterministic) # scale\n assert isinstance(params[1], tuple) # translate\n assert is_parameter_instance(params[2], iap.Deterministic) # rotate\n assert is_parameter_instance(params[3], iap.Deterministic) # shear\n assert params[0].value == 1 # scale\n assert params[1][0].value == 2 # translate\n assert params[2].value == 3 # rotate\n assert params[3].value == 4 # shear\n assert params[4].value == 1 # order\n assert params[5].value == 0 # cval\n assert params[6].value == \"constant\" # mode\n assert params[7] == \"cv2\" # backend\n assert params[8] is True # fit_output\n\n\nclass TestAffine___init__(unittest.TestCase):\n def test___init___scale_is_stochastic_parameter(self):\n aug = iaa.Affine(scale=iap.Uniform(0.7, 0.9))\n\n assert is_parameter_instance(aug.scale, iap.Uniform)\n assert is_parameter_instance(aug.scale.a, iap.Deterministic)\n assert is_parameter_instance(aug.scale.b, iap.Deterministic)\n assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8\n assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8\n\n def test___init___translate_percent_is_stochastic_parameter(self):\n aug = iaa.Affine(translate_percent=iap.Uniform(0.7, 0.9))\n\n assert isinstance(aug.translate, tuple)\n assert is_parameter_instance(aug.translate[0], iap.Uniform)\n assert is_parameter_instance(aug.translate[0].a, iap.Deterministic)\n assert is_parameter_instance(aug.translate[0].b, iap.Deterministic)\n assert 0.7 - 1e-8 < aug.translate[0].a.value < 0.7 + 1e-8\n assert 0.9 - 1e-8 < aug.translate[0].b.value < 0.9 + 1e-8\n assert aug.translate[1] is None\n assert aug.translate[2] == \"percent\"\n\n def test___init___translate_px_is_stochastic_parameter(self):\n aug = iaa.Affine(translate_px=iap.DiscreteUniform(1, 10))\n\n assert isinstance(aug.translate, tuple)\n assert is_parameter_instance(aug.translate[0], iap.DiscreteUniform)\n assert 
is_parameter_instance(aug.translate[0].a, iap.Deterministic)\n assert is_parameter_instance(aug.translate[0].b, iap.Deterministic)\n assert aug.translate[0].a.value == 1\n assert aug.translate[0].b.value == 10\n assert aug.translate[1] is None\n assert aug.translate[2] == \"px\"\n\n def test___init___rotate_is_stochastic_parameter(self):\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=iap.Uniform(10, 20),\n shear=0)\n\n assert is_parameter_instance(aug.rotate, iap.Uniform)\n assert is_parameter_instance(aug.rotate.a, iap.Deterministic)\n assert aug.rotate.a.value == 10\n assert is_parameter_instance(aug.rotate.b, iap.Deterministic)\n assert aug.rotate.b.value == 20\n\n def test___init___shear_is_stochastic_parameter(self):\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0,\n shear=iap.Uniform(10, 20))\n\n assert is_parameter_instance(aug.shear, iap.Uniform)\n assert is_parameter_instance(aug.shear.a, iap.Deterministic)\n assert aug.shear.a.value == 10\n assert is_parameter_instance(aug.shear.b, iap.Deterministic)\n assert aug.shear.b.value == 20\n\n def test___init___cval_is_all(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=ia.ALL)\n\n assert is_parameter_instance(aug.cval, iap.Uniform)\n assert is_parameter_instance(aug.cval.a, iap.Deterministic)\n assert is_parameter_instance(aug.cval.b, iap.Deterministic)\n assert aug.cval.a.value == 0\n assert aug.cval.b.value == 255\n\n def test___init___cval_is_stochastic_parameter(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=iap.DiscreteUniform(1, 5))\n\n assert is_parameter_instance(aug.cval, iap.DiscreteUniform)\n assert is_parameter_instance(aug.cval.a, iap.Deterministic)\n assert is_parameter_instance(aug.cval.b, iap.Deterministic)\n assert aug.cval.a.value == 1\n assert aug.cval.b.value == 5\n\n def test___init___mode_is_all(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=0, mode=ia.ALL)\n assert is_parameter_instance(aug.mode, iap.Choice)\n\n def test___init___mode_is_string(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=0, mode=\"edge\")\n assert is_parameter_instance(aug.mode, iap.Deterministic)\n assert aug.mode.value == \"edge\"\n\n def test___init___mode_is_list(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=0, mode=[\"constant\", \"edge\"])\n assert is_parameter_instance(aug.mode, iap.Choice)\n assert (\n len(aug.mode.a) == 2\n and \"constant\" in aug.mode.a\n and \"edge\" in aug.mode.a)\n\n def test___init___mode_is_stochastic_parameter(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=0, mode=iap.Choice([\"constant\", \"edge\"]))\n assert is_parameter_instance(aug.mode, iap.Choice)\n assert (\n len(aug.mode.a) == 2\n and \"constant\" in aug.mode.a\n and \"edge\" in aug.mode.a)\n\n def test___init___fit_output_is_true(self):\n aug = iaa.Affine(fit_output=True)\n assert aug.fit_output is True\n\n # ------------\n # exceptions for bad inputs\n # ------------\n def test___init___bad_datatype_for_scale_fails(self):\n with self.assertRaises(Exception):\n _ = iaa.Affine(scale=False)\n\n def test___init___bad_datatype_for_translate_px_fails(self):\n with self.assertRaises(Exception):\n _ = iaa.Affine(translate_px=False)\n\n def test___init___bad_datatype_for_translate_percent_fails(self):\n with self.assertRaises(Exception):\n _ = iaa.Affine(translate_percent=False)\n\n def test___init___bad_datatype_for_rotate_fails(self):\n with 
self.assertRaises(Exception):\n _ = iaa.Affine(scale=1.0, translate_px=0, rotate=False, shear=0,\n cval=0)\n\n def test___init___bad_datatype_for_shear_fails(self):\n with self.assertRaises(Exception):\n _ = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=False,\n cval=0)\n\n def test___init___bad_datatype_for_cval_fails(self):\n with self.assertRaises(Exception):\n _ = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=None)\n\n def test___init___bad_datatype_for_mode_fails(self):\n with self.assertRaises(Exception):\n _ = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=0, mode=False)\n\n def test___init___bad_datatype_for_order_fails(self):\n # bad order datatype in case of backend=cv2\n with self.assertRaises(Exception):\n _ = iaa.Affine(backend=\"cv2\", order=\"test\")\n\n def test___init___nonexistent_order_for_cv2_fails(self):\n # non-existent order in case of backend=cv2\n with self.assertRaises(AssertionError):\n _ = iaa.Affine(backend=\"cv2\", order=-1)\n\n\n# TODO add test with multiple images\nclass TestAffine_noop(unittest.TestCase):\n def setUp(self):\n reseed()\n\n @property\n def base_img(self):\n base_img = np.array([[0, 0, 0],\n [0, 255, 0],\n [0, 0, 0]], dtype=np.uint8)\n return base_img[:, :, np.newaxis]\n\n @property\n def images(self):\n return np.array([self.base_img])\n\n @property\n def kpsoi(self):\n kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=2, y=2)]\n return [ia.KeypointsOnImage(kps, shape=self.base_img.shape)]\n\n @property\n def psoi(self):\n polygons = [ia.Polygon([(0, 0), (2, 0), (2, 2)])]\n return [ia.PolygonsOnImage(polygons, shape=self.base_img.shape)]\n\n @property\n def lsoi(self):\n ls = [ia.LineString([(0, 0), (2, 0), (2, 2)])]\n return [ia.LineStringsOnImage(ls, shape=self.base_img.shape)]\n\n @property\n def bbsoi(self):\n bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]\n return [ia.BoundingBoxesOnImage(bbs, shape=self.base_img.shape)]\n\n def test_image_noop(self):\n # no translation/scale/rotate/shear; shouldn't change anything\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)\n\n observed = aug.augment_images(self.images)\n\n expected = self.images\n assert np.array_equal(observed, expected)\n\n def test_image_noop__deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images(self.images)\n\n expected = self.images\n assert np.array_equal(observed, expected)\n\n def test_image_noop__list(self):\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)\n\n observed = aug.augment_images([self.base_img])\n\n expected = [self.base_img]\n assert array_equal_lists(observed, expected)\n\n def test_image_noop__list_and_deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images([self.base_img])\n\n expected = [self.base_img]\n assert array_equal_lists(observed, expected)\n\n def test_keypoints_noop(self):\n self._test_cba_noop(\"augment_keypoints\", self.kpsoi, False)\n\n def test_keypoints_noop__deterministic(self):\n self._test_cba_noop(\"augment_keypoints\", self.kpsoi, True)\n\n def test_polygons_noop(self):\n self._test_cba_noop(\"augment_polygons\", self.psoi, False)\n\n def test_polygons_noop__deterministic(self):\n self._test_cba_noop(\"augment_polygons\", self.psoi, True)\n\n def test_line_strings_noop(self):\n self._test_cba_noop(\"augment_line_strings\", self.lsoi, 
False)\n\n def test_line_strings_noop__deterministic(self):\n self._test_cba_noop(\"augment_line_strings\", self.lsoi, True)\n\n def test_bounding_boxes_noop(self):\n self._test_cba_noop(\"augment_bounding_boxes\", self.bbsoi, False)\n\n def test_bounding_boxes_noop__deterministic(self):\n self._test_cba_noop(\"augment_bounding_boxes\", self.bbsoi, True)\n\n @classmethod\n def _test_cba_noop(cls, augf_name, cbaoi, deterministic):\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)\n if deterministic:\n aug = aug.to_deterministic()\n\n observed = getattr(aug, augf_name)(cbaoi)\n\n expected = cbaoi\n assert_cbaois_equal(observed, expected)\n\n\n# TODO add test with multiple images\nclass TestAffine_scale(unittest.TestCase):\n def setUp(self):\n reseed()\n\n # ---------------------\n # scale: zoom in\n # ---------------------\n\n @property\n def base_img(self):\n base_img = np.array([[0, 0, 0],\n [0, 255, 0],\n [0, 0, 0]], dtype=np.uint8)\n return base_img[:, :, np.newaxis]\n\n @property\n def images(self):\n return np.array([self.base_img])\n\n @property\n def kpsoi(self):\n kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=2, y=2)]\n return [ia.KeypointsOnImage(kps, shape=self.base_img.shape)]\n\n def kpsoi_scaled(self, scale_y, scale_x):\n coords = np.array([\n [0, 0],\n [1, 1],\n [2, 2]\n ], dtype=np.float32)\n coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)\n return [ia.KeypointsOnImage.from_xy_array(\n coords_scaled,\n shape=self.base_img.shape)]\n\n @property\n def psoi(self):\n polys = [ia.Polygon([(0, 0), (0, 2), (2, 2)])]\n return [ia.PolygonsOnImage(polys, shape=self.base_img.shape)]\n\n def psoi_scaled(self, scale_y, scale_x):\n coords = np.array([\n [0, 0],\n [0, 2],\n [2, 2]\n ], dtype=np.float32)\n coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)\n return [ia.PolygonsOnImage(\n [ia.Polygon(coords_scaled)],\n shape=self.base_img.shape)]\n\n @property\n def lsoi(self):\n ls = [ia.LineString([(0, 0), (0, 2), (2, 2)])]\n return [ia.LineStringsOnImage(ls, shape=self.base_img.shape)]\n\n def lsoi_scaled(self, scale_y, scale_x):\n coords = np.array([\n [0, 0],\n [0, 2],\n [2, 2]\n ], dtype=np.float32)\n coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)\n return [ia.LineStringsOnImage(\n [ia.LineString(coords_scaled)],\n shape=self.base_img.shape)]\n\n @property\n def bbsoi(self):\n bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]\n return [ia.BoundingBoxesOnImage(bbs, shape=self.base_img.shape)]\n\n def bbsoi_scaled(self, scale_y, scale_x):\n coords = np.array([\n [0, 1],\n [2, 3]\n ], dtype=np.float32)\n coords_scaled = self._scale_coordinates(coords, scale_y, scale_x)\n return [ia.BoundingBoxesOnImage.from_xyxy_array(\n coords_scaled.reshape((1, 4)),\n shape=self.base_img.shape)]\n\n def _scale_coordinates(self, coords, scale_y, scale_x):\n height, width = self.base_img.shape[0:2]\n coords_scaled = []\n for x, y in coords:\n # an offset of +0.5 and -0.5 would make up for the shift factor\n # used in the affine matrix generation; it is 0.0 here, i.e. no\n # shift compensation is applied\n offset = 0.0\n x_centered = x - width/2 + offset\n y_centered = y - height/2 + offset\n x_new = x_centered * scale_x + width/2 - offset\n y_new = y_centered * scale_y + height/2 - offset\n coords_scaled.append((x_new, y_new))\n return np.float32(coords_scaled)\n\n @property\n def scale_zoom_in_outer_pixels(self):\n base_img = self.base_img\n outer_pixels = ([], [])\n for i in sm.xrange(base_img.shape[0]):\n for j in sm.xrange(base_img.shape[1]):\n if i != j:\n 
outer_pixels[0].append(i)\n outer_pixels[1].append(j)\n return outer_pixels\n\n def test_image_scale_zoom_in(self):\n aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)\n\n observed = aug.augment_images(self.images)\n\n outer_pixels = self.scale_zoom_in_outer_pixels\n assert observed[0][1, 1] > 250\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()\n\n def test_image_scale_zoom_in__deterministic(self):\n aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images(self.images)\n\n outer_pixels = self.scale_zoom_in_outer_pixels\n assert observed[0][1, 1] > 250\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()\n\n def test_image_scale_zoom_in__list(self):\n aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)\n\n observed = aug.augment_images([self.base_img])\n\n outer_pixels = self.scale_zoom_in_outer_pixels\n assert observed[0][1, 1] > 250\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()\n\n def test_image_scale_zoom_in__list_and_deterministic(self):\n aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images([self.base_img])\n\n outer_pixels = self.scale_zoom_in_outer_pixels\n assert observed[0][1, 1] > 250\n assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()\n assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()\n\n def test_keypoints_scale_zoom_in(self):\n self._test_cba_scale(\n \"augment_keypoints\", 1.75,\n self.kpsoi, self.kpsoi_scaled(1.75, 1.75), False)\n\n def test_keypoints_scale_zoom_in__deterministic(self):\n self._test_cba_scale(\n \"augment_keypoints\", 1.75,\n self.kpsoi, self.kpsoi_scaled(1.75, 1.75), True)\n\n def test_polygons_scale_zoom_in(self):\n self._test_cba_scale(\n \"augment_polygons\", 1.75,\n self.psoi, self.psoi_scaled(1.75, 1.75), False)\n\n def test_polygons_scale_zoom_in__deterministic(self):\n self._test_cba_scale(\n \"augment_polygons\", 1.75,\n self.psoi, self.psoi_scaled(1.75, 1.75), True)\n\n def test_line_strings_scale_zoom_in(self):\n self._test_cba_scale(\n \"augment_line_strings\", 1.75,\n self.lsoi, self.lsoi_scaled(1.75, 1.75), False)\n\n def test_line_strings_scale_zoom_in__deterministic(self):\n self._test_cba_scale(\n \"augment_line_strings\", 1.75,\n self.lsoi, self.lsoi_scaled(1.75, 1.75), True)\n\n def test_bounding_boxes_scale_zoom_in(self):\n self._test_cba_scale(\n \"augment_bounding_boxes\", 1.75,\n self.bbsoi, self.bbsoi_scaled(1.75, 1.75), False)\n\n def test_bounding_boxes_scale_zoom_in__deterministic(self):\n self._test_cba_scale(\n \"augment_bounding_boxes\", 1.75,\n self.bbsoi, self.bbsoi_scaled(1.75, 1.75), True)\n\n @classmethod\n def _test_cba_scale(cls, augf_name, scale, cbaoi, cbaoi_scaled,\n deterministic):\n aug = iaa.Affine(scale=scale, translate_px=0, rotate=0, shear=0)\n if deterministic:\n aug = aug.to_deterministic()\n\n observed = getattr(aug, augf_name)(cbaoi)\n\n assert_cbaois_equal(observed, cbaoi_scaled)\n\n # ---------------------\n # scale: zoom in only on x axis\n # ---------------------\n def test_image_scale_zoom_in_only_x_axis(self):\n aug = iaa.Affine(scale={\"x\": 1.75, \"y\": 1.0},\n translate_px=0, rotate=0, shear=0)\n\n observed = 
aug.augment_images(self.images)\n\n assert observed[0][1, 1] > 250\n assert (observed[0][[1, 1], [0, 2]] > 20).all()\n assert (observed[0][[1, 1], [0, 2]] < 150).all()\n assert (observed[0][0, :] < 5).all()\n assert (observed[0][2, :] < 5).all()\n\n def test_image_scale_zoom_in_only_x_axis__deterministic(self):\n aug = iaa.Affine(scale={\"x\": 1.75, \"y\": 1.0},\n translate_px=0, rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images(self.images)\n\n assert observed[0][1, 1] > 250\n assert (observed[0][[1, 1], [0, 2]] > 20).all()\n assert (observed[0][[1, 1], [0, 2]] < 150).all()\n assert (observed[0][0, :] < 5).all()\n assert (observed[0][2, :] < 5).all()\n\n def test_image_scale_zoom_in_only_x_axis__list(self):\n aug = iaa.Affine(scale={\"x\": 1.75, \"y\": 1.0},\n translate_px=0, rotate=0, shear=0)\n\n observed = aug.augment_images([self.base_img])\n\n assert observed[0][1, 1] > 250\n assert (observed[0][[1, 1], [0, 2]] > 20).all()\n assert (observed[0][[1, 1], [0, 2]] < 150).all()\n assert (observed[0][0, :] < 5).all()\n assert (observed[0][2, :] < 5).all()\n\n def test_image_scale_zoom_in_only_x_axis__deterministic_and_list(self):\n aug = iaa.Affine(scale={\"x\": 1.75, \"y\": 1.0},\n translate_px=0, rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images([self.base_img])\n\n assert observed[0][1, 1] > 250\n assert (observed[0][[1, 1], [0, 2]] > 20).all()\n assert (observed[0][[1, 1], [0, 2]] < 150).all()\n assert (observed[0][0, :] < 5).all()\n assert (observed[0][2, :] < 5).all()\n\n def test_keypoints_scale_zoom_in_only_x_axis(self):\n self._test_cba_scale(\n \"augment_keypoints\", {\"y\": 1.0, \"x\": 1.75}, self.kpsoi,\n self.kpsoi_scaled(1.0, 1.75), False)\n\n def test_keypoints_scale_zoom_in_only_x_axis__deterministic(self):\n self._test_cba_scale(\n \"augment_keypoints\", {\"y\": 1.0, \"x\": 1.75}, self.kpsoi,\n self.kpsoi_scaled(1.0, 1.75), True)\n\n def test_polygons_scale_zoom_in_only_x_axis(self):\n self._test_cba_scale(\n \"augment_polygons\", {\"y\": 1.0, \"x\": 1.75}, self.psoi,\n self.psoi_scaled(1.0, 1.75), False)\n\n def test_polygons_scale_zoom_in_only_x_axis__deterministic(self):\n self._test_cba_scale(\n \"augment_polygons\", {\"y\": 1.0, \"x\": 1.75}, self.psoi,\n self.psoi_scaled(1.0, 1.75), True)\n\n def test_line_strings_scale_zoom_in_only_x_axis(self):\n self._test_cba_scale(\n \"augment_line_strings\", {\"y\": 1.0, \"x\": 1.75}, self.lsoi,\n self.lsoi_scaled(1.0, 1.75), False)\n\n def test_line_strings_scale_zoom_in_only_x_axis__deterministic(self):\n self._test_cba_scale(\n \"augment_line_strings\", {\"y\": 1.0, \"x\": 1.75}, self.lsoi,\n self.lsoi_scaled(1.0, 1.75), True)\n\n def test_bounding_boxes_scale_zoom_in_only_x_axis(self):\n self._test_cba_scale(\n \"augment_bounding_boxes\", {\"y\": 1.0, \"x\": 1.75}, self.bbsoi,\n self.bbsoi_scaled(1.0, 1.75), False)\n\n def test_bounding_boxes_scale_zoom_in_only_x_axis__deterministic(self):\n self._test_cba_scale(\n \"augment_bounding_boxes\", {\"y\": 1.0, \"x\": 1.75}, self.bbsoi,\n self.bbsoi_scaled(1.0, 1.75), True)\n\n # ---------------------\n # scale: zoom in only on y axis\n # ---------------------\n def test_image_scale_zoom_in_only_y_axis(self):\n aug = iaa.Affine(scale={\"x\": 1.0, \"y\": 1.75},\n translate_px=0, rotate=0, shear=0)\n\n observed = aug.augment_images(self.images)\n\n assert observed[0][1, 1] > 250\n assert (observed[0][[0, 2], [1, 1]] > 20).all()\n assert (observed[0][[0, 2], [1, 1]] < 150).all()\n assert 
(observed[0][:, 0] < 5).all()\n        assert (observed[0][:, 2] < 5).all()\n\n    def test_image_scale_zoom_in_only_y_axis__deterministic(self):\n        aug = iaa.Affine(scale={\"x\": 1.0, \"y\": 1.75},\n                         translate_px=0, rotate=0, shear=0)\n        aug_det = aug.to_deterministic()\n\n        observed = aug_det.augment_images(self.images)\n\n        assert observed[0][1, 1] > 250\n        assert (observed[0][[0, 2], [1, 1]] > 20).all()\n        assert (observed[0][[0, 2], [1, 1]] < 150).all()\n        assert (observed[0][:, 0] < 5).all()\n        assert (observed[0][:, 2] < 5).all()\n\n    def test_image_scale_zoom_in_only_y_axis__list(self):\n        aug = iaa.Affine(scale={\"x\": 1.0, \"y\": 1.75},\n                         translate_px=0, rotate=0, shear=0)\n\n        observed = aug.augment_images([self.base_img])\n\n        assert observed[0][1, 1] > 250\n        assert (observed[0][[0, 2], [1, 1]] > 20).all()\n        assert (observed[0][[0, 2], [1, 1]] < 150).all()\n        assert (observed[0][:, 0] < 5).all()\n        assert (observed[0][:, 2] < 5).all()\n\n    def test_image_scale_zoom_in_only_y_axis__deterministic_and_list(self):\n        aug = iaa.Affine(scale={\"x\": 1.0, \"y\": 1.75},\n                         translate_px=0, rotate=0, shear=0)\n        aug_det = aug.to_deterministic()\n\n        observed = aug_det.augment_images([self.base_img])\n\n        assert observed[0][1, 1] > 250\n        assert (observed[0][[0, 2], [1, 1]] > 20).all()\n        assert (observed[0][[0, 2], [1, 1]] < 150).all()\n        assert (observed[0][:, 0] < 5).all()\n        assert (observed[0][:, 2] < 5).all()\n\n    def test_keypoints_scale_zoom_in_only_y_axis(self):\n        self._test_cba_scale(\n            \"augment_keypoints\", {\"y\": 1.75, \"x\": 1.0}, self.kpsoi,\n            self.kpsoi_scaled(1.75, 1.0), False)\n\n    def test_keypoints_scale_zoom_in_only_y_axis__deterministic(self):\n        self._test_cba_scale(\n            \"augment_keypoints\", {\"y\": 1.75, \"x\": 1.0}, self.kpsoi,\n            self.kpsoi_scaled(1.75, 1.0), True)\n\n    def test_polygons_scale_zoom_in_only_y_axis(self):\n        self._test_cba_scale(\n            \"augment_polygons\", {\"y\": 1.75, \"x\": 1.0}, self.psoi,\n            self.psoi_scaled(1.75, 1.0), False)\n\n    def test_polygons_scale_zoom_in_only_y_axis__deterministic(self):\n        self._test_cba_scale(\n            \"augment_polygons\", {\"y\": 1.75, \"x\": 1.0}, self.psoi,\n            self.psoi_scaled(1.75, 1.0), True)\n\n    def test_line_strings_scale_zoom_in_only_y_axis(self):\n        self._test_cba_scale(\n            \"augment_line_strings\", {\"y\": 1.75, \"x\": 1.0}, self.lsoi,\n            self.lsoi_scaled(1.75, 1.0), False)\n\n    def test_line_strings_scale_zoom_in_only_y_axis__deterministic(self):\n        self._test_cba_scale(\n            \"augment_line_strings\", {\"y\": 1.75, \"x\": 1.0}, self.lsoi,\n            self.lsoi_scaled(1.75, 1.0), True)\n\n    def test_bounding_boxes_scale_zoom_in_only_y_axis(self):\n        self._test_cba_scale(\n            \"augment_bounding_boxes\", {\"y\": 1.75, \"x\": 1.0}, self.bbsoi,\n            self.bbsoi_scaled(1.75, 1.0), False)\n\n    def test_bounding_boxes_scale_zoom_in_only_y_axis__deterministic(self):\n        self._test_cba_scale(\n            \"augment_bounding_boxes\", {\"y\": 1.75, \"x\": 1.0}, self.bbsoi,\n            self.bbsoi_scaled(1.75, 1.0), True)\n\n    # ---------------------\n    # scale: zoom out\n    # ---------------------\n    # these tests use a 4x4 area of all 255, which is zoomed out to a 4x4 area\n    # in which the center 2x2 area is 255\n    # zoom in should probably be adapted to this style\n    # no separate tests here for x/y axis, should work fine if zoom in works\n    # with that\n\n    @property\n    def scale_zoom_out_base_img(self):\n        return np.ones((4, 4, 1), dtype=np.uint8) * 255\n\n    @property\n    def scale_zoom_out_images(self):\n        return np.array([self.scale_zoom_out_base_img])\n\n    @property\n    def scale_zoom_out_outer_pixels(self):\n        outer_pixels = ([], 
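# two parallel index lists (ys, xs) covering the twelve border\n        # cells of the 4x4 zoom-out test image\n        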
[])\n for y in sm.xrange(4):\n xs = sm.xrange(4) if y in [0, 3] else [0, 3]\n for x in xs:\n outer_pixels[0].append(y)\n outer_pixels[1].append(x)\n return outer_pixels\n\n @property\n def scale_zoom_out_inner_pixels(self):\n return [1, 1, 2, 2], [1, 2, 1, 2]\n\n @property\n def scale_zoom_out_kpsoi(self):\n kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),\n ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)]\n return [ia.KeypointsOnImage(kps,\n shape=self.scale_zoom_out_base_img.shape)]\n\n @property\n def scale_zoom_out_kpsoi_aug(self):\n kps_aug = [ia.Keypoint(x=0.765, y=0.765),\n ia.Keypoint(x=2.235, y=0.765),\n ia.Keypoint(x=0.765, y=2.235),\n ia.Keypoint(x=2.235, y=2.235)]\n return [ia.KeypointsOnImage(kps_aug,\n shape=self.scale_zoom_out_base_img.shape)]\n\n def test_image_scale_zoom_out(self):\n aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)\n\n observed = aug.augment_images(self.scale_zoom_out_images)\n\n outer_pixels = self.scale_zoom_out_outer_pixels\n inner_pixels = self.scale_zoom_out_inner_pixels\n assert (observed[0][outer_pixels] < 25).all()\n assert (observed[0][inner_pixels] > 200).all()\n\n def test_image_scale_zoom_out__deterministic(self):\n aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images(self.scale_zoom_out_images)\n\n outer_pixels = self.scale_zoom_out_outer_pixels\n inner_pixels = self.scale_zoom_out_inner_pixels\n assert (observed[0][outer_pixels] < 25).all()\n assert (observed[0][inner_pixels] > 200).all()\n\n def test_image_scale_zoom_out__list(self):\n aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)\n\n observed = aug.augment_images([self.scale_zoom_out_base_img])\n\n outer_pixels = self.scale_zoom_out_outer_pixels\n inner_pixels = self.scale_zoom_out_inner_pixels\n assert (observed[0][outer_pixels] < 25).all()\n assert (observed[0][inner_pixels] > 200).all()\n\n def test_image_scale_zoom_out__list_and_deterministic(self):\n aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images([self.scale_zoom_out_base_img])\n\n outer_pixels = self.scale_zoom_out_outer_pixels\n inner_pixels = self.scale_zoom_out_inner_pixels\n assert (observed[0][outer_pixels] < 25).all()\n assert (observed[0][inner_pixels] > 200).all()\n\n def test_keypoints_scale_zoom_out(self):\n self._test_cba_scale(\n \"augment_keypoints\", 0.49, self.kpsoi,\n self.kpsoi_scaled(0.49, 0.49), False)\n\n def test_keypoints_scale_zoom_out__deterministic(self):\n self._test_cba_scale(\n \"augment_keypoints\", 0.49, self.kpsoi,\n self.kpsoi_scaled(0.49, 0.49), True)\n\n def test_polygons_scale_zoom_out(self):\n self._test_cba_scale(\n \"augment_polygons\", 0.49, self.psoi,\n self.psoi_scaled(0.49, 0.49), False)\n\n def test_polygons_scale_zoom_out__deterministic(self):\n self._test_cba_scale(\n \"augment_polygons\", 0.49, self.psoi,\n self.psoi_scaled(0.49, 0.49), True)\n\n def test_line_strings_scale_zoom_out(self):\n self._test_cba_scale(\n \"augment_line_strings\", 0.49, self.lsoi,\n self.lsoi_scaled(0.49, 0.49), False)\n\n def test_line_strings_scale_zoom_out__deterministic(self):\n self._test_cba_scale(\n \"augment_line_strings\", 0.49, self.lsoi,\n self.lsoi_scaled(0.49, 0.49), True)\n\n def test_bounding_boxes_scale_zoom_out(self):\n self._test_cba_scale(\n \"augment_bounding_boxes\", 0.49, self.bbsoi,\n self.bbsoi_scaled(0.49, 0.49), False)\n\n def 
test_bounding_boxes_scale_zoom_out__deterministic(self):\n        self._test_cba_scale(\n            \"augment_bounding_boxes\", 0.49, self.bbsoi,\n            self.bbsoi_scaled(0.49, 0.49), True)\n\n    # ---------------------\n    # scale: x and y axis are both tuples\n    # ---------------------\n    def test_image_x_and_y_axis_are_tuples(self):\n        aug = iaa.Affine(scale={\"x\": (0.5, 1.5), \"y\": (0.5, 1.5)},\n                         translate_px=0, rotate=0, shear=0)\n\n        image = np.array([[0, 0, 0, 0, 0],\n                          [0, 1, 1, 1, 0],\n                          [0, 1, 2, 1, 0],\n                          [0, 1, 1, 1, 0],\n                          [0, 0, 0, 0, 0]], dtype=np.uint8) * 100\n        image = image[:, :, np.newaxis]\n        images = np.array([image])\n\n        last_aug = None\n        nb_changed_aug = 0\n        nb_iterations = 1000\n        for i in sm.xrange(nb_iterations):\n            observed_aug = aug.augment_images(images)\n            if i == 0:\n                last_aug = observed_aug\n            else:\n                if not np.array_equal(observed_aug, last_aug):\n                    nb_changed_aug += 1\n                last_aug = observed_aug\n        assert nb_changed_aug >= int(nb_iterations * 0.8)\n\n    def test_image_x_and_y_axis_are_tuples__deterministic(self):\n        aug = iaa.Affine(scale={\"x\": (0.5, 1.5), \"y\": (0.5, 1.5)},\n                         translate_px=0, rotate=0, shear=0)\n        aug_det = aug.to_deterministic()\n\n        image = np.array([[0, 0, 0, 0, 0],\n                          [0, 1, 1, 1, 0],\n                          [0, 1, 2, 1, 0],\n                          [0, 1, 1, 1, 0],\n                          [0, 0, 0, 0, 0]], dtype=np.uint8) * 100\n        image = image[:, :, np.newaxis]\n        images = np.array([image])\n\n        last_aug_det = None\n        nb_changed_aug_det = 0\n        nb_iterations = 10\n        for i in sm.xrange(nb_iterations):\n            observed_aug_det = aug_det.augment_images(images)\n            if i == 0:\n                last_aug_det = observed_aug_det\n            else:\n                if not np.array_equal(observed_aug_det, last_aug_det):\n                    nb_changed_aug_det += 1\n                last_aug_det = observed_aug_det\n        assert nb_changed_aug_det == 0\n\n    # ------------\n    # alignment\n    # TODO add alignment tests for: BBs, Polys, LS\n    # ------------\n    def test_keypoint_alignment(self):\n        image = np.zeros((100, 100), dtype=np.uint8)\n        image[40-1:40+2, 40-1:40+2] = 255\n        image[40-1:40+2, 60-1:60+2] = 255\n\n        kps = [ia.Keypoint(x=40, y=40), ia.Keypoint(x=60, y=40)]\n        kpsoi = ia.KeypointsOnImage(kps, shape=image.shape)\n\n        images = [image, image, image]\n        kpsois = [kpsoi.deepcopy(),\n                  ia.KeypointsOnImage([], shape=image.shape),\n                  kpsoi.deepcopy()]\n\n        aug = iaa.Affine(scale=[0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5,\n                                1.6, 1.7],\n                         order=0)\n\n        for _ in sm.xrange(40):\n            images_aug, kpsois_aug = aug(images=images, keypoints=kpsois)\n\n            assert kpsois_aug[1].empty\n\n            for i in [0, 2]:\n                image_aug = images_aug[i]\n                kpsoi_aug = kpsois_aug[i]\n\n                for kp in kpsoi_aug.keypoints:\n                    value = image_aug[int(kp.y), int(kp.x)]\n                    assert value > 200\n\n    # ------------\n    # make sure that polygons stay valid upon extreme scaling\n    # ------------\n    def test_polygons_stay_valid_when_using_extreme_scalings(self):\n        scales = [1e-4, 1e-2, 1e2, 1e4]\n        backends = [\"auto\", \"cv2\", \"skimage\"]\n        orders = [0, 1, 3]\n\n        gen = itertools.product(scales, backends, orders)\n        for scale, backend, order in gen:\n            with self.subTest(scale=scale, backend=backend, order=order):\n                aug = iaa.Affine(scale=scale, order=order)\n                psoi = ia.PolygonsOnImage([\n                    ia.Polygon([(0, 0), (10, 0), (5, 5)])],\n                    shape=(10, 10))\n\n                psoi_aug = aug.augment_polygons(psoi)\n\n                poly = psoi_aug.polygons[0]\n                ext = poly.exterior\n                assert poly.is_valid\n                assert ext[0][0] < ext[2][0] < ext[1][0]\n                assert ext[0][1] < ext[2][1]\n                assert np.allclose(ext[0][1], ext[1][1])\n\n\nclass TestAffine_translate(unittest.TestCase):\n    def setUp(self):\n        reseed()\n\n    @property\n    def image(self):\n        return np.uint8([\n            [0, 0, 
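# 3x3 test image with a single nonzero pixel at the center, so a\n            # 1px shift can be asserted exactly\n            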
0],\n [0, 1, 0],\n [0, 0, 0]\n ])[:, :, np.newaxis]\n\n @property\n def image_1px_right(self):\n return np.uint8([\n [0, 0, 0],\n [0, 0, 1],\n [0, 0, 0]\n ])[:, :, np.newaxis]\n\n @property\n def image_1px_bottom(self):\n return np.uint8([\n [0, 0, 0],\n [0, 0, 0],\n [0, 1, 0]\n ])[:, :, np.newaxis]\n\n @property\n def images(self):\n return np.array([self.image])\n\n @property\n def images_1px_right(self):\n return np.array([self.image_1px_right])\n\n @property\n def images_1px_bottom(self):\n return np.array([self.image_1px_bottom])\n\n @property\n def kpsoi(self):\n kps = [ia.Keypoint(x=1, y=1)]\n return [ia.KeypointsOnImage(kps, shape=self.image.shape)]\n\n @property\n def kpsoi_1px_right(self):\n kps = [ia.Keypoint(x=2, y=1)]\n return [ia.KeypointsOnImage(kps, shape=self.image.shape)]\n\n @property\n def kpsoi_1px_bottom(self):\n kps = [ia.Keypoint(x=1, y=2)]\n return [ia.KeypointsOnImage(kps, shape=self.image.shape)]\n\n @property\n def psoi(self):\n polys = [ia.Polygon([(0, 0), (2, 0), (2, 2)])]\n return [ia.PolygonsOnImage(polys, shape=self.image.shape)]\n\n @property\n def psoi_1px_right(self):\n polys = [ia.Polygon([(0+1, 0), (2+1, 0), (2+1, 2)])]\n return [ia.PolygonsOnImage(polys, shape=self.image.shape)]\n\n @property\n def psoi_1px_bottom(self):\n polys = [ia.Polygon([(0, 0+1), (2, 0+1), (2, 2+1)])]\n return [ia.PolygonsOnImage(polys, shape=self.image.shape)]\n\n @property\n def lsoi(self):\n ls = [ia.LineString([(0, 0), (2, 0), (2, 2)])]\n return [ia.LineStringsOnImage(ls, shape=self.image.shape)]\n\n @property\n def lsoi_1px_right(self):\n ls = [ia.LineString([(0+1, 0), (2+1, 0), (2+1, 2)])]\n return [ia.LineStringsOnImage(ls, shape=self.image.shape)]\n\n @property\n def lsoi_1px_bottom(self):\n ls = [ia.LineString([(0, 0+1), (2, 0+1), (2, 2+1)])]\n return [ia.LineStringsOnImage(ls, shape=self.image.shape)]\n\n @property\n def bbsoi(self):\n bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]\n return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]\n\n @property\n def bbsoi_1px_right(self):\n bbs = [ia.BoundingBox(x1=0+1, y1=1, x2=2+1, y2=3)]\n return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]\n\n @property\n def bbsoi_1px_bottom(self):\n bbs = [ia.BoundingBox(x1=0, y1=1+1, x2=2, y2=3+1)]\n return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]\n\n # ---------------------\n # translate: move one pixel to the right\n # ---------------------\n def test_image_translate_1px_right(self):\n # move one pixel to the right\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 1, \"y\": 0}, rotate=0,\n shear=0)\n\n observed = aug.augment_images(self.images)\n\n assert np.array_equal(observed, self.images_1px_right)\n\n def test_image_translate_1px_right__deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 1, \"y\": 0}, rotate=0,\n shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images(self.images)\n\n assert np.array_equal(observed, self.images_1px_right)\n\n def test_image_translate_1px_right__list(self):\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 1, \"y\": 0}, rotate=0,\n shear=0)\n\n observed = aug.augment_images([self.image])\n\n assert array_equal_lists(observed, [self.image_1px_right])\n\n def test_image_translate_1px_right__list_and_deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 1, \"y\": 0}, rotate=0,\n shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images([self.image])\n\n assert array_equal_lists(observed, [self.image_1px_right])\n\n def 
test_keypoints_translate_1px_right(self):\n self._test_cba_translate_px(\n \"augment_keypoints\", {\"x\": 1, \"y\": 0},\n self.kpsoi, self.kpsoi_1px_right, False)\n\n def test_keypoints_translate_1px_right__deterministic(self):\n self._test_cba_translate_px(\n \"augment_keypoints\", {\"x\": 1, \"y\": 0},\n self.kpsoi, self.kpsoi_1px_right, True)\n\n def test_polygons_translate_1px_right(self):\n self._test_cba_translate_px(\n \"augment_polygons\", {\"x\": 1, \"y\": 0},\n self.psoi, self.psoi_1px_right, False)\n\n def test_polygons_translate_1px_right__deterministic(self):\n self._test_cba_translate_px(\n \"augment_polygons\", {\"x\": 1, \"y\": 0},\n self.psoi, self.psoi_1px_right, True)\n\n def test_line_strings_translate_1px_right(self):\n self._test_cba_translate_px(\n \"augment_line_strings\", {\"x\": 1, \"y\": 0},\n self.lsoi, self.lsoi_1px_right, False)\n\n def test_line_strings_translate_1px_right__deterministic(self):\n self._test_cba_translate_px(\n \"augment_line_strings\", {\"x\": 1, \"y\": 0},\n self.lsoi, self.lsoi_1px_right, True)\n\n def test_bounding_boxes_translate_1px_right(self):\n self._test_cba_translate_px(\n \"augment_bounding_boxes\", {\"x\": 1, \"y\": 0},\n self.bbsoi, self.bbsoi_1px_right, False)\n\n def test_bounding_boxes_translate_1px_right__deterministic(self):\n self._test_cba_translate_px(\n \"augment_bounding_boxes\", {\"x\": 1, \"y\": 0},\n self.bbsoi, self.bbsoi_1px_right, True)\n\n @classmethod\n def _test_cba_translate_px(cls, augf_name, px, cbaoi, cbaoi_translated,\n deterministic):\n aug = iaa.Affine(scale=1.0, translate_px=px, rotate=0, shear=0)\n if deterministic:\n aug = aug.to_deterministic()\n\n observed = getattr(aug, augf_name)(cbaoi)\n\n assert_cbaois_equal(observed, cbaoi_translated)\n\n def test_image_translate_1px_right_skimage(self):\n # move one pixel to the right\n # with backend = skimage\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 1, \"y\": 0}, rotate=0,\n shear=0, backend=\"skimage\")\n\n observed = aug.augment_images(self.images)\n\n assert np.array_equal(observed, self.images_1px_right)\n\n def test_image_translate_1px_right_skimage_order_all(self):\n # move one pixel to the right\n # with backend = skimage, order=ALL\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 1, \"y\": 0}, rotate=0,\n shear=0, backend=\"skimage\", order=ia.ALL)\n\n observed = aug.augment_images(self.images)\n\n assert np.array_equal(observed, self.images_1px_right)\n\n def test_image_translate_1px_right_skimage_order_is_list(self):\n # move one pixel to the right\n # with backend = skimage, order=list\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 1, \"y\": 0}, rotate=0,\n shear=0, backend=\"skimage\", order=[0, 1, 3])\n\n observed = aug.augment_images(self.images)\n\n assert np.array_equal(observed, self.images_1px_right)\n\n def test_image_translate_1px_right_cv2_order_is_list(self):\n # move one pixel to the right\n # with backend = cv2, order=list\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 1, \"y\": 0}, rotate=0,\n shear=0, backend=\"cv2\", order=[0, 1, 3])\n\n observed = aug.augment_images(self.images)\n\n assert np.array_equal(observed, self.images_1px_right)\n\n def test_image_translate_1px_right_cv2_order_is_stoch_param(self):\n # move one pixel to the right\n # with backend = cv2, order=StochasticParameter\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 1, \"y\": 0}, rotate=0,\n shear=0, backend=\"cv2\", order=iap.Choice([0, 1, 3]))\n\n observed = aug.augment_images(self.images)\n\n assert np.array_equal(observed, 
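# whichever order was sampled, a pure 1px integer translation\n            # must produce the exact same shifted image\n            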
self.images_1px_right)\n\n # ---------------------\n # translate: move one pixel to the bottom\n # ---------------------\n def test_image_translate_1px_bottom(self):\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 0, \"y\": 1}, rotate=0,\n shear=0)\n\n observed = aug.augment_images(self.images)\n\n assert np.array_equal(observed, self.images_1px_bottom)\n\n def test_image_translate_1px_bottom__deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 0, \"y\": 1}, rotate=0,\n shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images(self.images)\n\n assert np.array_equal(observed, self.images_1px_bottom)\n\n def test_image_translate_1px_bottom__list(self):\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 0, \"y\": 1}, rotate=0,\n shear=0)\n\n observed = aug.augment_images([self.image])\n\n assert array_equal_lists(observed, [self.image_1px_bottom])\n\n def test_image_translate_1px_bottom__list_and_deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": 0, \"y\": 1}, rotate=0,\n shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images([self.image])\n\n assert array_equal_lists(observed, [self.image_1px_bottom])\n\n def test_keypoints_translate_1px_bottom(self):\n self._test_cba_translate_px(\n \"augment_keypoints\", {\"x\": 0, \"y\": 1},\n self.kpsoi, self.kpsoi_1px_bottom, False)\n\n def test_keypoints_translate_1px_bottom__deterministic(self):\n self._test_cba_translate_px(\n \"augment_keypoints\", {\"x\": 0, \"y\": 1},\n self.kpsoi, self.kpsoi_1px_bottom, True)\n\n def test_polygons_translate_1px_bottom(self):\n self._test_cba_translate_px(\n \"augment_polygons\", {\"x\": 0, \"y\": 1},\n self.psoi, self.psoi_1px_bottom, False)\n\n def test_polygons_translate_1px_bottom__deterministic(self):\n self._test_cba_translate_px(\n \"augment_polygons\", {\"x\": 0, \"y\": 1},\n self.psoi, self.psoi_1px_bottom, True)\n\n def test_line_strings_translate_1px_bottom(self):\n self._test_cba_translate_px(\n \"augment_line_strings\", {\"x\": 0, \"y\": 1},\n self.lsoi, self.lsoi_1px_bottom, False)\n\n def test_line_strings_translate_1px_bottom__deterministic(self):\n self._test_cba_translate_px(\n \"augment_line_strings\", {\"x\": 0, \"y\": 1},\n self.lsoi, self.lsoi_1px_bottom, True)\n\n def test_bounding_boxes_translate_1px_bottom(self):\n self._test_cba_translate_px(\n \"augment_bounding_boxes\", {\"x\": 0, \"y\": 1},\n self.bbsoi, self.bbsoi_1px_bottom, False)\n\n def test_bounding_boxes_translate_1px_bottom__deterministic(self):\n self._test_cba_translate_px(\n \"augment_bounding_boxes\", {\"x\": 0, \"y\": 1},\n self.bbsoi, self.bbsoi_1px_bottom, True)\n\n # ---------------------\n # translate: fraction of the image size (towards the right)\n # ---------------------\n def test_image_translate_33percent_right(self):\n aug = iaa.Affine(scale=1.0, translate_percent={\"x\": 0.3333, \"y\": 0},\n rotate=0, shear=0)\n\n observed = aug.augment_images(self.images)\n\n assert np.array_equal(observed, self.images_1px_right)\n\n def test_image_translate_33percent_right__deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_percent={\"x\": 0.3333, \"y\": 0},\n rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images(self.images)\n\n assert np.array_equal(observed, self.images_1px_right)\n\n def test_image_translate_33percent_right__list(self):\n aug = iaa.Affine(scale=1.0, translate_percent={\"x\": 0.3333, \"y\": 0},\n rotate=0, shear=0)\n\n observed = aug.augment_images([self.image])\n\n 
assert array_equal_lists(observed, [self.image_1px_right])\n\n    def test_image_translate_33percent_right__list_and_deterministic(self):\n        aug = iaa.Affine(scale=1.0, translate_percent={\"x\": 0.3333, \"y\": 0},\n                         rotate=0, shear=0)\n        aug_det = aug.to_deterministic()\n\n        observed = aug_det.augment_images([self.image])\n\n        assert array_equal_lists(observed, [self.image_1px_right])\n\n    def test_keypoints_translate_33percent_right(self):\n        self._test_cba_translate_percent(\n            \"augment_keypoints\", {\"x\": 0.3333, \"y\": 0},\n            self.kpsoi, self.kpsoi_1px_right, False)\n\n    def test_keypoints_translate_33percent_right__deterministic(self):\n        self._test_cba_translate_percent(\n            \"augment_keypoints\", {\"x\": 0.3333, \"y\": 0},\n            self.kpsoi, self.kpsoi_1px_right, True)\n\n    def test_polygons_translate_33percent_right(self):\n        self._test_cba_translate_percent(\n            \"augment_polygons\", {\"x\": 0.3333, \"y\": 0},\n            self.psoi, self.psoi_1px_right, False)\n\n    def test_polygons_translate_33percent_right__deterministic(self):\n        self._test_cba_translate_percent(\n            \"augment_polygons\", {\"x\": 0.3333, \"y\": 0},\n            self.psoi, self.psoi_1px_right, True)\n\n    def test_line_strings_translate_33percent_right(self):\n        self._test_cba_translate_percent(\n            \"augment_line_strings\", {\"x\": 0.3333, \"y\": 0},\n            self.lsoi, self.lsoi_1px_right, False)\n\n    def test_line_strings_translate_33percent_right__deterministic(self):\n        self._test_cba_translate_percent(\n            \"augment_line_strings\", {\"x\": 0.3333, \"y\": 0},\n            self.lsoi, self.lsoi_1px_right, True)\n\n    def test_bounding_boxes_translate_33percent_right(self):\n        self._test_cba_translate_percent(\n            \"augment_bounding_boxes\", {\"x\": 0.3333, \"y\": 0},\n            self.bbsoi, self.bbsoi_1px_right, False)\n\n    def test_bounding_boxes_translate_33percent_right__deterministic(self):\n        self._test_cba_translate_percent(\n            \"augment_bounding_boxes\", {\"x\": 0.3333, \"y\": 0},\n            self.bbsoi, self.bbsoi_1px_right, True)\n\n    def test_keypoints_with_continuous_param_results_in_absolute_shift(self):\n        # This test ensures that t ~ uniform(a, b) results in a translation\n        # by t pixels and not t%\n        # see issue #505\n        # use iap.Uniform() here to ensure that it is really a float value\n        # that is sampled and not accidentally a DiscreteUniform\n        aug = iaa.Affine(translate_px=iap.Uniform(10, 20))\n        kps = [ia.Keypoint(x=10, y=10)]\n        kpsoi = ia.KeypointsOnImage(kps, shape=(1000, 1000))\n\n        for _ in np.arange(5):\n            kpsoi_aug = aug.augment_keypoints(kpsoi)\n\n            kp_aug = kpsoi_aug.keypoints[0]\n            assert 10+10 <= kp_aug.x <= 10+20\n            assert 10+10 <= kp_aug.y <= 10+20\n\n    @classmethod\n    def _test_cba_translate_percent(cls, augf_name, percent, cbaoi,\n                                    cbaoi_translated, deterministic):\n        aug = iaa.Affine(scale=1.0, translate_percent=percent, rotate=0,\n                         shear=0)\n        if deterministic:\n            aug = aug.to_deterministic()\n\n        observed = getattr(aug, augf_name)(cbaoi)\n\n        assert_cbaois_equal(observed, cbaoi_translated, max_distance=1e-3)\n\n    # ---------------------\n    # translate: fraction of the image size (towards the bottom)\n    # ---------------------\n    def test_image_translate_33percent_bottom(self):\n        # move 33% (one pixel) to the bottom\n        aug = iaa.Affine(scale=1.0, translate_percent={\"x\": 0, \"y\": 0.3333},\n                         rotate=0, shear=0)\n\n        observed = aug.augment_images(self.images)\n\n        assert np.array_equal(observed, self.images_1px_bottom)\n\n    def test_image_translate_33percent_bottom__deterministic(self):\n        aug = iaa.Affine(scale=1.0, translate_percent={\"x\": 0, \"y\": 0.3333},\n                         rotate=0, shear=0)\n        aug_det = 
aug.to_deterministic()\n\n observed = aug_det.augment_images(self.images)\n\n assert np.array_equal(observed, self.images_1px_bottom)\n\n def test_image_translate_33percent_bottom__list(self):\n aug = iaa.Affine(scale=1.0, translate_percent={\"x\": 0, \"y\": 0.3333},\n rotate=0, shear=0)\n\n observed = aug.augment_images([self.image])\n\n assert array_equal_lists(observed, [self.image_1px_bottom])\n\n def test_image_translate_33percent_bottom__list_and_deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_percent={\"x\": 0, \"y\": 0.3333},\n rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images([self.image])\n\n assert array_equal_lists(observed, [self.image_1px_bottom])\n\n def test_keypoints_translate_33percent_bottom(self):\n self._test_cba_translate_percent(\n \"augment_keypoints\", {\"x\": 0, \"y\": 0.3333},\n self.kpsoi, self.kpsoi_1px_bottom, False)\n\n def test_keypoints_translate_33percent_bottom__deterministic(self):\n self._test_cba_translate_percent(\n \"augment_keypoints\", {\"x\": 0, \"y\": 0.3333},\n self.kpsoi, self.kpsoi_1px_bottom, True)\n\n def test_polygons_translate_33percent_bottom(self):\n self._test_cba_translate_percent(\n \"augment_polygons\", {\"x\": 0, \"y\": 0.3333},\n self.psoi, self.psoi_1px_bottom, False)\n\n def test_polygons_translate_33percent_bottom__deterministic(self):\n self._test_cba_translate_percent(\n \"augment_polygons\", {\"x\": 0, \"y\": 0.3333},\n self.psoi, self.psoi_1px_bottom, True)\n\n def test_line_strings_translate_33percent_bottom(self):\n self._test_cba_translate_percent(\n \"augment_line_strings\", {\"x\": 0, \"y\": 0.3333},\n self.lsoi, self.lsoi_1px_bottom, False)\n\n def test_line_strings_translate_33percent_bottom__deterministic(self):\n self._test_cba_translate_percent(\n \"augment_line_strings\", {\"x\": 0, \"y\": 0.3333},\n self.lsoi, self.lsoi_1px_bottom, True)\n\n def test_bounding_boxes_translate_33percent_bottom(self):\n self._test_cba_translate_percent(\n \"augment_bounding_boxes\", {\"x\": 0, \"y\": 0.3333},\n self.bbsoi, self.bbsoi_1px_bottom, False)\n\n def test_bounding_boxes_translate_33percent_bottom__deterministic(self):\n self._test_cba_translate_percent(\n \"augment_bounding_boxes\", {\"x\": 0, \"y\": 0.3333},\n self.bbsoi, self.bbsoi_1px_bottom, True)\n\n # ---------------------\n # translate: axiswise uniform distributions\n # ---------------------\n def test_image_translate_by_axiswise_uniform_distributions(self):\n # 0-1px to left/right and 0-1px to top/bottom\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": (-1, 1), \"y\": (-1, 1)},\n rotate=0, shear=0)\n last_aug = None\n nb_changed_aug = 0\n nb_iterations = 1000\n centers_aug = self.image.astype(np.int32) * 0\n for i in sm.xrange(nb_iterations):\n observed_aug = aug.augment_images(self.images)\n if i == 0:\n last_aug = observed_aug\n else:\n if not np.array_equal(observed_aug, last_aug):\n nb_changed_aug += 1\n last_aug = observed_aug\n\n assert len(observed_aug[0].nonzero()[0]) == 1\n centers_aug += (observed_aug[0] > 0)\n\n assert nb_changed_aug >= int(nb_iterations * 0.7)\n assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()\n assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()\n\n def test_image_translate_by_axiswise_uniform_distributions__det(self):\n # 0-1px to left/right and 0-1px to top/bottom\n aug = iaa.Affine(scale=1.0, translate_px={\"x\": (-1, 1), \"y\": (-1, 1)},\n rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n last_aug_det = None\n nb_changed_aug_det = 0\n 
nb_iterations = 10\n centers_aug_det = self.image.astype(np.int32) * 0\n for i in sm.xrange(nb_iterations):\n observed_aug_det = aug_det.augment_images(self.images)\n if i == 0:\n last_aug_det = observed_aug_det\n else:\n if not np.array_equal(observed_aug_det, last_aug_det):\n nb_changed_aug_det += 1\n last_aug_det = observed_aug_det\n\n assert len(observed_aug_det[0].nonzero()[0]) == 1\n centers_aug_det += (observed_aug_det[0] > 0)\n\n assert nb_changed_aug_det == 0\n\n # ---------------------\n # translate heatmaps\n # ---------------------\n @property\n def heatmaps(self):\n return ia.HeatmapsOnImage(\n np.float32([\n [0.0, 0.5, 0.75],\n [0.0, 0.5, 0.75],\n [0.75, 0.75, 0.75],\n ]),\n shape=(3, 3, 3)\n )\n\n @property\n def heatmaps_1px_right(self):\n return ia.HeatmapsOnImage(\n np.float32([\n [0.0, 0.0, 0.5],\n [0.0, 0.0, 0.5],\n [0.0, 0.75, 0.75],\n ]),\n shape=(3, 3, 3)\n )\n\n def test_heatmaps_translate_1px_right(self):\n aug = iaa.Affine(translate_px={\"x\": 1})\n\n observed = aug.augment_heatmaps([self.heatmaps])[0]\n\n _assert_same_shape(observed, self.heatmaps)\n _assert_same_min_max(observed, self.heatmaps)\n assert np.array_equal(observed.get_arr(),\n self.heatmaps_1px_right.get_arr())\n\n def test_heatmaps_translate_1px_right_should_ignore_cval(self):\n # should still use mode=constant cval=0 even when other settings chosen\n aug = iaa.Affine(translate_px={\"x\": 1}, cval=255)\n\n observed = aug.augment_heatmaps([self.heatmaps])[0]\n\n _assert_same_shape(observed, self.heatmaps)\n _assert_same_min_max(observed, self.heatmaps)\n assert np.array_equal(observed.get_arr(),\n self.heatmaps_1px_right.get_arr())\n\n def test_heatmaps_translate_1px_right_should_ignore_mode(self):\n aug = iaa.Affine(translate_px={\"x\": 1}, mode=\"edge\", cval=255)\n\n observed = aug.augment_heatmaps([self.heatmaps])[0]\n\n _assert_same_shape(observed, self.heatmaps)\n _assert_same_min_max(observed, self.heatmaps)\n assert np.array_equal(observed.get_arr(),\n self.heatmaps_1px_right.get_arr())\n\n # ---------------------\n # translate segmaps\n # ---------------------\n @property\n def segmaps(self):\n return SegmentationMapsOnImage(\n np.int32([\n [0, 1, 2],\n [0, 1, 2],\n [2, 2, 2],\n ]),\n shape=(3, 3, 3)\n )\n\n @property\n def segmaps_1px_right(self):\n return SegmentationMapsOnImage(\n np.int32([\n [0, 0, 1],\n [0, 0, 1],\n [0, 2, 2],\n ]),\n shape=(3, 3, 3)\n )\n\n def test_segmaps_translate_1px_right(self):\n aug = iaa.Affine(translate_px={\"x\": 1})\n\n observed = aug.augment_segmentation_maps([self.segmaps])[0]\n\n _assert_same_shape(observed, self.segmaps)\n assert np.array_equal(observed.get_arr(),\n self.segmaps_1px_right.get_arr())\n\n def test_segmaps_translate_1px_right_should_ignore_cval(self):\n # should still use mode=constant cval=0 even when other settings chosen\n aug = iaa.Affine(translate_px={\"x\": 1}, cval=255)\n\n observed = aug.augment_segmentation_maps([self.segmaps])[0]\n\n _assert_same_shape(observed, self.segmaps)\n assert np.array_equal(observed.get_arr(),\n self.segmaps_1px_right.get_arr())\n\n def test_segmaps_translate_1px_right_should_ignore_mode(self):\n aug = iaa.Affine(translate_px={\"x\": 1}, mode=\"edge\", cval=255)\n\n observed = aug.augment_segmentation_maps([self.segmaps])[0]\n\n _assert_same_shape(observed, self.segmaps)\n assert np.array_equal(observed.get_arr(),\n self.segmaps_1px_right.get_arr())\n\n\nclass TestAffine_rotate(unittest.TestCase):\n def setUp(self):\n reseed()\n\n @property\n def image(self):\n return np.uint8([\n [0, 0, 0],\n [255, 
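# horizontal bright line through the center row; a 90deg rotation\n            # should turn it into the vertical line in image_rot90 below\n            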
255, 255],\n [0, 0, 0]\n ])[:, :, np.newaxis]\n\n @property\n def image_rot90(self):\n return np.uint8([\n [0, 255, 0],\n [0, 255, 0],\n [0, 255, 0]\n ])[:, :, np.newaxis]\n\n @property\n def images(self):\n return np.array([self.image])\n\n @property\n def images_rot90(self):\n return np.array([self.image_rot90])\n\n @property\n def kpsoi(self):\n kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=2, y=1)]\n return [ia.KeypointsOnImage(kps, shape=self.image.shape)]\n\n @property\n def kpsoi_rot90(self):\n kps = [ia.Keypoint(x=3-1, y=0), ia.Keypoint(x=3-1, y=1),\n ia.Keypoint(x=3-1, y=2)]\n return [ia.KeypointsOnImage(kps, shape=self.image_rot90.shape)]\n\n @property\n def psoi(self):\n polys = [ia.Polygon([(0, 0), (3, 0), (3, 3)])]\n return [ia.PolygonsOnImage(polys, shape=self.image.shape)]\n\n @property\n def psoi_rot90(self):\n polys = [ia.Polygon([(3-0, 0), (3-0, 3), (3-3, 3)])]\n return [ia.PolygonsOnImage(polys, shape=self.image_rot90.shape)]\n\n @property\n def lsoi(self):\n ls = [ia.LineString([(0, 0), (3, 0), (3, 3)])]\n return [ia.LineStringsOnImage(ls, shape=self.image.shape)]\n\n @property\n def lsoi_rot90(self):\n ls = [ia.LineString([(3-0, 0), (3-0, 3), (3-3, 3)])]\n return [ia.LineStringsOnImage(ls, shape=self.image_rot90.shape)]\n\n @property\n def bbsoi(self):\n bbs = [ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)]\n return [ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)]\n\n @property\n def bbsoi_rot90(self):\n bbs = [ia.BoundingBox(x1=0, y1=0, x2=2, y2=2)]\n return [ia.BoundingBoxesOnImage(bbs, shape=self.image_rot90.shape)]\n\n def test_image_rot90(self):\n # rotate by 90 degrees\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)\n\n observed = aug.augment_images(self.images)\n\n observed[observed >= 100] = 255\n observed[observed < 100] = 0\n assert np.array_equal(observed, self.images_rot90)\n\n def test_image_rot90__deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images(self.images)\n\n observed[observed >= 100] = 255\n observed[observed < 100] = 0\n assert np.array_equal(observed, self.images_rot90)\n\n def test_image_rot90__list(self):\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)\n\n observed = aug.augment_images([self.image])\n\n observed[0][observed[0] >= 100] = 255\n observed[0][observed[0] < 100] = 0\n assert array_equal_lists(observed, [self.image_rot90])\n\n def test_image_rot90__list_and_deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images([self.image])\n\n observed[0][observed[0] >= 100] = 255\n observed[0][observed[0] < 100] = 0\n assert array_equal_lists(observed, [self.image_rot90])\n\n def test_keypoints_rot90(self):\n self._test_cba_rotate(\n \"augment_keypoints\", 90, self.kpsoi, self.kpsoi_rot90, False)\n\n def test_keypoints_rot90__deterministic(self):\n self._test_cba_rotate(\n \"augment_keypoints\", 90, self.kpsoi, self.kpsoi_rot90, True)\n\n def test_polygons_rot90(self):\n self._test_cba_rotate(\n \"augment_polygons\", 90, self.psoi, self.psoi_rot90, False)\n\n def test_polygons_rot90__deterministic(self):\n self._test_cba_rotate(\n \"augment_polygons\", 90, self.psoi, self.psoi_rot90, True)\n\n def test_line_strings_rot90(self):\n self._test_cba_rotate(\n \"augment_line_strings\", 90, self.lsoi, self.lsoi_rot90, False)\n\n def test_line_strings_rot90__deterministic(self):\n 
self._test_cba_rotate(\n \"augment_line_strings\", 90, self.lsoi, self.lsoi_rot90, True)\n\n def test_bounding_boxes_rot90(self):\n self._test_cba_rotate(\n \"augment_bounding_boxes\", 90, self.bbsoi, self.bbsoi_rot90, False)\n\n def test_bounding_boxes_rot90__deterministic(self):\n self._test_cba_rotate(\n \"augment_bounding_boxes\", 90, self.bbsoi, self.bbsoi_rot90, True)\n\n @classmethod\n def _test_cba_rotate(cls, augf_name, rotate, cbaoi,\n cbaoi_rotated, deterministic):\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=rotate,\n shear=0)\n if deterministic:\n aug = aug.to_deterministic()\n\n observed = getattr(aug, augf_name)(cbaoi)\n\n assert_cbaois_equal(observed, cbaoi_rotated)\n\n def test_image_rotate_is_tuple_0_to_364_deg(self):\n # random rotation 0-364 degrees\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)\n last_aug = None\n nb_changed_aug = 0\n nb_iterations = 1000\n pixels_sums_aug = self.image.astype(np.int32) * 0\n for i in sm.xrange(nb_iterations):\n observed_aug = aug.augment_images(self.images)\n if i == 0:\n last_aug = observed_aug\n else:\n if not np.array_equal(observed_aug, last_aug):\n nb_changed_aug += 1\n last_aug = observed_aug\n\n pixels_sums_aug += (observed_aug[0] > 100)\n\n assert nb_changed_aug >= int(nb_iterations * 0.9)\n # center pixel, should always be white when rotating line around center\n assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)\n assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)\n\n # outer pixels, should sometimes be white\n # the values here had to be set quite tolerant, the middle pixels at\n # top/left/bottom/right get more activation than expected\n outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2],\n [0, 1, 2, 0, 2, 0, 1, 2])\n assert (\n pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))\n ).all()\n assert (\n pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))\n ).all()\n\n def test_image_rotate_is_tuple_0_to_364_deg__deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)\n aug_det = aug.to_deterministic()\n last_aug_det = None\n nb_changed_aug_det = 0\n nb_iterations = 10\n pixels_sums_aug_det = self.image.astype(np.int32) * 0\n for i in sm.xrange(nb_iterations):\n observed_aug_det = aug_det.augment_images(self.images)\n if i == 0:\n last_aug_det = observed_aug_det\n else:\n if not np.array_equal(observed_aug_det, last_aug_det):\n nb_changed_aug_det += 1\n last_aug_det = observed_aug_det\n\n pixels_sums_aug_det += (observed_aug_det[0] > 100)\n\n assert nb_changed_aug_det == 0\n # center pixel, should always be white when rotating line around center\n assert pixels_sums_aug_det[1, 1] > (nb_iterations * 0.98)\n assert pixels_sums_aug_det[1, 1] < (nb_iterations * 1.02)\n\n def test_alignment_between_images_and_heatmaps_for_fixed_rot(self):\n # measure alignment between images and heatmaps when rotating\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n aug = iaa.Affine(rotate=45, backend=backend)\n image = np.zeros((7, 6), dtype=np.uint8)\n image[:, 2:3+1] = 255\n hm = ia.HeatmapsOnImage(image.astype(np.float32)/255, shape=(7, 6))\n\n img_aug = aug.augment_image(image)\n hm_aug = aug.augment_heatmaps([hm])[0]\n\n img_aug_mask = img_aug > 255*0.1\n hm_aug_mask = hm_aug.arr_0to1 > 0.1\n same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])\n assert hm_aug.shape == (7, 6)\n assert hm_aug.arr_0to1.shape == (7, 6, 1)\n assert (same / img_aug_mask.size) >= 0.95\n\n def test_alignment_between_images_and_smaller_heatmaps_for_fixed_rot(self):\n # 
measure alignment between images and heatmaps when rotating\n # here with smaller heatmaps\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n aug = iaa.Affine(rotate=45, backend=backend)\n\n image = np.zeros((56, 48), dtype=np.uint8)\n image[:, 16:24+1] = 255\n hm = ia.HeatmapsOnImage(\n ia.imresize_single_image(\n image, (28, 24), interpolation=\"cubic\"\n ).astype(np.float32)/255,\n shape=(56, 48)\n )\n\n img_aug = aug.augment_image(image)\n hm_aug = aug.augment_heatmaps([hm])[0]\n\n img_aug_mask = img_aug > 255*0.1\n hm_aug_mask = ia.imresize_single_image(\n hm_aug.arr_0to1, img_aug.shape[0:2], interpolation=\"cubic\"\n ) > 0.1\n same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])\n assert hm_aug.shape == (56, 48)\n assert hm_aug.arr_0to1.shape == (28, 24, 1)\n assert (same / img_aug_mask.size) >= 0.9\n\n def test_bounding_boxes_have_expected_shape_after_augmentation(self):\n image = np.zeros((100, 100), dtype=np.uint8)\n image[20:80, 20:80] = 255\n bb = ia.BoundingBox(x1=20, y1=20, x2=80, y2=80)\n bbsoi = ia.BoundingBoxesOnImage([bb], shape=image.shape)\n for rotate in [10, 20, 40, 80, 120]:\n with self.subTest(rotate=rotate):\n aug = iaa.Affine(rotate=rotate, order=0)\n\n image_aug, bbsoi_aug = aug(image=image, bounding_boxes=bbsoi)\n\n xx = np.nonzero(np.max(image_aug > 100, axis=0))[0]\n yy = np.nonzero(np.max(image_aug > 100, axis=1))[0]\n bb_exp_x1 = xx[0]\n bb_exp_x2 = xx[-1]\n bb_exp_y1 = yy[0]\n bb_exp_y2 = yy[-1]\n bb_expected = ia.BoundingBox(x1=bb_exp_x1, y1=bb_exp_y1,\n x2=bb_exp_x2, y2=bb_exp_y2)\n assert bbsoi_aug.bounding_boxes[0].iou(bb_expected) > 0.95\n\n\nclass TestAffine_cval(unittest.TestCase):\n @property\n def image(self):\n return np.ones((3, 3, 1), dtype=np.uint8) * 255\n\n @property\n def images(self):\n return np.array([self.image])\n\n def test_image_fixed_cval(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=128)\n\n observed = aug.augment_images(self.images)\n\n assert (observed[0] > 128 - 30).all()\n assert (observed[0] < 128 + 30).all()\n\n def test_image_fixed_cval__deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=128)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images(self.images)\n\n assert (observed[0] > 128 - 30).all()\n assert (observed[0] < 128 + 30).all()\n\n def test_image_fixed_cval__list(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=128)\n\n observed = aug.augment_images([self.image])\n\n assert (observed[0] > 128 - 30).all()\n assert (observed[0] < 128 + 30).all()\n\n def test_image_fixed_cval__list_and_deterministic(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=128)\n aug_det = aug.to_deterministic()\n\n observed = aug_det.augment_images([self.image])\n\n assert (observed[0] > 128 - 30).all()\n assert (observed[0] < 128 + 30).all()\n\n def test_image_cval_is_tuple(self):\n # random cvals\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=(0, 255))\n last_aug = None\n nb_changed_aug = 0\n nb_iterations = 1000\n for i in sm.xrange(nb_iterations):\n observed_aug = aug.augment_images(self.images)\n\n if i == 0:\n last_aug = observed_aug\n else:\n if not np.array_equal(observed_aug, last_aug):\n nb_changed_aug += 1\n last_aug = observed_aug\n\n assert nb_changed_aug >= int(nb_iterations * 0.9)\n\n def test_image_cval_is_tuple__deterministic(self):\n # random cvals\n aug = iaa.Affine(scale=1.0, translate_px=100, 
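# cval=(0, 255) samples a random fill value per call; the\n                         # deterministic copy below must reuse one fixed sample\n                         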
rotate=0, shear=0,\n cval=(0, 255))\n aug_det = aug.to_deterministic()\n last_aug_det = None\n nb_changed_aug_det = 0\n nb_iterations = 10\n for i in sm.xrange(nb_iterations):\n observed_aug_det = aug_det.augment_images(self.images)\n\n if i == 0:\n last_aug_det = observed_aug_det\n else:\n if not np.array_equal(observed_aug_det, last_aug_det):\n nb_changed_aug_det += 1\n last_aug_det = observed_aug_det\n\n assert nb_changed_aug_det == 0\n\n def test_float_cval_on_float_image(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=0.25)\n image = np.full((10, 10, 3), 0.75, dtype=np.float32)\n image_aug = aug(image=image)\n assert np.allclose(image_aug, 0.25)\n\n def test_float_cval_on_int_image(self):\n aug = iaa.Affine(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=2.75)\n image = np.full((10, 10, 3), 10, dtype=np.uint8)\n image_aug = aug(image=image)\n assert np.allclose(image_aug, 2) # cval is casted to int, no rounding\n\n\nclass TestAffine_fit_output(unittest.TestCase):\n @property\n def image(self):\n return np.ones((3, 3, 1), dtype=np.uint8) * 255\n\n @property\n def images(self):\n return np.array([self.image])\n\n @property\n def heatmaps(self):\n return ia.HeatmapsOnImage(\n np.float32([\n [0.0, 0.5, 0.75],\n [0.0, 0.5, 0.75],\n [0.75, 0.75, 0.75],\n ]),\n shape=(3, 3, 3)\n )\n\n @property\n def kpsoi(self):\n kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=2, y=1)]\n return [ia.KeypointsOnImage(kps, shape=self.image.shape)]\n\n def test_image_translate(self):\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n aug = iaa.Affine(translate_px=100, fit_output=True,\n backend=backend)\n\n observed = aug.augment_images(self.images)\n\n expected = self.images\n assert np.array_equal(observed, expected)\n\n def test_keypoints_translate(self):\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n aug = iaa.Affine(translate_px=100, fit_output=True,\n backend=backend)\n\n observed = aug.augment_keypoints(self.kpsoi)\n\n expected = self.kpsoi\n assert keypoints_equal(observed, expected)\n\n def test_heatmaps_translate(self):\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n aug = iaa.Affine(translate_px=100, fit_output=True,\n backend=backend)\n\n observed = aug.augment_heatmaps([self.heatmaps])[0]\n\n expected = self.heatmaps\n assert np.allclose(observed.arr_0to1, expected.arr_0to1)\n\n def test_image_rot45(self):\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n aug = iaa.Affine(rotate=45, fit_output=True,\n backend=backend)\n img = np.zeros((10, 10), dtype=np.uint8)\n img[0:2, 0:2] = 255\n img[-2:, 0:2] = 255\n img[0:2, -2:] = 255\n img[-2:, -2:] = 255\n\n img_aug = aug.augment_image(img)\n\n _labels, nb_labels = skimage.morphology.label(\n img_aug > 240, return_num=True, connectivity=2)\n assert nb_labels == 4\n\n def test_heatmaps_rot45(self):\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n aug = iaa.Affine(rotate=45, fit_output=True,\n backend=backend)\n img = np.zeros((10, 10), dtype=np.uint8)\n img[0:2, 0:2] = 255\n img[-2:, 0:2] = 255\n img[0:2, -2:] = 255\n img[-2:, -2:] = 255\n hm = ia.HeatmapsOnImage(img.astype(np.float32)/255,\n shape=(10, 10))\n\n hm_aug = aug.augment_heatmaps([hm])[0]\n\n _labels, nb_labels = skimage.morphology.label(\n hm_aug.arr_0to1 > 240/255, return_num=True, connectivity=2)\n assert nb_labels == 4\n\n 
def test_heatmaps_rot45__heatmaps_smaller_than_image(self):\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n aug = iaa.Affine(rotate=45, fit_output=True,\n backend=backend)\n img = np.zeros((80, 80), dtype=np.uint8)\n img[0:5, 0:5] = 255\n img[-5:, 0:5] = 255\n img[0:5, -5:] = 255\n img[-5:, -5:] = 255\n hm = HeatmapsOnImage(\n ia.imresize_single_image(\n img, (40, 40), interpolation=\"cubic\"\n ).astype(np.float32)/255,\n shape=(80, 80)\n )\n\n hm_aug = aug.augment_heatmaps([hm])[0]\n\n # these asserts are deactivated because the image size can\n # change under fit_output=True\n # assert hm_aug.shape == (80, 80)\n # assert hm_aug.arr_0to1.shape == (40, 40, 1)\n _labels, nb_labels = skimage.morphology.label(\n hm_aug.arr_0to1 > 200/255, return_num=True, connectivity=2)\n assert nb_labels == 4\n\n def test_image_heatmap_alignment_random_rots(self):\n nb_iterations = 50\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n for _ in sm.xrange(nb_iterations):\n aug = iaa.Affine(rotate=(0, 364), fit_output=True,\n backend=backend)\n img = np.zeros((80, 80), dtype=np.uint8)\n img[0:5, 0:5] = 255\n img[-5:, 0:5] = 255\n img[0:5, -5:] = 255\n img[-5:, -5:] = 255\n hm = HeatmapsOnImage(\n img.astype(np.float32)/255,\n shape=(80, 80)\n )\n\n img_aug = aug.augment_image(img)\n hm_aug = aug.augment_heatmaps([hm])[0]\n\n img_aug_mask = img_aug > 255*0.1\n hm_aug_mask = ia.imresize_single_image(\n hm_aug.arr_0to1, img_aug.shape[0:2],\n interpolation=\"cubic\"\n ) > 0.1\n same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])\n assert (same / img_aug_mask.size) >= 0.95\n\n def test_image_heatmap_alignment_random_rots__hms_smaller_than_img(self):\n nb_iterations = 50\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n for _ in sm.xrange(nb_iterations):\n aug = iaa.Affine(rotate=(0, 364), fit_output=True,\n backend=backend)\n img = np.zeros((80, 80), dtype=np.uint8)\n img[0:5, 0:5] = 255\n img[-5:, 0:5] = 255\n img[0:5, -5:] = 255\n img[-5:, -5:] = 255\n hm = HeatmapsOnImage(\n ia.imresize_single_image(\n img, (40, 40), interpolation=\"cubic\"\n ).astype(np.float32)/255,\n shape=(80, 80)\n )\n\n img_aug = aug.augment_image(img)\n hm_aug = aug.augment_heatmaps([hm])[0]\n\n img_aug_mask = img_aug > 255*0.1\n hm_aug_mask = ia.imresize_single_image(\n hm_aug.arr_0to1, img_aug.shape[0:2],\n interpolation=\"cubic\"\n ) > 0.1\n same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])\n assert (same / img_aug_mask.size) >= 0.95\n\n def test_segmaps_rot45(self):\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n aug = iaa.Affine(rotate=45, fit_output=True,\n backend=backend)\n img = np.zeros((80, 80), dtype=np.uint8)\n img[0:5, 0:5] = 255\n img[-5:, 0:5] = 255\n img[0:5, -5:] = 255\n img[-5:, -5:] = 255\n segmap = SegmentationMapsOnImage(\n (img > 100).astype(np.int32),\n shape=(80, 80)\n )\n\n segmap_aug = aug.augment_segmentation_maps([segmap])[0]\n\n # these asserts are deactivated because the image size can\n # change under fit_output=True\n # assert segmap_aug.shape == (80, 80)\n # assert segmap_aug.arr_0to1.shape == (40, 40, 1)\n _labels, nb_labels = skimage.morphology.label(\n segmap_aug.arr > 0, return_num=True, connectivity=2)\n assert nb_labels == 4\n\n def test_segmaps_rot45__segmaps_smaller_than_img(self):\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n aug = iaa.Affine(rotate=45, fit_output=True,\n 
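# fit_output=True enlarges the output canvas so that the whole\n                                 # rotated content stays visible\n                                 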
backend=backend)\n img = np.zeros((80, 80), dtype=np.uint8)\n img[0:5, 0:5] = 255\n img[-5:, 0:5] = 255\n img[0:5, -5:] = 255\n img[-5:, -5:] = 255\n segmap = SegmentationMapsOnImage(\n (\n ia.imresize_single_image(\n img, (40, 40), interpolation=\"cubic\"\n ) > 100\n ).astype(np.int32),\n shape=(80, 80)\n )\n\n segmap_aug = aug.augment_segmentation_maps([segmap])[0]\n\n # these asserts are deactivated because the image size can\n # change under fit_output=True\n # assert segmap_aug.shape == (80, 80)\n # assert segmap_aug.arr_0to1.shape == (40, 40, 1)\n _labels, nb_labels = skimage.morphology.label(\n segmap_aug.arr > 0, return_num=True, connectivity=2)\n assert nb_labels == 4\n\n def test_image_segmap_alignment_random_rots(self):\n nb_iterations = 50\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n for _ in sm.xrange(nb_iterations):\n aug = iaa.Affine(rotate=(0, 364), fit_output=True,\n backend=backend)\n img = np.zeros((80, 80), dtype=np.uint8)\n img[0:5, 0:5] = 255\n img[-5:, 0:5] = 255\n img[0:5, -5:] = 255\n img[-5:, -5:] = 255\n segmap = SegmentationMapsOnImage(\n (img > 100).astype(np.int32),\n shape=(80, 80)\n )\n\n img_aug = aug.augment_image(img)\n segmap_aug = aug.augment_segmentation_maps([segmap])[0]\n\n img_aug_mask = img_aug > 100\n segmap_aug_mask = ia.imresize_single_image(\n segmap_aug.arr,\n img_aug.shape[0:2],\n interpolation=\"nearest\"\n ) > 0\n same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])\n assert (same / img_aug_mask.size) >= 0.95\n\n def test_image_segmap_alignment_random_rots__sms_smaller_than_img(self):\n nb_iterations = 50\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n for _ in sm.xrange(nb_iterations):\n aug = iaa.Affine(rotate=(0, 364), fit_output=True,\n backend=backend)\n img = np.zeros((80, 80), dtype=np.uint8)\n img[0:5, 0:5] = 255\n img[-5:, 0:5] = 255\n img[0:5, -5:] = 255\n img[-5:, -5:] = 255\n segmap = SegmentationMapsOnImage(\n (\n ia.imresize_single_image(\n img, (40, 40), interpolation=\"cubic\"\n ) > 100\n ).astype(np.int32),\n shape=(80, 80)\n )\n\n img_aug = aug.augment_image(img)\n segmap_aug = aug.augment_segmentation_maps([segmap])[0]\n\n img_aug_mask = img_aug > 100\n segmap_aug_mask = ia.imresize_single_image(\n segmap_aug.arr,\n img_aug.shape[0:2],\n interpolation=\"nearest\"\n ) > 0\n same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])\n assert (same / img_aug_mask.size) >= 0.95\n\n def test_keypoints_rot90_without_fit_output(self):\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n aug = iaa.Affine(rotate=90, backend=backend)\n kps = ia.KeypointsOnImage([ia.Keypoint(10, 10)],\n shape=(100, 200, 3))\n kps_aug = aug.augment_keypoints(kps)\n assert kps_aug.shape == (100, 200, 3)\n assert not np.allclose(\n [kps_aug.keypoints[0].x, kps_aug.keypoints[0].y],\n [kps.keypoints[0].x, kps.keypoints[0].y],\n atol=1e-2, rtol=0)\n\n def test_keypoints_rot90(self):\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with self.subTest(backend=backend):\n aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)\n kps = ia.KeypointsOnImage([ia.Keypoint(10, 10)],\n shape=(100, 200, 3))\n\n kps_aug = aug.augment_keypoints(kps)\n\n assert kps_aug.shape == (200, 100, 3)\n assert not np.allclose(\n [kps_aug.keypoints[0].x, kps_aug.keypoints[0].y],\n [kps.keypoints[0].x, kps.keypoints[0].y],\n atol=1e-2, rtol=0)\n\n def test_empty_keypoints_rot90(self):\n for backend in [\"auto\", \"cv2\", \"skimage\"]:\n with 
self.subTest(backend=backend):\n                aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)\n                kps = ia.KeypointsOnImage([], shape=(100, 200, 3))\n\n                kps_aug = aug.augment_keypoints(kps)\n\n                assert kps_aug.shape == (200, 100, 3)\n                assert len(kps_aug.keypoints) == 0\n\n    def _test_cbaoi_rot90_without_fit_output(self, cbaoi, augf_name):\n        for backend in [\"auto\", \"cv2\", \"skimage\"]:\n            with self.subTest(backend=backend):\n                # without fit_output, the .shape of the container must stay\n                # unchanged while the coordinates move\n                aug = iaa.Affine(rotate=90, backend=backend)\n\n                cbaoi_aug = getattr(aug, augf_name)([cbaoi, cbaoi])\n\n                assert len(cbaoi_aug) == 2\n                for cbaoi_aug_i in cbaoi_aug:\n                    if isinstance(cbaoi, (ia.PolygonsOnImage,\n                                          ia.LineStringsOnImage)):\n                        assert cbaoi_aug_i.shape == cbaoi.shape\n                        assert not cbaoi_aug_i.items[0].coords_almost_equals(\n                            cbaoi.items[0].coords, max_distance=1e-2)\n                    else:\n                        assert_cbaois_equal(cbaoi_aug_i, cbaoi)\n\n    def test_polygons_rot90_without_fit_output(self):\n        psoi = ia.PolygonsOnImage([\n            ia.Polygon([(10, 10), (20, 10), (20, 20)])\n        ], shape=(100, 200, 3))\n\n        self._test_cbaoi_rot90_without_fit_output(psoi, \"augment_polygons\")\n\n    def test_line_strings_rot90_without_fit_output(self):\n        lsoi = ia.LineStringsOnImage([\n            ia.LineString([(10, 10), (20, 10), (20, 20), (10, 10)])\n        ], shape=(100, 200, 3))\n\n        self._test_cbaoi_rot90_without_fit_output(lsoi, \"augment_line_strings\")\n\n    def _test_cbaoi_rot90(self, cbaoi, expected, augf_name):\n        for backend in [\"auto\", \"cv2\", \"skimage\"]:\n            with self.subTest(backend=backend):\n                aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)\n\n                cbaoi_aug = getattr(aug, augf_name)([cbaoi, cbaoi])\n\n                assert len(cbaoi_aug) == 2\n                for cbaoi_aug_i in cbaoi_aug:\n                    assert_cbaois_equal(cbaoi_aug_i, expected)\n\n    def test_polygons_rot90(self):\n        psoi = ia.PolygonsOnImage([\n            ia.Polygon([(10, 10), (20, 10), (20, 20)])\n        ], shape=(100, 200, 3))\n        expected = ia.PolygonsOnImage([\n            ia.Polygon([(100-10-1, 10), (100-10-1, 20), (100-20-1, 20)])\n        ], shape=(200, 100, 3))\n        self._test_cbaoi_rot90(psoi, expected, \"augment_polygons\")\n\n    def test_line_strings_rot90(self):\n        lsoi = ia.LineStringsOnImage([\n            ia.LineString([(10, 10), (20, 10), (20, 20), (10, 10)])\n        ], shape=(100, 200, 3))\n        expected = ia.LineStringsOnImage([\n            ia.LineString([(100-10-1, 10), (100-10-1, 20), (100-20-1, 20),\n                           (100-10-1, 10)])\n        ], shape=(200, 100, 3))\n        self._test_cbaoi_rot90(lsoi, expected, \"augment_line_strings\")\n\n    def test_bounding_boxes_rot90(self):\n        bbsoi = ia.BoundingBoxesOnImage([\n            ia.BoundingBox(x1=10, y1=10, x2=20, y2=20)\n        ], shape=(100, 200, 3))\n        expected = ia.BoundingBoxesOnImage([\n            ia.BoundingBox(x1=100-20-1, y1=10, x2=100-10-1, y2=20)\n        ], shape=(200, 100, 3))\n        self._test_cbaoi_rot90(bbsoi, expected, \"augment_bounding_boxes\")\n\n    def _test_empty_cbaoi_rot90(self, cbaoi, expected, augf_name):\n        for backend in [\"auto\", \"cv2\", \"skimage\"]:\n            with self.subTest(backend=backend):\n                aug = iaa.Affine(rotate=90, fit_output=True, backend=backend)\n\n                cbaoi_aug = getattr(aug, augf_name)(cbaoi)\n\n                assert_cbaois_equal(cbaoi_aug, expected)\n\n    def test_empty_polygons_rot90(self):\n        psoi = ia.PolygonsOnImage([], shape=(100, 200, 3))\n        expected = ia.PolygonsOnImage([], shape=(200, 100, 3))\n        self._test_empty_cbaoi_rot90(psoi, expected, \"augment_polygons\")\n\n    def test_empty_line_strings_rot90(self):\n        lsoi = ia.LineStringsOnImage([], shape=(100, 200, 3))\n        expected = ia.LineStringsOnImage([], shape=(200, 100, 3))\n        self._test_empty_cbaoi_rot90(lsoi, expected, 
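# empty containers carry no coordinates; only their .shape should\n            # be updated to the fit_output canvas size\n            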
\"augment_line_strings\")\n\n def test_empty_bounding_boxes_rot90(self):\n bbsoi = ia.BoundingBoxesOnImage([], shape=(100, 200, 3))\n expected = ia.BoundingBoxesOnImage([], shape=(200, 100, 3))\n self._test_empty_cbaoi_rot90(bbsoi, expected, \"augment_bounding_boxes\")\n\n\n# TODO merge these into TestAffine_rotate since they are rotations?\n# or extend to contain other affine params too?\nclass TestAffine_alignment(unittest.TestCase):\n def setUp(self):\n reseed()\n\n def test_image_segmap_alignment_with_translate_px(self):\n image = np.zeros((80, 100, 3), dtype=np.uint8)\n image[40-10:40+10, 50-10:50+10, :] = 255\n hm = np.zeros((40, 50, 1), dtype=np.float32)\n hm[20-5:20+5, 25-5:25+5, 0] = 1.0\n hm = ia.HeatmapsOnImage(hm, shape=image.shape)\n\n # note that if x is an odd value (e.g. 1), the projection is a bit\n # less accurate as x=1 projected to a half-sized segmap is x=0.5,\n # leading to interpolation effects\n xvals = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, [0, 10, 20]]\n\n for xvals_i in xvals:\n with self.subTest(x=xvals_i):\n aug = iaa.Affine(translate_px={\"x\": xvals_i})\n iterations = 2 if ia.is_single_number(xvals_i) else 20\n\n for _ in np.arange(iterations):\n image_aug, hm_aug = aug(image=image, heatmaps=hm)\n\n hm_aug_arr_rs = ia.imresize_single_image(\n hm_aug.get_arr(), (80, 100), interpolation=\"nearest\")\n overlap_true = np.sum(\n np.logical_and(\n (image_aug[..., 0] > 220),\n (hm_aug_arr_rs[..., 0] > 0.9)\n )\n )\n p_same_on_zero_cells = np.average(\n (image_aug[..., 0] > 220)\n == (hm_aug_arr_rs[..., 0] > 0.9))\n assert overlap_true > 19*19\n assert p_same_on_zero_cells > 0.98\n\n def test_image_segmap_alignment_with_translate_percent(self):\n image = np.zeros((80, 100, 3), dtype=np.uint8)\n image[40-10:40+10, 50-10:50+10, :] = 255\n hm = np.zeros((40, 50, 1), dtype=np.float32)\n hm[20-5:20+5, 25-5:25+5, 0] = 1.0\n hm = ia.HeatmapsOnImage(hm, shape=image.shape)\n\n # note that if x is an odd value (e.g. 
1), the projection is a bit\n # less accurate as x=1 projected to a half-sized segmap is x=0.5,\n # leading to interpolation effects\n width = image.shape[1]\n xvals = [0/width, 2/width, 4/width, 6/width, 8/width, 10/width,\n 12/width, 14/width, 16/width, 18/width, 20/width,\n [0/width, 10/width, 20/width]]\n\n for xvals_i in xvals:\n with self.subTest(x=xvals_i):\n aug = iaa.Affine(translate_percent={\"x\": xvals_i})\n iterations = 2 if ia.is_single_number(xvals_i) else 20\n\n for _ in np.arange(iterations):\n image_aug, hm_aug = aug(image=image, heatmaps=hm)\n\n hm_aug_arr_rs = ia.imresize_single_image(\n hm_aug.get_arr(), (80, 100), interpolation=\"nearest\")\n overlap_true = np.sum(\n np.logical_and(\n (image_aug[..., 0] > 220),\n (hm_aug_arr_rs[..., 0] > 0.9)\n )\n )\n p_same_on_zero_cells = np.average(\n (image_aug[..., 0] > 220)\n == (hm_aug_arr_rs[..., 0] > 0.9))\n assert overlap_true > 19*19\n assert p_same_on_zero_cells > 0.98\n\n def test_image_keypoint_alignment(self):\n aug = iaa.Affine(rotate=[0, 180], order=0)\n img = np.zeros((10, 10), dtype=np.uint8)\n img[0:5, 5] = 255\n img[2, 4:6] = 255\n img_rot = [np.copy(img), np.copy(np.flipud(np.fliplr(img)))]\n kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=5, y=2)], shape=img.shape)\n kpsoi_rot = [(5, 2), (5, 10-2)]\n img_aug_indices = []\n kpsois_aug_indices = []\n for _ in sm.xrange(40):\n aug_det = aug.to_deterministic()\n imgs_aug = aug_det.augment_images([img, img])\n kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])\n\n assert kpsois_aug[0].shape == img.shape\n assert kpsois_aug[1].shape == img.shape\n\n for img_aug in imgs_aug:\n if np.array_equal(img_aug, img_rot[0]):\n img_aug_indices.append(0)\n elif np.array_equal(img_aug, img_rot[1]):\n img_aug_indices.append(1)\n else:\n assert False\n for kpsoi_aug in kpsois_aug:\n similar_to_rot_0 = np.allclose(\n [kpsoi_aug.keypoints[0].x, kpsoi_aug.keypoints[0].y],\n kpsoi_rot[0])\n similar_to_rot_180 = np.allclose(\n [kpsoi_aug.keypoints[0].x, kpsoi_aug.keypoints[0].y],\n kpsoi_rot[1])\n if similar_to_rot_0:\n kpsois_aug_indices.append(0)\n elif similar_to_rot_180:\n kpsois_aug_indices.append(1)\n else:\n assert False\n assert np.array_equal(img_aug_indices, kpsois_aug_indices)\n assert len(set(img_aug_indices)) == 2\n assert len(set(kpsois_aug_indices)) == 2\n\n @classmethod\n def _test_image_cbaoi_alignment(cls, cbaoi, cbaoi_rot, augf_name):\n aug = iaa.Affine(rotate=[0, 180], order=0)\n img = np.zeros((10, 10), dtype=np.uint8)\n img[0:5, 5] = 255\n img[2, 4:6] = 255\n img_rot = [np.copy(img), np.copy(np.flipud(np.fliplr(img)))]\n\n img_aug_indices = []\n cbaois_aug_indices = []\n for _ in sm.xrange(40):\n aug_det = aug.to_deterministic()\n imgs_aug = aug_det.augment_images([img, img])\n cbaois_aug = getattr(aug_det, augf_name)([cbaoi, cbaoi])\n\n assert cbaois_aug[0].shape == img.shape\n assert cbaois_aug[1].shape == img.shape\n if hasattr(cbaois_aug[0].items[0], \"is_valid\"):\n assert cbaois_aug[0].items[0].is_valid\n assert cbaois_aug[1].items[0].is_valid\n\n for img_aug in imgs_aug:\n if np.array_equal(img_aug, img_rot[0]):\n img_aug_indices.append(0)\n elif np.array_equal(img_aug, img_rot[1]):\n img_aug_indices.append(1)\n else:\n assert False\n for cbaoi_aug in cbaois_aug:\n if cbaoi_aug.items[0].coords_almost_equals(cbaoi_rot[0]):\n cbaois_aug_indices.append(0)\n elif cbaoi_aug.items[0].coords_almost_equals(cbaoi_rot[1]):\n cbaois_aug_indices.append(1)\n else:\n assert False\n assert np.array_equal(img_aug_indices, cbaois_aug_indices)\n assert 
len(set(img_aug_indices)) == 2\n assert len(set(cbaois_aug_indices)) == 2\n\n def test_image_polygon_alignment(self):\n psoi = ia.PolygonsOnImage([ia.Polygon([(1, 1), (9, 1), (5, 5)])],\n shape=(10, 10))\n psoi_rot = [\n psoi.polygons[0].deepcopy(),\n ia.Polygon([(10-1, 10-1), (10-9, 10-1), (10-5, 10-5)])\n ]\n self._test_image_cbaoi_alignment(psoi, psoi_rot,\n \"augment_polygons\")\n\n def test_image_line_string_alignment(self):\n lsoi = ia.LineStringsOnImage([ia.LineString([(1, 1), (9, 1), (5, 5)])],\n shape=(10, 10))\n lsoi_rot = [\n lsoi.items[0].deepcopy(),\n ia.LineString([(10-1, 10-1), (10-9, 10-1), (10-5, 10-5)])\n ]\n self._test_image_cbaoi_alignment(lsoi, lsoi_rot,\n \"augment_line_strings\")\n\n def test_image_bounding_box_alignment(self):\n bbsoi = ia.BoundingBoxesOnImage([\n ia.BoundingBox(x1=1, y1=1, x2=9, y2=5)], shape=(10, 10))\n bbsoi_rot = [\n bbsoi.items[0].deepcopy(),\n ia.BoundingBox(x1=10-9, y1=10-5, x2=10-1, y2=10-1)]\n self._test_image_cbaoi_alignment(bbsoi, bbsoi_rot,\n \"augment_bounding_boxes\")\n\n\nclass TestAffine_other_dtypes(unittest.TestCase):\n @property\n def translate_mask(self):\n mask = np.zeros((3, 3), dtype=bool)\n mask[1, 2] = True\n return mask\n\n @property\n def image(self):\n image = np.zeros((17, 17), dtype=bool)\n image[2:15, 5:13] = True\n return image\n\n @property\n def rot_mask_inner(self):\n img_flipped = iaa.Fliplr(1.0)(image=self.image)\n return img_flipped == 1\n\n @property\n def rot_mask_outer(self):\n img_flipped = iaa.Fliplr(1.0)(image=self.image)\n return img_flipped == 0\n\n @property\n def rot_thresh_inner(self):\n return 0.9\n\n @property\n def rot_thresh_outer(self):\n return 0.9\n\n def rot_thresh_inner_float(self, order):\n return 0.85 if order == 1 else 0.7\n\n def rot_thresh_outer_float(self, order):\n return 0.85 if order == 1 else 0.4\n\n def test_translate_skimage_order_0_bool(self):\n aug = iaa.Affine(translate_px={\"x\": 1}, order=0, mode=\"constant\",\n backend=\"skimage\")\n image = np.zeros((3, 3), dtype=bool)\n image[1, 1] = True\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == image.dtype.name\n assert np.all(image_aug[~self.translate_mask] == 0)\n assert np.all(image_aug[self.translate_mask] == 1)\n\n def test_translate_skimage_order_0_uint_int(self):\n dtypes = [\"uint8\", \"uint16\", \"uint32\", \"int8\", \"int16\", \"int32\"]\n for dtype in dtypes:\n aug = iaa.Affine(translate_px={\"x\": 1}, order=0, mode=\"constant\",\n backend=\"skimage\")\n\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n if np.dtype(dtype).kind == \"i\":\n values = [1, 5, 10, 100, int(0.1 * max_value),\n int(0.2 * max_value), int(0.5 * max_value),\n max_value - 100, max_value]\n values = values + [(-1) * value for value in values]\n else:\n values = [1, 5, 10, 100, int(center_value),\n int(0.1 * max_value), int(0.2 * max_value),\n int(0.5 * max_value), max_value - 100, max_value]\n\n for value in values:\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert np.all(image_aug[~self.translate_mask] == 0)\n assert np.all(image_aug[self.translate_mask] == value)\n\n def test_translate_skimage_order_0_float(self):\n # float\n dtypes = [\"float16\", \"float32\", \"float64\"]\n for dtype in dtypes:\n aug = iaa.Affine(translate_px={\"x\": 1}, order=0, mode=\"constant\",\n backend=\"skimage\")\n\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n def 
_isclose(a, b):\n atol = 1e-4 if dtype == \"float16\" else 1e-8\n return np.isclose(a, b, atol=atol, rtol=0)\n\n isize = np.dtype(dtype).itemsize\n values = [\n 0.01,\n 1.0,\n 10.0,\n 100.0,\n 500 ** (isize - 1),\n float(np.float64(1000 ** (isize - 1)))\n ]\n values = values + [(-1) * value for value in values]\n values = values + [min_value, max_value]\n for value in values:\n with self.subTest(dtype=dtype, value=value):\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert np.all(_isclose(image_aug[~self.translate_mask], 0))\n assert np.all(_isclose(image_aug[self.translate_mask],\n value))\n\n def test_rotate_skimage_order_not_0_bool(self):\n # skimage, order!=0 and rotate=180\n for order in [1, 3, 4, 5]:\n aug = iaa.Affine(rotate=180, order=order, mode=\"constant\",\n backend=\"skimage\")\n aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])\n\n image = np.zeros((17, 17), dtype=bool)\n image[2:15, 5:13] = True\n\n image_aug = aug.augment_image(image)\n image_exp = aug_flip.augment_image(image)\n\n assert image_aug.dtype.name == image.dtype.name\n assert (\n np.sum(image_aug == image_exp)/image.size\n ) > self.rot_thresh_inner\n\n def test_rotate_skimage_order_not_0_uint_int(self):\n def _compute_matching(image_aug, image_exp, mask):\n return np.sum(\n np.isclose(image_aug[mask], image_exp[mask], rtol=0,\n atol=1.001)\n ) / np.sum(mask)\n\n dtypes = [\"uint8\", \"uint16\", \"uint32\", \"int8\", \"int16\", \"int32\"]\n for dtype in dtypes:\n for order in [1, 3, 4, 5]:\n aug = iaa.Affine(rotate=180, order=order, mode=\"constant\",\n backend=\"skimage\")\n aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])\n\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n if np.dtype(dtype).kind == \"i\":\n values = [1, 5, 10, 100, int(0.1 * max_value),\n int(0.2 * max_value), int(0.5 * max_value),\n max_value - 100, max_value]\n values = values + [(-1) * value for value in values]\n else:\n values = [1, 5, 10, 100, int(center_value),\n int(0.1 * max_value), int(0.2 * max_value),\n int(0.5 * max_value), max_value - 100, max_value]\n\n for value in values:\n with self.subTest(dtype=dtype, order=order, value=value):\n image = np.zeros((17, 17), dtype=dtype)\n image[2:15, 5:13] = value\n\n image_aug = aug.augment_image(image)\n image_exp = aug_flip.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert _compute_matching(\n image_aug, image_exp, self.rot_mask_inner\n ) > self.rot_thresh_inner\n assert _compute_matching(\n image_aug, image_exp, self.rot_mask_outer\n ) > self.rot_thresh_outer\n\n def test_rotate_skimage_order_not_0_float(self):\n def _compute_matching(image_aug, image_exp, mask):\n return np.sum(\n _isclose(image_aug[mask], image_exp[mask])\n ) / np.sum(mask)\n\n for order in [1, 3, 4, 5]:\n dtypes = [\"float16\", \"float32\", \"float64\"]\n if order == 5:\n # float64 caused too many interpolation inaccuracies for\n # order=5, not wrong but harder to test\n dtypes = [\"float16\", \"float32\"]\n for dtype in dtypes:\n aug = iaa.Affine(rotate=180, order=order, mode=\"constant\",\n backend=\"skimage\")\n aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])\n\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n def _isclose(a, b):\n atol = 1e-4 if dtype == \"float16\" else 1e-8\n if order not in [0, 1]:\n atol = 1e-2\n return np.isclose(a, b, atol=atol, rtol=0)\n\n isize = 
np.dtype(dtype).itemsize\n values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),\n 1000 ** (isize - 1)]\n values = values + [(-1) * value for value in values]\n if order not in [3, 4]: # results in NaNs otherwise\n values = values + [min_value, max_value]\n for value in values:\n with self.subTest(order=order, dtype=dtype, value=value):\n image = np.zeros((17, 17), dtype=dtype)\n image[2:15, 5:13] = value\n\n image_aug = aug.augment_image(image)\n image_exp = aug_flip.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert _compute_matching(\n image_aug, image_exp, self.rot_mask_inner\n ) > self.rot_thresh_inner_float(order)\n assert _compute_matching(\n image_aug, image_exp, self.rot_mask_outer\n ) > self.rot_thresh_outer_float(order)\n\n def test_translate_cv2_order_0_bool(self):\n aug = iaa.Affine(translate_px={\"x\": 1}, order=0, mode=\"constant\",\n backend=\"cv2\")\n\n image = np.zeros((3, 3), dtype=bool)\n image[1, 1] = True\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == image.dtype.name\n assert np.all(image_aug[~self.translate_mask] == 0)\n assert np.all(image_aug[self.translate_mask] == 1)\n\n def test_translate_cv2_order_0_uint_int(self):\n aug = iaa.Affine(translate_px={\"x\": 1}, order=0, mode=\"constant\",\n backend=\"cv2\")\n\n dtypes = [\"uint8\", \"uint16\", \"int8\", \"int16\", \"int32\"]\n for dtype in dtypes:\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n if np.dtype(dtype).kind == \"i\":\n values = [1, 5, 10, 100, int(0.1 * max_value),\n int(0.2 * max_value), int(0.5 * max_value),\n max_value - 100, max_value]\n values = values + [(-1) * value for value in values]\n else:\n values = [1, 5, 10, 100, int(center_value),\n int(0.1 * max_value), int(0.2 * max_value),\n int(0.5 * max_value), max_value - 100, max_value]\n\n for value in values:\n with self.subTest(dtype=dtype, value=value):\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert np.all(image_aug[~self.translate_mask] == 0)\n assert np.all(image_aug[self.translate_mask] == value)\n\n def test_translate_cv2_order_0_float(self):\n aug = iaa.Affine(translate_px={\"x\": 1}, order=0, mode=\"constant\",\n backend=\"cv2\")\n\n dtypes = [\"float16\", \"float32\", \"float64\"]\n for dtype in dtypes:\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n def _isclose(a, b):\n atol = 1e-4 if dtype == \"float16\" else 1e-8\n return np.isclose(a, b, atol=atol, rtol=0)\n\n isize = np.dtype(dtype).itemsize\n values = [\n 0.01,\n 1.0,\n 10.0,\n 100.0,\n 500 ** (isize - 1),\n float(np.float64(1000 ** (isize - 1)))\n ]\n values = values + [(-1) * value for value in values]\n values = values + [min_value, max_value]\n for value in values:\n with self.subTest(dtype=dtype, value=value):\n image = np.zeros((3, 3), dtype=dtype)\n image[1, 1] = value\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert np.all(_isclose(image_aug[~self.translate_mask], 0))\n assert np.all(_isclose(image_aug[self.translate_mask],\n value))\n\n def test_rotate_cv2_order_1_and_3_bool(self):\n # cv2, order=1 and rotate=180\n for order in [1, 3]:\n aug = iaa.Affine(rotate=180, order=order, mode=\"constant\",\n backend=\"cv2\")\n aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])\n\n image = np.zeros((17, 17), dtype=bool)\n image[2:15, 5:13] = True\n\n image_aug = aug.augment_image(image)\n image_exp = 
aug_flip.augment_image(image)\n\n assert image_aug.dtype.name == image.dtype.name\n assert (np.sum(image_aug == image_exp) / image.size) > 0.9\n\n def test_rotate_cv2_order_1_and_3_uint_int(self):\n # cv2, order=1 and rotate=180\n for order in [1, 3]:\n aug = iaa.Affine(rotate=180, order=order, mode=\"constant\",\n backend=\"cv2\")\n aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])\n\n dtypes = [\"uint8\", \"uint16\", \"int8\", \"int16\"]\n for dtype in dtypes:\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n if np.dtype(dtype).kind == \"i\":\n values = [1, 5, 10, 100, int(0.1 * max_value),\n int(0.2 * max_value), int(0.5 * max_value),\n max_value - 100, max_value]\n values = values + [(-1) * value for value in values]\n else:\n values = [1, 5, 10, 100, int(center_value),\n int(0.1 * max_value), int(0.2 * max_value),\n int(0.5 * max_value), max_value - 100, max_value]\n\n for value in values:\n with self.subTest(order=order, dtype=dtype, value=value):\n image = np.zeros((17, 17), dtype=dtype)\n image[2:15, 5:13] = value\n\n image_aug = aug.augment_image(image)\n image_exp = aug_flip.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert (\n np.sum(image_aug == image_exp) / image.size\n ) > 0.9\n\n def test_rotate_cv2_order_1_and_3_float(self):\n # cv2, order=1 and rotate=180\n for order in [1, 3]:\n aug = iaa.Affine(rotate=180, order=order, mode=\"constant\",\n backend=\"cv2\")\n aug_flip = iaa.Sequential([iaa.Flipud(1.0), iaa.Fliplr(1.0)])\n\n dtypes = [\"float16\", \"float32\", \"float64\"]\n for dtype in dtypes:\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n def _isclose(a, b):\n atol = 1e-4 if dtype == \"float16\" else 1e-8\n return np.isclose(a, b, atol=atol, rtol=0)\n\n isize = np.dtype(dtype).itemsize\n values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),\n 1000 ** (isize - 1)]\n values = values + [(-1) * value for value in values]\n values = values + [min_value, max_value]\n for value in values:\n with self.subTest(order=order, dtype=dtype, value=value):\n image = np.zeros((17, 17), dtype=dtype)\n image[2:15, 5:13] = value\n\n image_aug = aug.augment_image(image)\n image_exp = aug_flip.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert (\n np.sum(_isclose(image_aug, image_exp)) / image.size\n ) > 0.9\n\n\nclass TestAffine_other(unittest.TestCase):\n def test_unusual_channel_numbers(self):\n with assertWarns(self, iaa.SuspiciousSingleImageShapeWarning):\n nb_channels_lst = [4, 5, 512, 513]\n orders = [0, 1, 3]\n backends = [\"auto\", \"skimage\", \"cv2\"]\n gen = itertools.product(nb_channels_lst, orders, backends)\n for nb_channels, order, backend in gen:\n with self.subTest(nb_channels=nb_channels, order=order,\n backend=backend):\n aug = iaa.Affine(translate_px={\"x\": -1}, mode=\"constant\",\n cval=255, order=order, backend=backend)\n\n image = np.full((3, 3, nb_channels), 128, dtype=np.uint8)\n heatmap_arr = np.full((3, 3, nb_channels), 0.5,\n dtype=np.float32)\n heatmap = ia.HeatmapsOnImage(heatmap_arr, shape=image.shape)\n\n image_aug, heatmap_aug = aug(image=image, heatmaps=heatmap)\n hm_aug_arr = heatmap_aug.arr_0to1\n\n assert image_aug.shape == (3, 3, nb_channels)\n assert heatmap_aug.arr_0to1.shape == (3, 3, nb_channels)\n assert heatmap_aug.shape == image.shape\n assert np.allclose(image_aug[:, 0:2, :], 128, rtol=0,\n atol=2)\n assert np.allclose(image_aug[:, 2:3, 0:3], 255, rtol=0,\n atol=2)\n assert np.allclose(image_aug[:, 2:3, 3:], 255, 
rtol=0,\n                                       atol=2)\n                    assert np.allclose(hm_aug_arr[:, 0:2, :], 0.5, rtol=0,\n                                       atol=0.025)\n                    assert np.allclose(hm_aug_arr[:, 2:3, :], 0.0, rtol=0,\n                                       atol=0.025)\n\n    def test_zero_sized_axes(self):\n        shapes = [\n            (0, 0),\n            (0, 1),\n            (1, 0),\n            (0, 1, 1),\n            (1, 0, 1)\n        ]\n\n        for fit_output in [False, True]:\n            for shape in shapes:\n                with self.subTest(shape=shape, fit_output=fit_output):\n                    image = np.zeros(shape, dtype=np.uint8)\n                    aug = iaa.Affine(rotate=45, fit_output=fit_output)\n\n                    image_aug = aug(image=image)\n\n                    assert image_aug.dtype.name == \"uint8\"\n                    assert image_aug.shape == shape\n\n    def test_pickleable(self):\n        aug = iaa.Affine(scale=(0.9, 1.1), translate_px=(-4, 4),\n                         rotate=(-10, 10), shear=(-10, 10), order=[0, 1])\n        runtest_pickleable_uint8_img(aug, iterations=20)\n\n\nclass TestScaleX(unittest.TestCase):\n    def setUp(self):\n        reseed()\n\n    def test___init__(self):\n        aug = iaa.ScaleX(1.5)\n        assert isinstance(aug, iaa.Affine)\n        assert np.isclose(aug.scale[0].value, 1.5)\n        assert aug.order.value == 1\n        assert aug.cval.value == 0\n        assert aug.mode.value == \"constant\"\n        assert aug.fit_output is False\n\n    def test_integrationtest(self):\n        image = np.zeros((10, 10), dtype=np.uint8)\n        image[5, 5] = 255\n        aug = iaa.ScaleX(4.0, order=0)\n\n        image_aug = aug(image=image)\n\n        xx = np.nonzero(np.max(image_aug, axis=0) > 200)[0]\n        yy = np.nonzero(np.max(image_aug, axis=1) > 200)[0]\n        x1, x2 = xx[0], xx[-1]\n        y1, y2 = yy[0], yy[-1]\n        # not >=4, because if e.g. index 1 is spread to 0 to 3 after scaling,\n        # it covers four cells (0, 1, 2, 3), but 3-0 is 3\n        assert x2 - x1 >= 3\n        assert y2 - y1 < 1\n\n\nclass TestScaleY(unittest.TestCase):\n    def setUp(self):\n        reseed()\n\n    def test___init__(self):\n        aug = iaa.ScaleY(1.5)\n        assert isinstance(aug, iaa.Affine)\n        assert np.isclose(aug.scale[1].value, 1.5)\n        assert aug.order.value == 1\n        assert aug.cval.value == 0\n        assert aug.mode.value == \"constant\"\n        assert aug.fit_output is False\n\n    def test_integrationtest(self):\n        image = np.zeros((10, 10), dtype=np.uint8)\n        image[5, 5] = 255\n        aug = iaa.ScaleY(4.0, order=0)\n\n        image_aug = aug(image=image)\n\n        xx = np.nonzero(np.max(image_aug, axis=0) > 200)[0]\n        yy = np.nonzero(np.max(image_aug, axis=1) > 200)[0]\n        x1, x2 = xx[0], xx[-1]\n        y1, y2 = yy[0], yy[-1]\n        # not >=4, because if e.g. 
index 1 is spread to 0 to 3 after scaling,\n # it covers four cells (0, 1, 2, 3), but 3-0 is 3\n assert y2 - y1 >= 3\n assert x2 - x1 < 1\n\n\nclass TestTranslateX(unittest.TestCase):\n def setUp(self):\n reseed()\n\n def test___init___translate_percent(self):\n aug = iaa.TranslateX(percent=0.5)\n assert isinstance(aug, iaa.Affine)\n assert np.isclose(aug.translate[0].value, 0.5)\n assert aug.order.value == 1\n assert aug.cval.value == 0\n assert aug.mode.value == \"constant\"\n assert aug.fit_output is False\n\n def test___init___translate_px(self):\n aug = iaa.TranslateX(px=2)\n assert isinstance(aug, iaa.Affine)\n assert np.isclose(aug.translate[0].value, 2)\n assert aug.order.value == 1\n assert aug.cval.value == 0\n assert aug.mode.value == \"constant\"\n assert aug.fit_output is False\n\n def test___init___both_none(self):\n aug = iaa.TranslateX()\n assert np.isclose(aug.translate[0].a.value, -0.25)\n assert np.isclose(aug.translate[0].b.value, 0.25)\n\n def test_integrationtest_translate_percent(self):\n image = np.full((50, 50), 255, dtype=np.uint8)\n aug = iaa.TranslateX(percent=0.5, order=1, cval=0)\n\n image_aug = aug(image=image)\n\n expected = np.copy(image)\n expected[:, 0:25] = 0\n overlap = np.average(np.isclose(image_aug, expected, atol=1.01))\n assert overlap > (1.0 - (1/50) - 1e-4)\n\n def test_integrationtest_translate_px(self):\n image = np.full((50, 50), 255, dtype=np.uint8)\n aug = iaa.TranslateX(px=25, order=1, cval=0)\n\n image_aug = aug(image=image)\n\n expected = np.copy(image)\n expected[:, 0:25] = 0\n overlap = np.average(np.isclose(image_aug, expected, atol=1.01))\n assert overlap > (1.0 - (1/50) - 1e-4)\n\n\nclass TestTranslateY(unittest.TestCase):\n def setUp(self):\n reseed()\n\n def test___init___translate_percent(self):\n aug = iaa.TranslateY(percent=0.5)\n assert isinstance(aug, iaa.Affine)\n assert np.isclose(aug.translate[1].value, 0.5)\n assert aug.order.value == 1\n assert aug.cval.value == 0\n assert aug.mode.value == \"constant\"\n assert aug.fit_output is False\n\n def test___init___translate_px(self):\n aug = iaa.TranslateY(px=2)\n assert isinstance(aug, iaa.Affine)\n assert np.isclose(aug.translate[1].value, 2)\n assert aug.order.value == 1\n assert aug.cval.value == 0\n assert aug.mode.value == \"constant\"\n assert aug.fit_output is False\n\n def test___init___both_none(self):\n aug = iaa.TranslateY()\n assert np.isclose(aug.translate[1].a.value, -0.25)\n assert np.isclose(aug.translate[1].b.value, 0.25)\n\n def test_integrationtest_translate_percent(self):\n image = np.full((50, 50), 255, dtype=np.uint8)\n aug = iaa.TranslateY(percent=0.5, order=1, cval=0)\n\n image_aug = aug(image=image)\n\n expected = np.copy(image)\n expected[0:25, :] = 0\n overlap = np.average(np.isclose(image_aug, expected, atol=1.01))\n assert overlap > (1.0 - (1/50) - 1e-4)\n\n def test_integrationtest_translate_px(self):\n image = np.full((50, 50), 255, dtype=np.uint8)\n aug = iaa.TranslateY(px=25, order=1, cval=0)\n\n image_aug = aug(image=image)\n\n expected = np.copy(image)\n expected[0:25, :] = 0\n overlap = np.average(np.isclose(image_aug, expected, atol=1.01))\n assert overlap > (1.0 - (1/50) - 1e-4)\n\n\nclass TestRotate(unittest.TestCase):\n def setUp(self):\n reseed()\n\n def test___init___(self):\n aug = iaa.Rotate(rotate=45)\n assert isinstance(aug, iaa.Affine)\n assert np.isclose(aug.rotate.value, 45)\n assert aug.order.value == 1\n assert aug.cval.value == 0\n assert aug.mode.value == \"constant\"\n assert aug.fit_output is False\n\n def 
test_integrationtest(self):\n image = np.zeros((40, 20), dtype=np.uint8)\n image[:, 10:10+1] = 255\n aug = iaa.Rotate(90, order=0)\n\n image_aug = aug(image=image)\n\n assert image_aug.shape == (40, 20)\n assert np.isclose(np.sum(image_aug[20-1:20+2, :]), 255*20, atol=1)\n\n\nclass TestShearX(unittest.TestCase):\n def setUp(self):\n reseed()\n\n def test___init__(self):\n aug = iaa.ShearX(40)\n assert isinstance(aug, iaa.Affine)\n assert aug.shear[0].value == 40\n assert aug.order.value == 1\n assert aug.cval.value == 0\n assert aug.mode.value == \"constant\"\n assert aug.fit_output is False\n\n def test_integrationtest(self):\n def _find_coords(arr):\n xx = np.nonzero(np.max(arr, axis=0) > 200)[0]\n yy = np.nonzero(np.max(arr, axis=1) > 200)[0]\n x1 = xx[0]\n x2 = xx[-1]\n y1 = yy[0]\n y2 = yy[-1]\n return x1+(x2-x1)/2, y1+(y2-y1)/2\n\n image = np.zeros((50, 50, 4), dtype=np.uint8)\n image[10:10+1, 20:20+1, 0] = 255\n image[10:10+1, 30:30+1, 1] = 255\n image[40:40+1, 30:30+1, 2] = 255\n image[40:40+1, 20:20+1, 3] = 255\n aug = iaa.ShearX(30, order=0)\n\n image_aug = aug(image=image)\n\n x1, y1 = _find_coords(image_aug[..., 0])\n x2, y2 = _find_coords(image_aug[..., 1])\n x3, y3 = _find_coords(image_aug[..., 2])\n x4, y4 = _find_coords(image_aug[..., 3])\n assert x1 > 20\n assert np.isclose(y1, 10.0)\n assert np.isclose(y2, 10.0)\n assert x3 < 30\n assert np.isclose(y3, 40.0)\n assert np.isclose(y4, 40.0)\n assert not np.isclose(x1, x4)\n assert not np.isclose(x2, x3)\n\n\nclass TestShearY(unittest.TestCase):\n def setUp(self):\n reseed()\n\n def test___init__(self):\n aug = iaa.ShearY(40)\n assert isinstance(aug, iaa.Affine)\n assert aug.shear[1].value == 40\n assert aug.order.value == 1\n assert aug.cval.value == 0\n assert aug.mode.value == \"constant\"\n assert aug.fit_output is False\n\n def test_integrationtest(self):\n def _find_coords(arr):\n xx = np.nonzero(np.max(arr, axis=0) > 200)[0]\n yy = np.nonzero(np.max(arr, axis=1) > 200)[0]\n x1 = xx[0]\n x2 = xx[-1]\n y1 = yy[0]\n y2 = yy[-1]\n return x1+(x2-x1)/2, y1+(y2-y1)/2\n\n image = np.zeros((50, 50, 4), dtype=np.uint8)\n image[20:20+1, 10:10+1, 0] = 255\n image[20:20+1, 40:40+1, 1] = 255\n image[30:30+1, 40:40+1, 2] = 255\n image[30:30+1, 10:10+1, 3] = 255\n aug = iaa.ShearY(30, order=0)\n\n image_aug = aug(image=image)\n\n x1, y1 = _find_coords(image_aug[..., 0])\n x2, y2 = _find_coords(image_aug[..., 1])\n x3, y3 = _find_coords(image_aug[..., 2])\n x4, y4 = _find_coords(image_aug[..., 3])\n assert y1 < 20\n assert np.isclose(x1, 10.0)\n assert np.isclose(x4, 10.0)\n assert y2 > 20\n assert np.isclose(x2, 40.0)\n assert np.isclose(x3, 40.0)\n assert not np.isclose(y1, y2)\n assert not np.isclose(y3, y4)\n\n\n# TODO migrate to unittest and split up tests or remove AffineCv2\ndef test_AffineCv2():\n reseed()\n\n with warnings.catch_warnings(record=True) as caught_warnings:\n warnings.simplefilter(\"always\")\n _ = iaa.AffineCv2()\n\n assert \"is deprecated\" in str(caught_warnings[0].message)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=ia.DeprecationWarning)\n\n base_img = np.array([[0, 0, 0],\n [0, 255, 0],\n [0, 0, 0]], dtype=np.uint8)\n base_img = base_img[:, :, np.newaxis]\n\n images = np.array([base_img])\n images_list = [base_img]\n outer_pixels = ([], [])\n for i in sm.xrange(base_img.shape[0]):\n for j in sm.xrange(base_img.shape[1]):\n if i != j:\n outer_pixels[0].append(i)\n outer_pixels[1].append(j)\n\n kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),\n ia.Keypoint(x=2, y=2)]\n 
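    # kps marks the three diagonal cells (0, 0), (1, 1) and (2, 2) of\n        # the 3x3 base image; the identity transform below must keep\n        # them in place\n    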
keypoints = [ia.KeypointsOnImage(kps, shape=base_img.shape)]\n\n        # no translation/scale/rotate/shear, shouldn't change anything\n        aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0, shear=0)\n        aug_det = aug.to_deterministic()\n\n        observed = aug.augment_images(images)\n        expected = images\n        assert np.array_equal(observed, expected)\n\n        observed = aug_det.augment_images(images)\n        expected = images\n        assert np.array_equal(observed, expected)\n\n        observed = aug.augment_images(images_list)\n        expected = images_list\n        assert array_equal_lists(observed, expected)\n\n        observed = aug_det.augment_images(images_list)\n        expected = images_list\n        assert array_equal_lists(observed, expected)\n\n        observed = aug.augment_keypoints(keypoints)\n        expected = keypoints\n        assert keypoints_equal(observed, expected)\n\n        observed = aug_det.augment_keypoints(keypoints)\n        expected = keypoints\n        assert keypoints_equal(observed, expected)\n\n        # ---------------------\n        # scale\n        # ---------------------\n        # zoom in\n        aug = iaa.AffineCv2(scale=1.75, translate_px=0, rotate=0, shear=0)\n        aug_det = aug.to_deterministic()\n\n        observed = aug.augment_images(images)\n        assert observed[0][1, 1] > 250\n        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()\n        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()\n\n        observed = aug_det.augment_images(images)\n        assert observed[0][1, 1] > 250\n        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()\n        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()\n\n        observed = aug.augment_images(images_list)\n        assert observed[0][1, 1] > 250\n        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()\n        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()\n\n        observed = aug_det.augment_images(images_list)\n        assert observed[0][1, 1] > 250\n        assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()\n        assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()\n\n        observed = aug.augment_keypoints(keypoints)\n        assert observed[0].keypoints[0].x < 0\n        assert observed[0].keypoints[0].y < 0\n        assert observed[0].keypoints[1].x == 1\n        assert observed[0].keypoints[1].y == 1\n        assert observed[0].keypoints[2].x > 2\n        assert observed[0].keypoints[2].y > 2\n\n        observed = aug_det.augment_keypoints(keypoints)\n        assert observed[0].keypoints[0].x < 0\n        assert observed[0].keypoints[0].y < 0\n        assert observed[0].keypoints[1].x == 1\n        assert observed[0].keypoints[1].y == 1\n        assert observed[0].keypoints[2].x > 2\n        assert observed[0].keypoints[2].y > 2\n\n        # zoom in only on x axis\n        aug = iaa.AffineCv2(scale={\"x\": 1.75, \"y\": 1.0}, translate_px=0,\n                            rotate=0, shear=0)\n        aug_det = aug.to_deterministic()\n\n        observed = aug.augment_images(images)\n        assert observed[0][1, 1] > 250\n        assert (observed[0][[1, 1], [0, 2]] > 20).all()\n        assert (observed[0][[1, 1], [0, 2]] < 150).all()\n        assert (observed[0][0, :] < 5).all()\n        assert (observed[0][2, :] < 5).all()\n\n        observed = aug_det.augment_images(images)\n        assert observed[0][1, 1] > 250\n        assert (observed[0][[1, 1], [0, 2]] > 20).all()\n        assert (observed[0][[1, 1], [0, 2]] < 150).all()\n        assert (observed[0][0, :] < 5).all()\n        assert (observed[0][2, :] < 5).all()\n\n        observed = aug.augment_images(images_list)\n        assert observed[0][1, 1] > 250\n        assert (observed[0][[1, 1], [0, 2]] > 20).all()\n        assert (observed[0][[1, 1], [0, 2]] < 150).all()\n        assert (observed[0][0, :] < 5).all()\n        assert (observed[0][2, :] < 5).all()\n\n        observed = aug_det.augment_images(images_list)\n        assert observed[0][1, 
1] > 250\n assert (observed[0][[1, 1], [0, 2]] > 20).all()\n assert (observed[0][[1, 1], [0, 2]] < 150).all()\n assert (observed[0][0, :] < 5).all()\n assert (observed[0][2, :] < 5).all()\n\n observed = aug.augment_keypoints(keypoints)\n assert observed[0].keypoints[0].x < 0\n assert observed[0].keypoints[0].y == 0\n assert observed[0].keypoints[1].x == 1\n assert observed[0].keypoints[1].y == 1\n assert observed[0].keypoints[2].x > 2\n assert observed[0].keypoints[2].y == 2\n\n observed = aug_det.augment_keypoints(keypoints)\n assert observed[0].keypoints[0].x < 0\n assert observed[0].keypoints[0].y == 0\n assert observed[0].keypoints[1].x == 1\n assert observed[0].keypoints[1].y == 1\n assert observed[0].keypoints[2].x > 2\n assert observed[0].keypoints[2].y == 2\n\n # zoom in only on y axis\n aug = iaa.AffineCv2(scale={\"x\": 1.0, \"y\": 1.75}, translate_px=0,\n rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n observed = aug.augment_images(images)\n assert observed[0][1, 1] > 250\n assert (observed[0][[0, 2], [1, 1]] > 20).all()\n assert (observed[0][[0, 2], [1, 1]] < 150).all()\n assert (observed[0][:, 0] < 5).all()\n assert (observed[0][:, 2] < 5).all()\n\n observed = aug_det.augment_images(images)\n assert observed[0][1, 1] > 250\n assert (observed[0][[0, 2], [1, 1]] > 20).all()\n assert (observed[0][[0, 2], [1, 1]] < 150).all()\n assert (observed[0][:, 0] < 5).all()\n assert (observed[0][:, 2] < 5).all()\n\n observed = aug.augment_images(images_list)\n assert observed[0][1, 1] > 250\n assert (observed[0][[0, 2], [1, 1]] > 20).all()\n assert (observed[0][[0, 2], [1, 1]] < 150).all()\n assert (observed[0][:, 0] < 5).all()\n assert (observed[0][:, 2] < 5).all()\n\n observed = aug_det.augment_images(images_list)\n assert observed[0][1, 1] > 250\n assert (observed[0][[0, 2], [1, 1]] > 20).all()\n assert (observed[0][[0, 2], [1, 1]] < 150).all()\n assert (observed[0][:, 0] < 5).all()\n assert (observed[0][:, 2] < 5).all()\n\n observed = aug.augment_keypoints(keypoints)\n assert observed[0].keypoints[0].x == 0\n assert observed[0].keypoints[0].y < 0\n assert observed[0].keypoints[1].x == 1\n assert observed[0].keypoints[1].y == 1\n assert observed[0].keypoints[2].x == 2\n assert observed[0].keypoints[2].y > 2\n\n observed = aug_det.augment_keypoints(keypoints)\n assert observed[0].keypoints[0].x == 0\n assert observed[0].keypoints[0].y < 0\n assert observed[0].keypoints[1].x == 1\n assert observed[0].keypoints[1].y == 1\n assert observed[0].keypoints[2].x == 2\n assert observed[0].keypoints[2].y > 2\n\n # zoom out\n # this one uses a 4x4 area of all 255, which is zoomed out to a 4x4\n # area in which the center 2x2 area is 255\n # zoom in should probably be adapted to this style\n # no separate tests here for x/y axis, should work fine if zoom in\n # works with that\n aug = iaa.AffineCv2(scale=0.49, translate_px=0, rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n image = np.ones((4, 4, 1), dtype=np.uint8) * 255\n images = np.array([image])\n images_list = [image]\n outer_pixels = ([], [])\n for y in sm.xrange(4):\n xs = sm.xrange(4) if y in [0, 3] else [0, 3]\n for x in xs:\n outer_pixels[0].append(y)\n outer_pixels[1].append(x)\n inner_pixels = ([1, 1, 2, 2], [1, 2, 1, 2])\n kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),\n ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)]\n keypoints = [ia.KeypointsOnImage(kps, shape=image.shape)]\n kps_aug = [ia.Keypoint(x=0.765, y=0.765),\n ia.Keypoint(x=2.235, y=0.765),\n ia.Keypoint(x=0.765, y=2.235),\n ia.Keypoint(x=2.235, 
y=2.235)]\n keypoints_aug = [ia.KeypointsOnImage(kps_aug, shape=image.shape)]\n\n observed = aug.augment_images(images)\n assert (observed[0][outer_pixels] < 25).all()\n assert (observed[0][inner_pixels] > 200).all()\n\n observed = aug_det.augment_images(images)\n assert (observed[0][outer_pixels] < 25).all()\n assert (observed[0][inner_pixels] > 200).all()\n\n observed = aug.augment_images(images_list)\n assert (observed[0][outer_pixels] < 25).all()\n assert (observed[0][inner_pixels] > 200).all()\n\n observed = aug_det.augment_images(images_list)\n assert (observed[0][outer_pixels] < 25).all()\n assert (observed[0][inner_pixels] > 200).all()\n\n observed = aug.augment_keypoints(keypoints)\n assert keypoints_equal(observed, keypoints_aug)\n\n observed = aug_det.augment_keypoints(keypoints)\n assert keypoints_equal(observed, keypoints_aug)\n\n # varying scales\n aug = iaa.AffineCv2(scale={\"x\": (0.5, 1.5), \"y\": (0.5, 1.5)},\n translate_px=0, rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n image = np.array([[0, 0, 0, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 2, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0]], dtype=np.uint8) * 100\n image = image[:, :, np.newaxis]\n images = np.array([image])\n\n last_aug = None\n last_aug_det = None\n nb_changed_aug = 0\n nb_changed_aug_det = 0\n nb_iterations = 1000\n for i in sm.xrange(nb_iterations):\n observed_aug = aug.augment_images(images)\n observed_aug_det = aug_det.augment_images(images)\n if i == 0:\n last_aug = observed_aug\n last_aug_det = observed_aug_det\n else:\n if not np.array_equal(observed_aug, last_aug):\n nb_changed_aug += 1\n if not np.array_equal(observed_aug_det, last_aug_det):\n nb_changed_aug_det += 1\n last_aug = observed_aug\n last_aug_det = observed_aug_det\n assert nb_changed_aug >= int(nb_iterations * 0.8)\n assert nb_changed_aug_det == 0\n\n aug = iaa.AffineCv2(scale=iap.Uniform(0.7, 0.9))\n assert is_parameter_instance(aug.scale, iap.Uniform)\n assert is_parameter_instance(aug.scale.a, iap.Deterministic)\n assert is_parameter_instance(aug.scale.b, iap.Deterministic)\n assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8\n assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8\n\n # ---------------------\n # translate\n # ---------------------\n # move one pixel to the right\n aug = iaa.AffineCv2(scale=1.0, translate_px={\"x\": 1, \"y\": 0},\n rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n image = np.zeros((3, 3, 1), dtype=np.uint8)\n image_aug = np.copy(image)\n image[1, 1] = 255\n image_aug[1, 2] = 255\n images = np.array([image])\n images_aug = np.array([image_aug])\n images_list = [image]\n images_aug_list = [image_aug]\n keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],\n shape=base_img.shape)]\n keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)],\n shape=base_img.shape)]\n\n observed = aug.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n observed = aug_det.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n observed = aug.augment_images(images_list)\n assert array_equal_lists(observed, images_aug_list)\n\n observed = aug_det.augment_images(images_list)\n assert array_equal_lists(observed, images_aug_list)\n\n observed = aug.augment_keypoints(keypoints)\n assert keypoints_equal(observed, keypoints_aug)\n\n observed = aug_det.augment_keypoints(keypoints)\n assert keypoints_equal(observed, keypoints_aug)\n\n # move one pixel to the right\n aug = iaa.AffineCv2(scale=1.0, translate_px={\"x\": 1, \"y\": 0},\n rotate=0, shear=0)\n observed = 
aug.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n # move one pixel to the right\n aug = iaa.AffineCv2(scale=1.0, translate_px={\"x\": 1, \"y\": 0},\n rotate=0, shear=0)\n observed = aug.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n # move one pixel to the right\n # with order=ALL\n aug = iaa.AffineCv2(scale=1.0, translate_px={\"x\": 1, \"y\": 0},\n rotate=0, shear=0, order=ia.ALL)\n observed = aug.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n # move one pixel to the right\n # with order=list\n aug = iaa.AffineCv2(scale=1.0, translate_px={\"x\": 1, \"y\": 0},\n rotate=0, shear=0, order=[0, 1, 2])\n observed = aug.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n # move one pixel to the right\n # with order=StochasticParameter\n aug = iaa.AffineCv2(scale=1.0, translate_px={\"x\": 1, \"y\": 0},\n rotate=0, shear=0, order=iap.Choice([0, 1, 2]))\n observed = aug.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n # move one pixel to the bottom\n aug = iaa.AffineCv2(scale=1.0, translate_px={\"x\": 0, \"y\": 1},\n rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n image = np.zeros((3, 3, 1), dtype=np.uint8)\n image_aug = np.copy(image)\n image[1, 1] = 255\n image_aug[2, 1] = 255\n images = np.array([image])\n images_aug = np.array([image_aug])\n images_list = [image]\n images_aug_list = [image_aug]\n keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],\n shape=base_img.shape)]\n keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],\n shape=base_img.shape)]\n\n observed = aug.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n observed = aug_det.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n observed = aug.augment_images(images_list)\n assert array_equal_lists(observed, images_aug_list)\n\n observed = aug_det.augment_images(images_list)\n assert array_equal_lists(observed, images_aug_list)\n\n observed = aug.augment_keypoints(keypoints)\n assert keypoints_equal(observed, keypoints_aug)\n\n observed = aug_det.augment_keypoints(keypoints)\n assert keypoints_equal(observed, keypoints_aug)\n\n # move 33% (one pixel) to the right\n aug = iaa.AffineCv2(scale=1.0, translate_percent={\"x\": 0.3333, \"y\": 0},\n rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n\n image = np.zeros((3, 3, 1), dtype=np.uint8)\n image_aug = np.copy(image)\n image[1, 1] = 255\n image_aug[1, 2] = 255\n images = np.array([image])\n images_aug = np.array([image_aug])\n images_list = [image]\n images_aug_list = [image_aug]\n keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],\n shape=base_img.shape)]\n keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)],\n shape=base_img.shape)]\n\n observed = aug.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n observed = aug_det.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n observed = aug.augment_images(images_list)\n assert array_equal_lists(observed, images_aug_list)\n\n observed = aug_det.augment_images(images_list)\n assert array_equal_lists(observed, images_aug_list)\n\n observed = aug.augment_keypoints(keypoints)\n assert keypoints_equal(observed, keypoints_aug)\n\n observed = aug_det.augment_keypoints(keypoints)\n assert keypoints_equal(observed, keypoints_aug)\n\n # move 33% (one pixel) to the bottom\n aug = iaa.AffineCv2(scale=1.0, translate_percent={\"x\": 0, \"y\": 0.3333},\n rotate=0, shear=0)\n aug_det = 
aug.to_deterministic()\n\n image = np.zeros((3, 3, 1), dtype=np.uint8)\n image_aug = np.copy(image)\n image[1, 1] = 255\n image_aug[2, 1] = 255\n images = np.array([image])\n images_aug = np.array([image_aug])\n images_list = [image]\n images_aug_list = [image_aug]\n keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)],\n shape=base_img.shape)]\n keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],\n shape=base_img.shape)]\n\n observed = aug.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n observed = aug_det.augment_images(images)\n assert np.array_equal(observed, images_aug)\n\n observed = aug.augment_images(images_list)\n assert array_equal_lists(observed, images_aug_list)\n\n observed = aug_det.augment_images(images_list)\n assert array_equal_lists(observed, images_aug_list)\n\n observed = aug.augment_keypoints(keypoints)\n assert keypoints_equal(observed, keypoints_aug)\n\n observed = aug_det.augment_keypoints(keypoints)\n assert keypoints_equal(observed, keypoints_aug)\n\n # 0-1px to left/right and 0-1px to top/bottom\n aug = iaa.AffineCv2(scale=1.0,\n translate_px={\"x\": (-1, 1), \"y\": (-1, 1)},\n rotate=0, shear=0)\n aug_det = aug.to_deterministic()\n last_aug = None\n last_aug_det = None\n nb_changed_aug = 0\n nb_changed_aug_det = 0\n nb_iterations = 1000\n centers_aug = np.copy(image).astype(np.int32) * 0\n centers_aug_det = np.copy(image).astype(np.int32) * 0\n for i in sm.xrange(nb_iterations):\n observed_aug = aug.augment_images(images)\n observed_aug_det = aug_det.augment_images(images)\n if i == 0:\n last_aug = observed_aug\n last_aug_det = observed_aug_det\n else:\n if not np.array_equal(observed_aug, last_aug):\n nb_changed_aug += 1\n if not np.array_equal(observed_aug_det, last_aug_det):\n nb_changed_aug_det += 1\n last_aug = observed_aug\n last_aug_det = observed_aug_det\n\n assert len(observed_aug[0].nonzero()[0]) == 1\n assert len(observed_aug_det[0].nonzero()[0]) == 1\n centers_aug += (observed_aug[0] > 0)\n centers_aug_det += (observed_aug_det[0] > 0)\n\n assert nb_changed_aug >= int(nb_iterations * 0.7)\n assert nb_changed_aug_det == 0\n assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()\n assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()\n\n aug = iaa.AffineCv2(translate_percent=iap.Uniform(0.7, 0.9))\n assert is_parameter_instance(aug.translate, iap.Uniform)\n assert is_parameter_instance(aug.translate.a, iap.Deterministic)\n assert is_parameter_instance(aug.translate.b, iap.Deterministic)\n assert 0.7 - 1e-8 < aug.translate.a.value < 0.7 + 1e-8\n assert 0.9 - 1e-8 < aug.translate.b.value < 0.9 + 1e-8\n\n aug = iaa.AffineCv2(translate_px=iap.DiscreteUniform(1, 10))\n assert is_parameter_instance(aug.translate, iap.DiscreteUniform)\n assert is_parameter_instance(aug.translate.a, iap.Deterministic)\n assert is_parameter_instance(aug.translate.b, iap.Deterministic)\n assert aug.translate.a.value == 1\n assert aug.translate.b.value == 10\n\n # ---------------------\n # translate heatmaps\n # ---------------------\n heatmaps = HeatmapsOnImage(\n np.float32([\n [0.0, 0.5, 0.75],\n [0.0, 0.5, 0.75],\n [0.75, 0.75, 0.75],\n ]),\n shape=(3, 3, 3)\n )\n arr_expected_1px_right = np.float32([\n [0.0, 0.0, 0.5],\n [0.0, 0.0, 0.5],\n [0.0, 0.75, 0.75],\n ])\n aug = iaa.AffineCv2(translate_px={\"x\": 1})\n observed = aug.augment_heatmaps([heatmaps])[0]\n assert observed.shape == heatmaps.shape\n assert np.isclose(observed.min_value, heatmaps.min_value,\n rtol=0, atol=1e-6)\n assert np.isclose(observed.max_value, 
heatmaps.max_value,\n                          rtol=0, atol=1e-6)\n        assert np.array_equal(observed.get_arr(), arr_expected_1px_right)\n\n        # should still use mode=constant cval=0 even when other settings chosen\n        aug = iaa.AffineCv2(translate_px={\"x\": 1}, cval=255)\n        observed = aug.augment_heatmaps([heatmaps])[0]\n        assert observed.shape == heatmaps.shape\n        assert np.isclose(observed.min_value, heatmaps.min_value,\n                          rtol=0, atol=1e-6)\n        assert np.isclose(observed.max_value, heatmaps.max_value,\n                          rtol=0, atol=1e-6)\n        assert np.array_equal(observed.get_arr(), arr_expected_1px_right)\n\n        aug = iaa.AffineCv2(translate_px={\"x\": 1}, mode=\"replicate\", cval=255)\n        observed = aug.augment_heatmaps([heatmaps])[0]\n        assert observed.shape == heatmaps.shape\n        assert np.isclose(observed.min_value, heatmaps.min_value,\n                          rtol=0, atol=1e-6)\n        assert np.isclose(observed.max_value, heatmaps.max_value,\n                          rtol=0, atol=1e-6)\n        assert np.array_equal(observed.get_arr(), arr_expected_1px_right)\n\n        # ---------------------\n        # translate segmaps\n        # ---------------------\n        segmaps = SegmentationMapsOnImage(\n            np.int32([\n                [0, 1, 2],\n                [0, 1, 2],\n                [2, 2, 2],\n            ]),\n            shape=(3, 3, 3)\n        )\n        arr_expected_1px_right = np.int32([\n            [0, 0, 1],\n            [0, 0, 1],\n            [0, 2, 2],\n        ])\n        aug = iaa.AffineCv2(translate_px={\"x\": 1})\n        observed = aug.augment_segmentation_maps([segmaps])[0]\n        assert observed.shape == segmaps.shape\n        assert np.array_equal(observed.get_arr(), arr_expected_1px_right)\n\n        # should still use mode=constant cval=0 even when other settings chosen\n        aug = iaa.AffineCv2(translate_px={\"x\": 1}, cval=255)\n        observed = aug.augment_segmentation_maps([segmaps])[0]\n        assert observed.shape == segmaps.shape\n        assert np.array_equal(observed.get_arr(), arr_expected_1px_right)\n\n        aug = iaa.AffineCv2(translate_px={\"x\": 1}, mode=\"replicate\", cval=255)\n        observed = aug.augment_segmentation_maps([segmaps])[0]\n        assert observed.shape == segmaps.shape\n        assert np.array_equal(observed.get_arr(), arr_expected_1px_right)\n\n        # ---------------------\n        # rotate\n        # ---------------------\n        # rotate by 90 degrees\n        aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=90, shear=0)\n        aug_det = aug.to_deterministic()\n\n        image = np.zeros((3, 3, 1), dtype=np.uint8)\n        image_aug = np.copy(image)\n        image[1, :] = 255\n        image_aug[0, 1] = 255\n        image_aug[1, 1] = 255\n        image_aug[2, 1] = 255\n        images = np.array([image])\n        images_aug = np.array([image_aug])\n        images_list = [image]\n        images_aug_list = [image_aug]\n        kps = [ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),\n               ia.Keypoint(x=2, y=1)]\n        keypoints = [ia.KeypointsOnImage(kps, shape=base_img.shape)]\n        kps_aug = [ia.Keypoint(x=1, y=0), ia.Keypoint(x=1, y=1),\n                   ia.Keypoint(x=1, y=2)]\n        keypoints_aug = [ia.KeypointsOnImage(kps_aug, shape=base_img.shape)]\n\n        observed = aug.augment_images(images)\n        observed[observed >= 100] = 255\n        observed[observed < 100] = 0\n        assert np.array_equal(observed, images_aug)\n\n        observed = aug_det.augment_images(images)\n        observed[observed >= 100] = 255\n        observed[observed < 100] = 0\n        assert np.array_equal(observed, images_aug)\n\n        observed = aug.augment_images(images_list)\n        observed[0][observed[0] >= 100] = 255\n        observed[0][observed[0] < 100] = 0\n        assert array_equal_lists(observed, images_aug_list)\n\n        observed = aug_det.augment_images(images_list)\n        observed[0][observed[0] >= 100] = 255\n        observed[0][observed[0] < 100] = 0\n        assert array_equal_lists(observed, images_aug_list)\n\n        observed = aug.augment_keypoints(keypoints)\n        assert keypoints_equal(observed, keypoints_aug)\n\n    
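    # the frozen (deterministic) copy must map the keypoints in exactly\n        # the same way: (0, 1)->(1, 0), (1, 1)->(1, 1), (2, 1)->(1, 2)\n    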
observed = aug_det.augment_keypoints(keypoints)\n assert keypoints_equal(observed, keypoints_aug)\n\n # rotate by StochasticParameter\n aug = iaa.AffineCv2(scale=1.0, translate_px=0,\n rotate=iap.Uniform(10, 20), shear=0)\n assert is_parameter_instance(aug.rotate, iap.Uniform)\n assert is_parameter_instance(aug.rotate.a, iap.Deterministic)\n assert aug.rotate.a.value == 10\n assert is_parameter_instance(aug.rotate.b, iap.Deterministic)\n assert aug.rotate.b.value == 20\n\n # random rotation 0-364 degrees\n aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=(0, 364),\n shear=0)\n aug_det = aug.to_deterministic()\n last_aug = None\n last_aug_det = None\n nb_changed_aug = 0\n nb_changed_aug_det = 0\n nb_iterations = 1000\n pixels_sums_aug = np.copy(image).astype(np.int32) * 0\n pixels_sums_aug_det = np.copy(image).astype(np.int32) * 0\n for i in sm.xrange(nb_iterations):\n observed_aug = aug.augment_images(images)\n observed_aug_det = aug_det.augment_images(images)\n if i == 0:\n last_aug = observed_aug\n last_aug_det = observed_aug_det\n else:\n if not np.array_equal(observed_aug, last_aug):\n nb_changed_aug += 1\n if not np.array_equal(observed_aug_det, last_aug_det):\n nb_changed_aug_det += 1\n last_aug = observed_aug\n last_aug_det = observed_aug_det\n\n pixels_sums_aug += (observed_aug[0] > 100)\n pixels_sums_aug_det += (observed_aug_det[0] > 100)\n\n assert nb_changed_aug >= int(nb_iterations * 0.9)\n assert nb_changed_aug_det == 0\n # center pixel, should always be white when rotating line around center\n assert pixels_sums_aug[1, 1] > (nb_iterations * 0.98)\n assert pixels_sums_aug[1, 1] < (nb_iterations * 1.02)\n\n # outer pixels, should sometimes be white\n # the values here had to be set quite tolerant, the middle pixels at\n # top/left/bottom/right get more activation than expected\n outer_pixels = ([0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2])\n assert (\n pixels_sums_aug[outer_pixels] > int(nb_iterations * (2/8 * 0.4))\n ).all()\n assert (\n pixels_sums_aug[outer_pixels] < int(nb_iterations * (2/8 * 2.0))\n ).all()\n\n # ---------------------\n # shear\n # ---------------------\n # TODO\n\n # shear by StochasticParameter\n aug = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0,\n shear=iap.Uniform(10, 20))\n assert is_parameter_instance(aug.shear, iap.Uniform)\n assert is_parameter_instance(aug.shear.a, iap.Deterministic)\n assert aug.shear.a.value == 10\n assert is_parameter_instance(aug.shear.b, iap.Deterministic)\n assert aug.shear.b.value == 20\n\n # ---------------------\n # cval\n # ---------------------\n aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=128)\n aug_det = aug.to_deterministic()\n\n image = np.ones((3, 3, 1), dtype=np.uint8) * 255\n image_aug = np.copy(image)\n images = np.array([image])\n images_list = [image]\n\n observed = aug.augment_images(images)\n assert (observed[0] > 128 - 30).all()\n assert (observed[0] < 128 + 30).all()\n\n observed = aug_det.augment_images(images)\n assert (observed[0] > 128 - 30).all()\n assert (observed[0] < 128 + 30).all()\n\n observed = aug.augment_images(images_list)\n assert (observed[0] > 128 - 30).all()\n assert (observed[0] < 128 + 30).all()\n\n observed = aug_det.augment_images(images_list)\n assert (observed[0] > 128 - 30).all()\n assert (observed[0] < 128 + 30).all()\n\n # random cvals\n aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,\n cval=(0, 255))\n aug_det = aug.to_deterministic()\n last_aug = None\n last_aug_det = None\n nb_changed_aug = 0\n 
nb_changed_aug_det = 0\n        nb_iterations = 1000\n        averages = []\n        for i in sm.xrange(nb_iterations):\n            observed_aug = aug.augment_images(images)\n            observed_aug_det = aug_det.augment_images(images)\n            if i == 0:\n                last_aug = observed_aug\n                last_aug_det = observed_aug_det\n            else:\n                if not np.array_equal(observed_aug, last_aug):\n                    nb_changed_aug += 1\n                if not np.array_equal(observed_aug_det, last_aug_det):\n                    nb_changed_aug_det += 1\n                last_aug = observed_aug\n                last_aug_det = observed_aug_det\n\n            averages.append(int(np.average(observed_aug)))\n\n        assert nb_changed_aug >= int(nb_iterations * 0.9)\n        assert nb_changed_aug_det == 0\n        # cval is sampled from (0, 255), so the averages of the generated\n        # images should take many different values over the iterations\n        assert len(set(averages)) > 200\n\n        aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,\n                            cval=ia.ALL)\n        assert is_parameter_instance(aug.cval, iap.DiscreteUniform)\n        assert is_parameter_instance(aug.cval.a, iap.Deterministic)\n        assert is_parameter_instance(aug.cval.b, iap.Deterministic)\n        assert aug.cval.a.value == 0\n        assert aug.cval.b.value == 255\n\n        aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,\n                            cval=iap.DiscreteUniform(1, 5))\n        assert is_parameter_instance(aug.cval, iap.DiscreteUniform)\n        assert is_parameter_instance(aug.cval.a, iap.Deterministic)\n        assert is_parameter_instance(aug.cval.b, iap.Deterministic)\n        assert aug.cval.a.value == 1\n        assert aug.cval.b.value == 5\n\n        # ------------\n        # mode\n        # ------------\n        aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,\n                            cval=0, mode=ia.ALL)\n        assert is_parameter_instance(aug.mode, iap.Choice)\n        aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,\n                            cval=0, mode=\"replicate\")\n        assert is_parameter_instance(aug.mode, iap.Deterministic)\n        assert aug.mode.value == \"replicate\"\n        aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,\n                            cval=0, mode=[\"replicate\", \"reflect\"])\n        assert is_parameter_instance(aug.mode, iap.Choice)\n        assert (\n            len(aug.mode.a) == 2\n            and \"replicate\" in aug.mode.a\n            and \"reflect\" in aug.mode.a)\n        aug = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0, shear=0,\n                            cval=0,\n                            mode=iap.Choice([\"replicate\", \"reflect\"]))\n        assert is_parameter_instance(aug.mode, iap.Choice)\n        assert (\n            len(aug.mode.a) == 2\n            and \"replicate\" in aug.mode.a\n            and \"reflect\" in aug.mode.a)\n\n        # ------------\n        # exceptions for bad inputs\n        # ------------\n        # scale\n        got_exception = False\n        try:\n            _ = iaa.AffineCv2(scale=False)\n        except Exception:\n            got_exception = True\n        assert got_exception\n\n        # translate_px\n        got_exception = False\n        try:\n            _ = iaa.AffineCv2(translate_px=False)\n        except Exception:\n            got_exception = True\n        assert got_exception\n\n        # translate_percent\n        got_exception = False\n        try:\n            _ = iaa.AffineCv2(translate_percent=False)\n        except Exception:\n            got_exception = True\n        assert got_exception\n\n        # rotate\n        got_exception = False\n        try:\n            _ = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=False,\n                              shear=0, cval=0)\n        except Exception:\n            got_exception = True\n        assert got_exception\n\n        # shear\n        got_exception = False\n        try:\n            _ = iaa.AffineCv2(scale=1.0, translate_px=0, rotate=0,\n                              shear=False, cval=0)\n        except Exception:\n            got_exception = True\n        assert got_exception\n\n        # cval\n        got_exception = False\n        try:\n            _ = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0,\n                              shear=0, cval=None)\n        except Exception:\n            got_exception = 
True\n assert got_exception\n\n # mode\n got_exception = False\n try:\n _ = iaa.AffineCv2(scale=1.0, translate_px=100, rotate=0,\n shear=0, cval=0, mode=False)\n except Exception:\n got_exception = True\n assert got_exception\n\n # non-existent order\n got_exception = False\n try:\n _ = iaa.AffineCv2(order=-1)\n except Exception:\n got_exception = True\n assert got_exception\n\n # bad order datatype\n got_exception = False\n try:\n _ = iaa.AffineCv2(order=\"test\")\n except Exception:\n got_exception = True\n assert got_exception\n\n # ----------\n # get_parameters\n # ----------\n aug = iaa.AffineCv2(scale=1, translate_px=2, rotate=3, shear=4,\n order=1, cval=0, mode=\"constant\")\n params = aug.get_parameters()\n assert is_parameter_instance(params[0], iap.Deterministic) # scale\n assert is_parameter_instance(params[1], iap.Deterministic) # translate\n assert is_parameter_instance(params[2], iap.Deterministic) # rotate\n assert is_parameter_instance(params[3], iap.Deterministic) # shear\n assert params[0].value == 1 # scale\n assert params[1].value == 2 # translate\n assert params[2].value == 3 # rotate\n assert params[3].value == 4 # shear\n assert params[4].value == 1 # order\n assert params[5].value == 0 # cval\n assert params[6].value == \"constant\" # mode\n\n\nclass TestPiecewiseAffine(unittest.TestCase):\n def setUp(self):\n reseed()\n\n @property\n def image(self):\n img = np.zeros((60, 80), dtype=np.uint8)\n img[:, 9:11+1] = 255\n img[:, 69:71+1] = 255\n return img\n\n @property\n def mask(self):\n return self.image > 0\n\n @property\n def heatmaps(self):\n return HeatmapsOnImage((self.image / 255.0).astype(np.float32),\n shape=(60, 80, 3))\n\n @property\n def segmaps(self):\n return SegmentationMapsOnImage(self.mask.astype(np.int32),\n shape=(60, 80, 3))\n\n # -----\n # __init__\n # -----\n def test___init___scale_is_list(self):\n # scale as list\n aug = iaa.PiecewiseAffine(scale=[0.01, 0.10], nb_rows=12, nb_cols=4)\n assert is_parameter_instance(aug.scale, iap.Choice)\n assert 0.01 - 1e-8 < aug.scale.a[0] < 0.01 + 1e-8\n assert 0.10 - 1e-8 < aug.scale.a[1] < 0.10 + 1e-8\n\n def test___init___scale_is_tuple(self):\n # scale as tuple\n aug = iaa.PiecewiseAffine(scale=(0.01, 0.10), nb_rows=12, nb_cols=4)\n assert is_parameter_instance(aug.jitter.scale, iap.Uniform)\n assert is_parameter_instance(aug.jitter.scale.a, iap.Deterministic)\n assert is_parameter_instance(aug.jitter.scale.b, iap.Deterministic)\n assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8\n assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8\n\n def test___init___scale_is_stochastic_parameter(self):\n # scale as StochasticParameter\n aug = iaa.PiecewiseAffine(scale=iap.Uniform(0.01, 0.10), nb_rows=12,\n nb_cols=4)\n assert is_parameter_instance(aug.jitter.scale, iap.Uniform)\n assert is_parameter_instance(aug.jitter.scale.a, iap.Deterministic)\n assert is_parameter_instance(aug.jitter.scale.b, iap.Deterministic)\n assert 0.01 - 1e-8 < aug.jitter.scale.a.value < 0.01 + 1e-8\n assert 0.10 - 1e-8 < aug.jitter.scale.b.value < 0.10 + 1e-8\n\n def test___init___bad_datatype_for_scale_leads_to_failure(self):\n # bad datatype for scale\n got_exception = False\n try:\n _ = iaa.PiecewiseAffine(scale=False, nb_rows=12, nb_cols=4)\n except Exception as exc:\n assert \"Expected \" in str(exc)\n got_exception = True\n assert got_exception\n\n def test___init___nb_rows_is_list(self):\n # rows as list\n aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=[4, 20], nb_cols=4)\n assert is_parameter_instance(aug.nb_rows, 
iap.Choice)\n assert aug.nb_rows.a[0] == 4\n assert aug.nb_rows.a[1] == 20\n\n def test___init___nb_rows_is_tuple(self):\n # rows as tuple\n aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=(4, 20), nb_cols=4)\n assert is_parameter_instance(aug.nb_rows, iap.DiscreteUniform)\n assert is_parameter_instance(aug.nb_rows.a, iap.Deterministic)\n assert is_parameter_instance(aug.nb_rows.b, iap.Deterministic)\n assert aug.nb_rows.a.value == 4\n assert aug.nb_rows.b.value == 20\n\n def test___init___nb_rows_is_stochastic_parameter(self):\n # rows as StochasticParameter\n aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=iap.DiscreteUniform(4, 20),\n nb_cols=4)\n assert is_parameter_instance(aug.nb_rows, iap.DiscreteUniform)\n assert is_parameter_instance(aug.nb_rows.a, iap.Deterministic)\n assert is_parameter_instance(aug.nb_rows.b, iap.Deterministic)\n assert aug.nb_rows.a.value == 4\n assert aug.nb_rows.b.value == 20\n\n def test___init___bad_datatype_for_nb_rows_leads_to_failure(self):\n # bad datatype for rows\n got_exception = False\n try:\n _ = iaa.PiecewiseAffine(scale=0.05, nb_rows=False, nb_cols=4)\n except Exception as exc:\n assert \"Expected \" in str(exc)\n got_exception = True\n assert got_exception\n\n def test___init___nb_cols_is_list(self):\n aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=[4, 20])\n assert is_parameter_instance(aug.nb_cols, iap.Choice)\n assert aug.nb_cols.a[0] == 4\n assert aug.nb_cols.a[1] == 20\n\n def test___init___nb_cols_is_tuple(self):\n # cols as tuple\n aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=(4, 20))\n assert is_parameter_instance(aug.nb_cols, iap.DiscreteUniform)\n assert is_parameter_instance(aug.nb_cols.a, iap.Deterministic)\n assert is_parameter_instance(aug.nb_cols.b, iap.Deterministic)\n assert aug.nb_cols.a.value == 4\n assert aug.nb_cols.b.value == 20\n\n def test___init___nb_cols_is_stochastic_parameter(self):\n # cols as StochasticParameter\n aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4,\n nb_cols=iap.DiscreteUniform(4, 20))\n assert is_parameter_instance(aug.nb_cols, iap.DiscreteUniform)\n assert is_parameter_instance(aug.nb_cols.a, iap.Deterministic)\n assert is_parameter_instance(aug.nb_cols.b, iap.Deterministic)\n assert aug.nb_cols.a.value == 4\n assert aug.nb_cols.b.value == 20\n\n def test___init___bad_datatype_for_nb_cols_leads_to_failure(self):\n # bad datatype for cols\n got_exception = False\n try:\n _aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=False)\n except Exception as exc:\n assert \"Expected \" in str(exc)\n got_exception = True\n assert got_exception\n\n def test___init___order_is_int(self):\n # single int for order\n aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, order=0)\n assert is_parameter_instance(aug.order, iap.Deterministic)\n assert aug.order.value == 0\n\n def test___init___order_is_list(self):\n # list for order\n aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,\n order=[0, 1, 3])\n assert is_parameter_instance(aug.order, iap.Choice)\n assert all([v in aug.order.a for v in [0, 1, 3]])\n\n def test___init___order_is_stochastic_parameter(self):\n # StochasticParameter for order\n aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,\n order=iap.Choice([0, 1, 3]))\n assert is_parameter_instance(aug.order, iap.Choice)\n assert all([v in aug.order.a for v in [0, 1, 3]])\n\n def test___init___order_is_all(self):\n # ALL for order\n aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,\n order=ia.ALL)\n assert is_parameter_instance(aug.order, iap.Choice)\n 
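# (order=2, i.e. bi-quadratic, is expected to stay out of the ALL\n # set checked below; assumption: skimage discourages that order, so\n # the augmenter skips it)\n 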
assert all([v in aug.order.a for v in [0, 1, 3, 4, 5]])\n\n    def test___init___bad_datatype_for_order_leads_to_failure(self):\n        # bad datatype for order\n        got_exception = False\n        try:\n            _ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,\n                                    order=False)\n        except Exception as exc:\n            assert \"Expected \" in str(exc)\n            got_exception = True\n        assert got_exception\n\n    def test___init___cval_is_list(self):\n        # cval as list\n        aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=5, nb_cols=5,\n                                  mode=\"constant\", cval=[0, 10])\n        assert is_parameter_instance(aug.cval, iap.Choice)\n        assert aug.cval.a[0] == 0\n        assert aug.cval.a[1] == 10\n\n    def test___init___cval_is_tuple(self):\n        # cval as tuple\n        aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,\n                                  mode=\"constant\", cval=(0, 10))\n        assert is_parameter_instance(aug.cval, iap.Uniform)\n        assert is_parameter_instance(aug.cval.a, iap.Deterministic)\n        assert is_parameter_instance(aug.cval.b, iap.Deterministic)\n        assert aug.cval.a.value == 0\n        assert aug.cval.b.value == 10\n\n    def test___init___cval_is_stochastic_parameter(self):\n        # cval as StochasticParameter\n        aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,\n                                  mode=\"constant\",\n                                  cval=iap.DiscreteUniform(0, 10))\n        assert is_parameter_instance(aug.cval, iap.DiscreteUniform)\n        assert is_parameter_instance(aug.cval.a, iap.Deterministic)\n        assert is_parameter_instance(aug.cval.b, iap.Deterministic)\n        assert aug.cval.a.value == 0\n        assert aug.cval.b.value == 10\n\n    def test___init___cval_is_all(self):\n        # ALL as cval\n        aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,\n                                  mode=\"constant\", cval=ia.ALL)\n        assert is_parameter_instance(aug.cval, iap.Uniform)\n        assert is_parameter_instance(aug.cval.a, iap.Deterministic)\n        assert is_parameter_instance(aug.cval.b, iap.Deterministic)\n        assert aug.cval.a.value == 0\n        assert aug.cval.b.value == 255\n\n    def test___init___bad_datatype_for_cval_leads_to_failure(self):\n        # bad datatype for cval\n        got_exception = False\n        try:\n            _ = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, cval=False)\n        except Exception as exc:\n            assert \"Expected \" in str(exc)\n            got_exception = True\n        assert got_exception\n\n    def test___init___mode_is_string(self):\n        # single string for mode\n        aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,\n                                  mode=\"nearest\")\n        assert is_parameter_instance(aug.mode, iap.Deterministic)\n        assert aug.mode.value == \"nearest\"\n\n    def test___init___mode_is_list(self):\n        # list for mode\n        aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8,\n                                  mode=[\"nearest\", \"edge\", \"symmetric\"])\n        assert is_parameter_instance(aug.mode, iap.Choice)\n        assert all([\n            v in aug.mode.a for v in [\"nearest\", \"edge\", \"symmetric\"]\n        ])\n\n    def test___init___mode_is_stochastic_parameter(self):\n        # StochasticParameter for mode\n        aug = iaa.PiecewiseAffine(\n            scale=0.1, nb_rows=8, nb_cols=8,\n            mode=iap.Choice([\"nearest\", \"edge\", \"symmetric\"]))\n        assert is_parameter_instance(aug.mode, iap.Choice)\n        assert all([\n            v in aug.mode.a for v in [\"nearest\", \"edge\", \"symmetric\"]\n        ])\n\n    def test___init___mode_is_all(self):\n        # ALL for mode\n        aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=8, mode=ia.ALL)\n        assert is_parameter_instance(aug.mode, iap.Choice)\n        assert all([\n            v in aug.mode.a\n            for v\n            in [\"constant\", \"edge\", \"symmetric\", \"reflect\", \"wrap\"]\n        ])\n\n    def test___init___bad_datatype_for_mode_leads_to_failure(self):\n        # bad datatype for mode\n        got_exception = False\n        try:\n            _ = iaa.PiecewiseAffine(scale=0.1, 
nb_rows=8, nb_cols=8,\n mode=False)\n except Exception as exc:\n assert \"Expected \" in str(exc)\n got_exception = True\n assert got_exception\n\n # -----\n # scale\n # -----\n def test_scale_is_small_image(self):\n # basic test\n aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)\n\n observed = aug.augment_image(self.image)\n\n assert (\n 100.0\n < np.average(observed[self.mask])\n < np.average(self.image[self.mask])\n )\n assert (\n 100.0-75.0\n > np.average(observed[~self.mask])\n > np.average(self.image[~self.mask])\n )\n\n def test_scale_is_small_image_absolute_scale(self):\n aug = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,\n absolute_scale=True)\n\n observed = aug.augment_image(self.image)\n\n assert (\n 100.0\n < np.average(observed[self.mask])\n < np.average(self.image[self.mask])\n )\n assert (\n 100.0-75.0\n > np.average(observed[~self.mask])\n > np.average(self.image[~self.mask])\n )\n\n def test_scale_is_small_heatmaps(self):\n # basic test, heatmaps\n aug = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)\n\n observed = aug.augment_heatmaps([self.heatmaps])[0]\n\n observed_arr = observed.get_arr()\n assert observed.shape == self.heatmaps.shape\n _assert_same_min_max(observed, self.heatmaps)\n assert (\n 100.0/255.0\n < np.average(observed_arr[self.mask])\n < np.average(self.heatmaps.get_arr()[self.mask]))\n assert (\n (100.0-75.0)/255.0\n > np.average(observed_arr[~self.mask])\n > np.average(self.heatmaps.get_arr()[~self.mask]))\n\n def test_scale_is_small_segmaps(self):\n # basic test, segmaps\n aug = iaa.PiecewiseAffine(scale=0.001, nb_rows=12, nb_cols=4)\n\n observed = aug.augment_segmentation_maps([self.segmaps])[0]\n\n observed_arr = observed.get_arr()\n # left column starts at 9-11 and right one at 69-71\n # result is 9-11 (curvy, i.e. like 50% filled) and 70-71 (straight,\n # i.e. 100% filled). 
Reason for that is unclear, maybe a scikit-image\n # problem.\n observed_arr_left_col = observed_arr[:, 9:11+1]\n observed_arr_right_col = observed_arr[:, 69:71+1]\n assert observed.shape == self.segmaps.shape\n assert np.average(observed_arr_left_col == 1) > 0.5\n assert np.average(observed_arr_right_col == 1) > 0.5\n assert np.average(observed_arr[~self.mask] == 0) > 0.9\n\n def test_scale_is_zero_image(self):\n # scale 0\n aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)\n\n observed = aug.augment_image(self.image)\n\n assert np.array_equal(observed, self.image)\n\n def test_scale_is_zero_image_absolute_scale(self):\n aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4,\n absolute_scale=True)\n\n observed = aug.augment_image(self.image)\n\n assert np.array_equal(observed, self.image)\n\n def test_scale_is_zero_heatmaps(self):\n # scale 0, heatmaps\n aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)\n\n observed = aug.augment_heatmaps([self.heatmaps])[0]\n\n observed_arr = observed.get_arr()\n assert observed.shape == self.heatmaps.shape\n _assert_same_min_max(observed, self.heatmaps)\n assert np.array_equal(observed_arr, self.heatmaps.get_arr())\n\n def test_scale_is_zero_segmaps(self):\n # scale 0, segmaps\n aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)\n\n observed = aug.augment_segmentation_maps([self.segmaps])[0]\n\n observed_arr = observed.get_arr()\n assert observed.shape == self.segmaps.shape\n assert np.array_equal(observed_arr, self.segmaps.get_arr())\n\n def test_scale_is_zero_keypoints(self):\n # scale 0, keypoints\n aug = iaa.PiecewiseAffine(scale=0, nb_rows=12, nb_cols=4)\n kps = [ia.Keypoint(x=5, y=3), ia.Keypoint(x=3, y=8)]\n kpsoi = ia.KeypointsOnImage(kps, shape=(14, 14, 3))\n\n kpsoi_aug = aug.augment_keypoints([kpsoi])[0]\n\n assert_cbaois_equal(kpsoi_aug, kpsoi)\n\n @classmethod\n def _test_scale_is_zero_cbaoi(cls, cbaoi, augf_name):\n aug = iaa.PiecewiseAffine(scale=0, nb_rows=10, nb_cols=10)\n\n observed = getattr(aug, augf_name)(cbaoi)\n\n assert_cbaois_equal(observed, cbaoi)\n\n def test_scale_is_zero_polygons(self):\n exterior = [(10, 10),\n (70, 10), (70, 20), (70, 30), (70, 40),\n (70, 50), (70, 60), (70, 70), (70, 80),\n (70, 90),\n (10, 90),\n (10, 80), (10, 70), (10, 60), (10, 50),\n (10, 40), (10, 30), (10, 20), (10, 10)]\n poly = ia.Polygon(exterior)\n psoi = ia.PolygonsOnImage([poly, poly.shift(x=1, y=1)],\n shape=(100, 80))\n\n self._test_scale_is_zero_cbaoi(psoi, \"augment_polygons\")\n\n def test_scale_is_zero_line_strings(self):\n coords = [(10, 10),\n (70, 10), (70, 20), (70, 30), (70, 40),\n (70, 50), (70, 60), (70, 70), (70, 80),\n (70, 90),\n (10, 90),\n (10, 80), (10, 70), (10, 60), (10, 50),\n (10, 40), (10, 30), (10, 20), (10, 10)]\n ls = ia.LineString(coords)\n lsoi = ia.LineStringsOnImage([ls, ls.shift(x=1, y=1)],\n shape=(100, 80))\n\n self._test_scale_is_zero_cbaoi(lsoi, \"augment_line_strings\")\n\n def test_scale_is_zero_bounding_boxes(self):\n bb = ia.BoundingBox(x1=10, y1=10, x2=70, y2=20)\n bbsoi = ia.BoundingBoxesOnImage([bb, bb.shift(x=1, y=1)],\n shape=(100, 80))\n\n self._test_scale_is_zero_cbaoi(bbsoi, \"augment_bounding_boxes\")\n\n def test_scale_stronger_values_should_increase_changes_images(self):\n # stronger scale should lead to stronger changes\n aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)\n aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)\n\n observed1 = aug1.augment_image(self.image)\n observed2 = aug2.augment_image(self.image)\n\n assert (\n 
np.average(observed1[~self.mask])\n < np.average(observed2[~self.mask])\n )\n\n def test_scale_stronger_values_should_increase_changes_images_abs(self):\n aug1 = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,\n absolute_scale=True)\n aug2 = iaa.PiecewiseAffine(scale=10, nb_rows=12, nb_cols=4,\n absolute_scale=True)\n\n observed1 = aug1.augment_image(self.image)\n observed2 = aug2.augment_image(self.image)\n\n assert (\n np.average(observed1[~self.mask])\n < np.average(observed2[~self.mask])\n )\n\n def test_scale_stronger_values_should_increase_changes_heatmaps(self):\n # stronger scale should lead to stronger changes, heatmaps\n aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)\n aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)\n \n observed1 = aug1.augment_heatmaps([self.heatmaps])[0]\n observed2 = aug2.augment_heatmaps([self.heatmaps])[0]\n \n observed1_arr = observed1.get_arr()\n observed2_arr = observed2.get_arr()\n assert observed1.shape == self.heatmaps.shape\n assert observed2.shape == self.heatmaps.shape\n _assert_same_min_max(observed1, self.heatmaps)\n _assert_same_min_max(observed2, self.heatmaps)\n assert (\n np.average(observed1_arr[~self.mask])\n < np.average(observed2_arr[~self.mask])\n )\n\n def test_scale_stronger_values_should_increase_changes_heatmaps_abs(self):\n aug1 = iaa.PiecewiseAffine(scale=1, nb_rows=12, nb_cols=4,\n absolute_scale=True)\n aug2 = iaa.PiecewiseAffine(scale=10, nb_rows=12, nb_cols=4,\n absolute_scale=True)\n\n observed1 = aug1.augment_heatmaps([self.heatmaps])[0]\n observed2 = aug2.augment_heatmaps([self.heatmaps])[0]\n\n observed1_arr = observed1.get_arr()\n observed2_arr = observed2.get_arr()\n assert observed1.shape == self.heatmaps.shape\n assert observed2.shape == self.heatmaps.shape\n _assert_same_min_max(observed1, self.heatmaps)\n _assert_same_min_max(observed2, self.heatmaps)\n assert (\n np.average(observed1_arr[~self.mask])\n < np.average(observed2_arr[~self.mask])\n )\n\n def test_scale_stronger_values_should_increase_changes_segmaps(self):\n # stronger scale should lead to stronger changes, segmaps\n aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)\n aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)\n\n observed1 = aug1.augment_segmentation_maps([self.segmaps])[0]\n observed2 = aug2.augment_segmentation_maps([self.segmaps])[0]\n\n observed1_arr = observed1.get_arr()\n observed2_arr = observed2.get_arr()\n assert observed1.shape == self.segmaps.shape\n assert observed2.shape == self.segmaps.shape\n assert (\n np.average(observed1_arr[~self.mask] == 0)\n > np.average(observed2_arr[~self.mask] == 0)\n )\n\n def test_scale_alignment_between_images_and_heatmaps(self):\n # strong scale, measure alignment between images and heatmaps\n aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)\n aug_det = aug.to_deterministic()\n\n img_aug = aug_det.augment_image(self.image)\n hm_aug = aug_det.augment_heatmaps([self.heatmaps])[0]\n\n img_aug_mask = img_aug > 255*0.1\n hm_aug_mask = hm_aug.arr_0to1 > 0.1\n same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])\n assert hm_aug.shape == (60, 80, 3)\n _assert_same_min_max(hm_aug, self.heatmaps)\n assert (same / img_aug_mask.size) >= 0.98\n\n def test_scale_alignment_between_images_and_segmaps(self):\n # strong scale, measure alignment between images and segmaps\n aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)\n aug_det = aug.to_deterministic()\n\n img_aug = aug_det.augment_image(self.image)\n segmap_aug = 
aug_det.augment_segmentation_maps([self.segmaps])[0]\n\n img_aug_mask = (img_aug > 255*0.1)\n segmap_aug_mask = (segmap_aug.arr == 1)\n same = np.sum(img_aug_mask == segmap_aug_mask[:, :, 0])\n assert segmap_aug.shape == (60, 80, 3)\n assert (same / img_aug_mask.size) >= 0.9\n\n def test_scale_alignment_between_images_and_smaller_heatmaps(self):\n # strong scale, measure alignment between images and heatmaps\n # heatmaps here smaller than image\n aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)\n aug_det = aug.to_deterministic()\n\n heatmaps_small = ia.HeatmapsOnImage(\n (\n ia.imresize_single_image(\n self.image, (30, 40+10), interpolation=\"cubic\"\n ) / 255.0\n ).astype(np.float32),\n shape=(60, 80, 3)\n )\n\n img_aug = aug_det.augment_image(self.image)\n hm_aug = aug_det.augment_heatmaps([heatmaps_small])[0]\n\n img_aug_mask = img_aug > 255*0.1\n hm_aug_mask = ia.imresize_single_image(\n hm_aug.arr_0to1, (60, 80), interpolation=\"cubic\"\n ) > 0.1\n same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])\n assert hm_aug.shape == (60, 80, 3)\n assert hm_aug.arr_0to1.shape == (30, 40+10, 1)\n assert (same / img_aug_mask.size) >= 0.9 # seems to be 0.948 actually\n\n def test_scale_alignment_between_images_and_smaller_heatmaps_abs(self):\n # image is 60x80, so a scale of 8 is about 0.1*max(60,80)\n aug = iaa.PiecewiseAffine(scale=8, nb_rows=12, nb_cols=4,\n absolute_scale=True)\n aug_det = aug.to_deterministic()\n\n heatmaps_small = ia.HeatmapsOnImage(\n (\n ia.imresize_single_image(\n self.image, (30, 40+10), interpolation=\"cubic\"\n ) / 255.0\n ).astype(np.float32),\n shape=(60, 80, 3)\n )\n\n img_aug = aug_det.augment_image(self.image)\n hm_aug = aug_det.augment_heatmaps([heatmaps_small])[0]\n\n img_aug_mask = img_aug > 255*0.1\n hm_aug_mask = ia.imresize_single_image(\n hm_aug.arr_0to1, (60, 80), interpolation=\"cubic\"\n ) > 0.1\n same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])\n assert hm_aug.shape == (60, 80, 3)\n assert hm_aug.arr_0to1.shape == (30, 40+10, 1)\n assert (same / img_aug_mask.size) >= 0.9 # seems to be 0.930 actually\n\n def test_scale_alignment_between_images_and_smaller_segmaps(self):\n # strong scale, measure alignment between images and segmaps\n # segmaps here smaller than image\n aug = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)\n aug_det = aug.to_deterministic()\n segmaps_small = SegmentationMapsOnImage(\n (\n ia.imresize_single_image(\n self.image, (30, 40+10), interpolation=\"cubic\"\n ) > 100\n ).astype(np.int32),\n shape=(60, 80, 3)\n )\n\n img_aug = aug_det.augment_image(self.image)\n segmaps_aug = aug_det.augment_segmentation_maps([segmaps_small])[0]\n\n img_aug_mask = img_aug > 255*0.1\n segmaps_aug_mask = (\n ia.imresize_single_image(\n segmaps_aug.arr, (60, 80),\n interpolation=\"nearest\"\n ) == 1\n )\n same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])\n assert segmaps_aug.shape == (60, 80, 3)\n assert segmaps_aug.arr.shape == (30, 40+10, 1)\n assert (same / img_aug_mask.size) >= 0.9\n\n def test_scale_alignment_between_images_and_keypoints(self):\n # strong scale, measure alignment between images and keypoints\n # fairly large scale here, as otherwise keypoints can end up\n # outside of the image plane\n aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=12, nb_cols=4)\n aug_det = aug.to_deterministic()\n kps = [ia.Keypoint(x=160, y=110), ia.Keypoint(x=140, y=90)]\n kpsoi = ia.KeypointsOnImage(kps, shape=(200, 300, 3))\n img_kps = np.zeros((200, 300, 3), dtype=np.uint8)\n img_kps = kpsoi.draw_on_image(img_kps, color=[255, 
255, 255])\n\n img_kps_aug = aug_det.augment_image(img_kps)\n kpsoi_aug = aug_det.augment_keypoints([kpsoi])[0]\n\n assert kpsoi_aug.shape == (200, 300, 3)\n bb1 = ia.BoundingBox(\n x1=kpsoi_aug.keypoints[0].x-1, y1=kpsoi_aug.keypoints[0].y-1,\n x2=kpsoi_aug.keypoints[0].x+1, y2=kpsoi_aug.keypoints[0].y+1)\n bb2 = ia.BoundingBox(\n x1=kpsoi_aug.keypoints[1].x-1, y1=kpsoi_aug.keypoints[1].y-1,\n x2=kpsoi_aug.keypoints[1].x+1, y2=kpsoi_aug.keypoints[1].y+1)\n patch1 = bb1.extract_from_image(img_kps_aug)\n patch2 = bb2.extract_from_image(img_kps_aug)\n assert np.max(patch1) > 150\n assert np.max(patch2) > 150\n assert np.average(img_kps_aug) < 40\n\n # this test was apparently added later on (?) without noticing that\n # a similar test already existed\n def test_scale_alignment_between_images_and_keypoints2(self):\n img = np.zeros((100, 80), dtype=np.uint8)\n img[:, 9:11+1] = 255\n img[:, 69:71+1] = 255\n kps = [ia.Keypoint(x=10, y=20), ia.Keypoint(x=10, y=40),\n ia.Keypoint(x=70, y=20), ia.Keypoint(x=70, y=40)]\n kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)\n\n aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)\n aug_det = aug.to_deterministic()\n\n observed_img = aug_det.augment_image(img)\n observed_kpsoi = aug_det.augment_keypoints([kpsoi])\n\n assert not keypoints_equal([kpsoi], observed_kpsoi)\n for kp in observed_kpsoi[0].keypoints:\n assert observed_img[int(kp.y), int(kp.x)] > 0\n\n @classmethod\n def _test_scale_alignment_between_images_and_poly_or_line_strings(\n cls, cba_class, cbaoi_class, augf_name):\n img = np.zeros((100, 80), dtype=np.uint8)\n img[:, 10-5:10+5] = 255\n img[:, 70-5:70+5] = 255\n coords = [(10, 10),\n (70, 10), (70, 20), (70, 30), (70, 40),\n (70, 50), (70, 60), (70, 70), (70, 80),\n (70, 90),\n (10, 90),\n (10, 80), (10, 70), (10, 60), (10, 50),\n (10, 40), (10, 30), (10, 20), (10, 10)]\n cba = cba_class(coords)\n cbaoi = cbaoi_class([cba, cba.shift(x=1, y=1)],\n shape=img.shape)\n\n aug = iaa.PiecewiseAffine(scale=0.03, nb_rows=10, nb_cols=10)\n aug_det = aug.to_deterministic()\n\n observed_imgs = aug_det.augment_images([img, img])\n observed_cbaois = getattr(aug_det, augf_name)([cbaoi, cbaoi])\n\n for observed_img, observed_cbaoi in zip(observed_imgs, observed_cbaois):\n assert observed_cbaoi.shape == img.shape\n for cba_aug in observed_cbaoi.items:\n if hasattr(cba_aug, \"is_valid\"):\n assert cba_aug.is_valid\n for point_aug in cba_aug.coords:\n x = int(np.round(point_aug[0]))\n y = int(np.round(point_aug[1]))\n assert observed_img[y, x] > 0\n\n def test_scale_alignment_between_images_and_polygons(self):\n self._test_scale_alignment_between_images_and_poly_or_line_strings(\n ia.Polygon, ia.PolygonsOnImage, \"augment_polygons\")\n\n def test_scale_alignment_between_images_and_line_strings(self):\n self._test_scale_alignment_between_images_and_poly_or_line_strings(\n ia.LineString, ia.LineStringsOnImage, \"augment_line_strings\")\n\n def test_scale_alignment_between_images_and_bounding_boxes(self):\n img = np.zeros((100, 80), dtype=np.uint8)\n s = 0\n img[10-s:10+s+1, 20-s:20+s+1] = 255\n img[60-s:60+s+1, 70-s:70+s+1] = 255\n bb = ia.BoundingBox(y1=10, x1=20, y2=60, x2=70)\n bbsoi = ia.BoundingBoxesOnImage([bb], shape=img.shape)\n\n aug = iaa.PiecewiseAffine(scale=0.03, nb_rows=10, nb_cols=10)\n\n observed_imgs, observed_bbsois = aug(\n images=[img], bounding_boxes=[bbsoi])\n\n for observed_img, observed_bbsoi in zip(observed_imgs, observed_bbsois):\n assert observed_bbsoi.shape == img.shape\n\n observed_img_x = np.max(observed_img, axis=0)\n 
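# max-project along the other axis as well; together the two\n # profiles below bound the drawn rectangle in x and y\n 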
observed_img_y = np.max(observed_img, axis=1)\n\n            nonz_x = np.nonzero(observed_img_x)[0]\n            nonz_y = np.nonzero(observed_img_y)[0]\n\n            img_x1 = min(nonz_x)\n            img_x2 = max(nonz_x)\n            img_y1 = min(nonz_y)\n            img_y2 = max(nonz_y)\n            expected = ia.BoundingBox(x1=img_x1, y1=img_y1,\n                                      x2=img_x2, y2=img_y2)\n\n            for bb_aug in observed_bbsoi.bounding_boxes:\n                # we don't expect perfect IoU here, because the actual\n                # underlying KP aug used distance maps\n                # most IoUs seem to end up in the range 0.9-0.95\n                assert bb_aug.iou(expected) > 0.8\n\n    def test_scale_is_list(self):\n        aug1 = iaa.PiecewiseAffine(scale=0.01, nb_rows=12, nb_cols=4)\n        aug2 = iaa.PiecewiseAffine(scale=0.10, nb_rows=12, nb_cols=4)\n        aug = iaa.PiecewiseAffine(scale=[0.01, 0.10], nb_rows=12, nb_cols=4)\n\n        avg1 = np.average([\n            np.average(\n                aug1.augment_image(self.image)\n                * (~self.mask).astype(np.float32)\n            )\n            for _ in sm.xrange(3)\n        ])\n        avg2 = np.average([\n            np.average(\n                aug2.augment_image(self.image)\n                * (~self.mask).astype(np.float32)\n            )\n            for _ in sm.xrange(3)\n        ])\n        seen = [0, 0]\n        for _ in sm.xrange(15):\n            observed = aug.augment_image(self.image)\n\n            avg = np.average(observed * (~self.mask).astype(np.float32))\n            diff1 = abs(avg - avg1)\n            diff2 = abs(avg - avg2)\n            if diff1 < diff2:\n                seen[0] += 1\n            else:\n                seen[1] += 1\n        assert seen[0] > 0\n        assert seen[1] > 0\n\n    # -----\n    # rows and cols\n    # -----\n    @classmethod\n    def _compute_observed_std_ygrad_in_mask(cls, observed, mask):\n        grad_vert = (\n            observed[1:, :].astype(np.float32)\n            - observed[:-1, :].astype(np.float32)\n        )\n        grad_vert = grad_vert * (~mask[1:, :]).astype(np.float32)\n        return np.std(grad_vert)\n\n    def _compute_std_ygrad_in_mask(self, aug, image, mask, nb_iterations):\n        stds = []\n        for _ in sm.xrange(nb_iterations):\n            observed = aug.augment_image(image)\n\n            stds.append(\n                self._compute_observed_std_ygrad_in_mask(observed, mask)\n            )\n        return np.average(stds)\n\n    def test_nb_rows_affects_images(self):\n        # verify effects of rows\n        aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)\n        aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=30, nb_cols=4)\n\n        std1 = self._compute_std_ygrad_in_mask(aug1, self.image, self.mask, 3)\n        std2 = self._compute_std_ygrad_in_mask(aug2, self.image, self.mask, 3)\n\n        assert std1 < std2\n\n    def test_nb_rows_is_list_affects_images(self):\n        # rows as list\n        aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=[4, 20], nb_cols=4)\n        aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)\n        aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=30, nb_cols=4)\n\n        std1 = self._compute_std_ygrad_in_mask(aug1, self.image, self.mask, 3)\n        std2 = self._compute_std_ygrad_in_mask(aug2, self.image, self.mask, 3)\n\n        seen = [0, 0]\n        for _ in sm.xrange(20):\n            observed = aug.augment_image(self.image)\n\n            std = self._compute_observed_std_ygrad_in_mask(observed, self.mask)\n            diff1 = abs(std - std1)\n            diff2 = abs(std - std2)\n            if diff1 < diff2:\n                seen[0] += 1\n            else:\n                seen[1] += 1\n        assert seen[0] > 0\n        assert seen[1] > 0\n\n    def test_nb_cols_affects_images(self):\n        # verify effects of cols\n        image = self.image.T\n        mask = self.mask.T\n\n        aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)\n        aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=30)\n\n        std1 = self._compute_std_ygrad_in_mask(aug1, image, mask, 3)\n        std2 = self._compute_std_ygrad_in_mask(aug2, image, mask, 3)\n\n        assert std1 < std2\n\n    def test_nb_cols_is_list_affects_images(self):\n        # cols as list\n        image = self.image.T\n        mask = self.mask.T\n\n        aug = iaa.PiecewiseAffine(scale=0.05, 
nb_rows=4, nb_cols=[4, 20])\n aug1 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=4)\n aug2 = iaa.PiecewiseAffine(scale=0.05, nb_rows=4, nb_cols=30)\n\n std1 = self._compute_std_ygrad_in_mask(aug1, image, mask, 3)\n std2 = self._compute_std_ygrad_in_mask(aug2, image, mask, 3)\n\n seen = [0, 0]\n for _ in sm.xrange(20):\n observed = aug.augment_image(image)\n\n std = self._compute_observed_std_ygrad_in_mask(observed, mask)\n diff1 = abs(std - std1)\n diff2 = abs(std - std2)\n if diff1 < diff2:\n seen[0] += 1\n else:\n seen[1] += 1\n assert seen[0] > 0\n assert seen[1] > 0\n\n # -----\n # order\n # -----\n # TODO\n\n # -----\n # cval\n # -----\n def test_cval_is_zero(self):\n # since scikit-image 0.16.2 and scipy 1.4.0(!), this test requires\n # several iterations to find one image that required filling with cval\n found = False\n for _ in np.arange(50):\n img = np.zeros((16, 16, 3), dtype=np.uint8) + 255\n aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,\n mode=\"constant\", cval=0)\n observed = aug.augment_image(img)\n if np.sum([observed[:, :] == [0, 0, 0]]) > 0:\n found = True\n break\n assert found\n\n def test_cval_should_be_ignored_by_heatmaps(self):\n # cval as deterministic, heatmaps should always use cval=0\n heatmaps = HeatmapsOnImage(\n np.zeros((50, 50, 1), dtype=np.float32), shape=(50, 50, 3))\n aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,\n mode=\"constant\", cval=255)\n observed = aug.augment_heatmaps([heatmaps])[0]\n assert np.sum([observed.get_arr()[:, :] >= 0.01]) == 0\n\n def test_cval_should_be_ignored_by_segmaps(self):\n # cval as deterministic, segmaps should always use cval=0\n segmaps = SegmentationMapsOnImage(\n np.zeros((50, 50, 1), dtype=np.int32), shape=(50, 50, 3))\n aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=10, nb_cols=10,\n mode=\"constant\", cval=255)\n observed = aug.augment_segmentation_maps([segmaps])[0]\n assert np.sum([observed.get_arr()[:, :] > 0]) == 0\n\n def test_cval_is_list(self):\n # cval as list\n img = np.zeros((20, 20), dtype=np.uint8) + 255\n aug = iaa.PiecewiseAffine(scale=0.7, nb_rows=5, nb_cols=5,\n mode=\"constant\", cval=[0, 10])\n\n seen = [0, 0, 0]\n for _ in sm.xrange(30):\n observed = aug.augment_image(img)\n nb_0 = np.sum([observed[:, :] == 0])\n nb_10 = np.sum([observed[:, :] == 10])\n if nb_0 > 0:\n seen[0] += 1\n elif nb_10 > 0:\n seen[1] += 1\n else:\n seen[2] += 1\n assert seen[0] > 5\n assert seen[1] > 5\n assert seen[2] <= 4\n\n # -----\n # mode\n # -----\n # TODO\n\n # ---------\n # remaining keypoints tests\n # ---------\n def test_keypoints_outside_of_image(self):\n # keypoints outside of image\n aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)\n kps = [ia.Keypoint(x=-10, y=-20)]\n kpsoi = ia.KeypointsOnImage(kps, shape=(10, 10, 3))\n\n observed = aug.augment_keypoints(kpsoi)\n\n assert_cbaois_equal(observed, kpsoi)\n\n def test_keypoints_empty(self):\n # empty keypoints\n aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)\n kpsoi = ia.KeypointsOnImage([], shape=(10, 10, 3))\n\n observed = aug.augment_keypoints(kpsoi)\n\n assert_cbaois_equal(observed, kpsoi)\n\n # ---------\n # remaining polygons tests\n # ---------\n def test_polygons_outside_of_image(self):\n aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)\n exterior = [(-10, -10), (110, -10), (110, 90), (-10, 90)]\n poly = ia.Polygon(exterior)\n psoi = ia.PolygonsOnImage([poly], shape=(10, 10, 3))\n\n observed = aug.augment_polygons(psoi)\n\n assert_cbaois_equal(observed, psoi)\n\n def 
test_empty_polygons(self):\n aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)\n psoi = ia.PolygonsOnImage([], shape=(10, 10, 3))\n\n observed = aug.augment_polygons(psoi)\n\n assert_cbaois_equal(observed, psoi)\n\n # ---------\n # remaining line string tests\n # ---------\n def test_line_strings_outside_of_image(self):\n aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)\n coords = [(-10, -10), (110, -10), (110, 90), (-10, 90)]\n ls = ia.LineString(coords)\n lsoi = ia.LineStringsOnImage([ls], shape=(10, 10, 3))\n\n observed = aug.augment_line_strings(lsoi)\n\n assert_cbaois_equal(observed, lsoi)\n\n def test_empty_line_strings(self):\n aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)\n lsoi = ia.LineStringsOnImage([], shape=(10, 10, 3))\n\n observed = aug.augment_line_strings(lsoi)\n\n assert_cbaois_equal(observed, lsoi)\n\n # ---------\n # remaining bounding box tests\n # ---------\n def test_bounding_boxes_outside_of_image(self):\n aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=10, nb_cols=10)\n bbs = ia.BoundingBox(x1=-10, y1=-10, x2=15, y2=15)\n bbsoi = ia.BoundingBoxesOnImage([bbs], shape=(10, 10, 3))\n\n observed = aug.augment_bounding_boxes(bbsoi)\n\n assert_cbaois_equal(observed, bbsoi)\n\n def test_empty_bounding_boxes(self):\n aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=10, nb_cols=10)\n bbsoi = ia.BoundingBoxesOnImage([], shape=(10, 10, 3))\n\n observed = aug.augment_bounding_boxes(bbsoi)\n\n assert_cbaois_equal(observed, bbsoi)\n\n # ---------\n # zero-sized axes\n # ---------\n def test_zero_sized_axes(self):\n shapes = [\n (0, 0),\n (0, 1),\n (1, 0),\n (0, 1, 0),\n (1, 0, 0),\n (0, 1, 1),\n (1, 0, 1)\n ]\n\n for shape in shapes:\n with self.subTest(shape=shape):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.PiecewiseAffine(scale=0.05, nb_rows=2, nb_cols=2)\n\n image_aug = aug(image=image)\n\n assert image_aug.dtype.name == \"uint8\"\n assert image_aug.shape == shape\n\n def test_zero_sized_axes_absolute_scale(self):\n shapes = [\n (0, 0),\n (0, 1),\n (1, 0),\n (0, 1, 0),\n (1, 0, 0),\n (0, 1, 1),\n (1, 0, 1)\n ]\n\n for shape in shapes:\n with self.subTest(shape=shape):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.PiecewiseAffine(scale=5, nb_rows=2, nb_cols=2,\n absolute_scale=True)\n\n image_aug = aug(image=image)\n\n assert image_aug.dtype.name == \"uint8\"\n assert image_aug.shape == shape\n\n # ---------\n # other methods\n # ---------\n def test_get_parameters(self):\n aug = iaa.PiecewiseAffine(scale=0.1, nb_rows=8, nb_cols=10, order=1,\n cval=2, mode=\"constant\",\n absolute_scale=False)\n params = aug.get_parameters()\n assert params[0] is aug.jitter.scale\n assert params[1] is aug.nb_rows\n assert params[2] is aug.nb_cols\n assert params[3] is aug.order\n assert params[4] is aug.cval\n assert params[5] is aug.mode\n assert params[6] is False\n assert 0.1 - 1e-8 < params[0].value < 0.1 + 1e-8\n assert params[1].value == 8\n assert params[2].value == 10\n assert params[3].value == 1\n assert params[4].value == 2\n assert params[5].value == \"constant\"\n\n # ---------\n # other dtypes\n # ---------\n @property\n def other_dtypes_mask(self):\n mask = np.zeros((21, 21), dtype=bool)\n mask[:, 7:13] = True\n return mask\n\n def test_other_dtypes_bool(self):\n aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,\n mode=\"constant\")\n\n image = np.zeros((21, 21), dtype=bool)\n image[self.other_dtypes_mask] = True\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == image.dtype.name\n 
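# the warp should displace some True pixels: the result must not be\n # all-True, and some True values must land outside the center band\n 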
assert not np.all(image_aug == 1)\n assert np.any(image_aug[~self.other_dtypes_mask] == 1)\n\n def test_other_dtypes_uint_int(self):\n aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,\n mode=\"constant\")\n\n dtypes = [\"uint8\", \"uint16\", \"uint32\", \"int8\", \"int16\", \"int32\"]\n for dtype in dtypes:\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n if np.dtype(dtype).kind == \"i\":\n values = [1, 5, 10, 100, int(0.1 * max_value),\n int(0.2 * max_value), int(0.5 * max_value),\n max_value-100, max_value]\n values = values + [(-1)*value for value in values]\n else:\n values = [1, 5, 10, 100, int(center_value),\n int(0.1 * max_value), int(0.2 * max_value),\n int(0.5 * max_value), max_value-100, max_value]\n\n for value in values:\n with self.subTest(dtype=dtype, value=value):\n image = np.zeros((21, 21), dtype=dtype)\n image[:, 7:13] = value\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert not np.all(image_aug == value)\n assert np.any(image_aug[~self.other_dtypes_mask] == value)\n\n def test_other_dtypes_float(self):\n aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=8, nb_cols=4, order=0,\n mode=\"constant\")\n\n dtypes = [\"float16\", \"float32\", \"float64\"]\n for dtype in dtypes:\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n def _isclose(a, b):\n atol = 1e-4 if dtype == \"float16\" else 1e-8\n return np.isclose(a, b, atol=atol, rtol=0)\n\n isize = np.dtype(dtype).itemsize\n values = [\n 0.01,\n 1.0,\n 10.0,\n 100.0,\n 500 ** (isize - 1),\n float(np.float64(1000 ** (isize - 1)))\n ]\n values = values + [(-1) * value for value in values]\n values = values + [min_value, max_value]\n for value in values:\n with self.subTest(dtype=dtype, value=value):\n image = np.zeros((21, 21), dtype=dtype)\n image[:, 7:13] = value\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert not np.all(_isclose(image_aug, value))\n assert np.any(_isclose(image_aug[~self.other_dtypes_mask],\n value))\n\n def test_pickleable(self):\n aug = iaa.PiecewiseAffine(scale=0.2, nb_rows=4, nb_cols=4, seed=1)\n runtest_pickleable_uint8_img(aug, iterations=3, shape=(25, 25, 1))\n\n\nclass TestPerspectiveTransform(unittest.TestCase):\n def setUp(self):\n reseed()\n\n @property\n def image(self):\n img = np.zeros((30, 30), dtype=np.uint8)\n img[10:20, 10:20] = 255\n return img\n\n @property\n def heatmaps(self):\n return HeatmapsOnImage((self.image / 255.0).astype(np.float32),\n shape=self.image.shape)\n\n @property\n def segmaps(self):\n return SegmentationMapsOnImage((self.image > 0).astype(np.int32),\n shape=self.image.shape)\n\n # --------\n # __init__\n # --------\n def test___init___scale_is_tuple(self):\n # tuple for scale\n aug = iaa.PerspectiveTransform(scale=(0.1, 0.2))\n assert is_parameter_instance(aug.jitter.scale, iap.Uniform)\n assert is_parameter_instance(aug.jitter.scale.a, iap.Deterministic)\n assert is_parameter_instance(aug.jitter.scale.b, iap.Deterministic)\n assert 0.1 - 1e-8 < aug.jitter.scale.a.value < 0.1 + 1e-8\n assert 0.2 - 1e-8 < aug.jitter.scale.b.value < 0.2 + 1e-8\n\n def test___init___scale_is_list(self):\n # list for scale\n aug = iaa.PerspectiveTransform(scale=[0.1, 0.2, 0.3])\n assert is_parameter_instance(aug.jitter.scale, iap.Choice)\n assert len(aug.jitter.scale.a) == 3\n assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8\n assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8\n assert 0.3 - 1e-8 < 
aug.jitter.scale.a[2] < 0.3 + 1e-8\n\n def test___init___scale_is_stochastic_parameter(self):\n # StochasticParameter for scale\n aug = iaa.PerspectiveTransform(scale=iap.Choice([0.1, 0.2, 0.3]))\n assert is_parameter_instance(aug.jitter.scale, iap.Choice)\n assert len(aug.jitter.scale.a) == 3\n assert 0.1 - 1e-8 < aug.jitter.scale.a[0] < 0.1 + 1e-8\n assert 0.2 - 1e-8 < aug.jitter.scale.a[1] < 0.2 + 1e-8\n assert 0.3 - 1e-8 < aug.jitter.scale.a[2] < 0.3 + 1e-8\n\n def test___init___bad_datatype_for_scale_leads_to_failure(self):\n # bad datatype for scale\n got_exception = False\n try:\n _ = iaa.PerspectiveTransform(scale=False)\n except Exception as exc:\n assert \"Expected \" in str(exc)\n got_exception = True\n assert got_exception\n\n def test___init___mode_is_all(self):\n aug = iaa.PerspectiveTransform(cval=0, mode=ia.ALL)\n assert is_parameter_instance(aug.mode, iap.Choice)\n\n def test___init___mode_is_string(self):\n aug = iaa.PerspectiveTransform(cval=0, mode=\"replicate\")\n assert is_parameter_instance(aug.mode, iap.Deterministic)\n assert aug.mode.value == \"replicate\"\n\n def test___init___mode_is_list(self):\n aug = iaa.PerspectiveTransform(cval=0, mode=[\"replicate\", \"constant\"])\n assert is_parameter_instance(aug.mode, iap.Choice)\n assert (\n len(aug.mode.a) == 2\n and \"replicate\" in aug.mode.a\n and \"constant\" in aug.mode.a)\n\n def test___init___mode_is_stochastic_parameter(self):\n aug = iaa.PerspectiveTransform(\n cval=0, mode=iap.Choice([\"replicate\", \"constant\"]))\n assert is_parameter_instance(aug.mode, iap.Choice)\n assert (\n len(aug.mode.a) == 2\n and \"replicate\" in aug.mode.a\n and \"constant\" in aug.mode.a)\n\n # --------\n # image, heatmaps, segmaps\n # --------\n def test_image_without_keep_size(self):\n # without keep_size\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n\n observed = aug.augment_image(self.image)\n\n y1 = int(30*0.2)\n y2 = int(30*0.8)\n x1 = int(30*0.2)\n x2 = int(30*0.8)\n\n expected = self.image[y1:y2, x1:x2]\n assert all([\n abs(s1-s2) <= 1 for s1, s2 in zip(observed.shape, expected.shape)\n ])\n if observed.shape != expected.shape:\n observed = ia.imresize_single_image(\n observed, expected.shape[0:2], interpolation=\"cubic\")\n # differences seem to mainly appear around the border of the inner\n # rectangle, possibly due to interpolation\n assert np.average(\n np.abs(observed.astype(np.int32) - expected.astype(np.int32))\n ) < 30.0\n\n def test_image_heatmaps_alignment_without_keep_size(self):\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n hm = HeatmapsOnImage(\n self.image.astype(np.float32)/255.0,\n shape=(30, 30)\n )\n\n observed = aug.augment_image(self.image)\n hm_aug = aug.augment_heatmaps([hm])[0]\n\n y1 = int(30*0.2)\n y2 = int(30*0.8)\n x1 = int(30*0.2)\n x2 = int(30*0.8)\n\n expected = (y2 - y1, x2 - x1)\n assert all([\n abs(s1-s2) <= 1\n for s1, s2\n in zip(hm_aug.shape, expected)\n ])\n assert all([\n abs(s1-s2) <= 1\n for s1, s2\n in zip(hm_aug.arr_0to1.shape, expected + (1,))\n ])\n img_aug_mask = observed > 255*0.1\n hm_aug_mask = hm_aug.arr_0to1 > 0.1\n same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])\n assert (same / img_aug_mask.size) >= 0.99\n\n def test_image_segmaps_alignment_without_keep_size(self):\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n segmaps = SegmentationMapsOnImage(\n (self.image > 100).astype(np.int32),\n shape=(30, 30)\n 
)\n\n observed = aug.augment_image(self.image)\n segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]\n\n y1 = int(30*0.2)\n y2 = int(30*0.8)\n x1 = int(30*0.2)\n x2 = int(30*0.8)\n\n expected = (y2 - y1, x2 - x1)\n assert all([\n abs(s1-s2) <= 1\n for s1, s2\n in zip(segmaps_aug.shape, expected)\n ])\n assert all([\n abs(s1-s2) <= 1\n for s1, s2\n in zip(segmaps_aug.arr.shape, expected + (1,))\n ])\n img_aug_mask = observed > 255*0.5\n segmaps_aug_mask = segmaps_aug.arr > 0\n same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])\n assert (same / img_aug_mask.size) >= 0.99\n\n def test_consecutive_calls_produce_different_results(self):\n # PerspectiveTransform works with random_state.copy(), so we\n # test explicitly that it doesn't always use the same samples\n aug = iaa.PerspectiveTransform((0.0, 0.2))\n image = np.mod(np.arange(16*16), 255).astype(np.uint8).reshape((16, 16))\n nb_same = 0\n last_image = aug(image=image)\n for _ in np.arange(100):\n image_aug = aug(image=image)\n nb_same += int(np.array_equal(image_aug, last_image))\n assert nb_same <= 1\n\n def test_heatmaps_smaller_than_image_without_keep_size(self):\n # without keep_size, different heatmap size\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n\n height, width = 300, 200\n height_small, width_small = 150, 100\n\n y1 = int(height*0.2)\n y2 = int(height*0.8)\n x1 = int(width*0.2)\n x2 = int(width*0.8)\n y1_small = int(height_small*0.2)\n y2_small = int(height_small*0.8)\n x1_small = int(width_small*0.2)\n x2_small = int(width_small*0.8)\n\n img_small = ia.imresize_single_image(\n self.image,\n (height_small, width_small),\n interpolation=\"cubic\")\n hm = ia.HeatmapsOnImage(\n img_small.astype(np.float32)/255.0,\n shape=(height, width))\n\n img_aug = aug.augment_image(self.image)\n hm_aug = aug.augment_heatmaps([hm])[0]\n\n expected = (y2 - y1, x2 - x1)\n expected_small = (y2_small - y1_small, x2_small - x1_small, 1)\n assert all([\n abs(s1-s2) <= 1\n for s1, s2\n in zip(hm_aug.shape, expected)\n ])\n assert all([\n abs(s1-s2) <= 1\n for s1, s2\n in zip(hm_aug.arr_0to1.shape, expected_small)\n ])\n img_aug_mask = img_aug > 255*0.1\n hm_aug_mask = ia.imresize_single_image(\n hm_aug.arr_0to1, img_aug.shape[0:2], interpolation=\"linear\"\n ) > 0.1\n same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])\n assert (same / img_aug_mask.size) >= 0.96\n\n def test_segmaps_smaller_than_image_without_keep_size(self):\n # without keep_size, different segmap size\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n\n y1 = int(30*0.2)\n y2 = int(30*0.8)\n x1 = int(30*0.2)\n x2 = int(30*0.8)\n x1_small = int(25*0.2)\n x2_small = int(25*0.8)\n y1_small = int(20*0.2)\n y2_small = int(20*0.8)\n\n img_small = ia.imresize_single_image(\n self.image,\n (20, 25),\n interpolation=\"cubic\")\n seg = SegmentationMapsOnImage(\n (img_small > 100).astype(np.int32),\n shape=(30, 30))\n\n img_aug = aug.augment_image(self.image)\n seg_aug = aug.augment_segmentation_maps([seg])[0]\n\n expected = (y2 - y1, x2 - x1)\n expected_small = (y2_small - y1_small, x2_small - x1_small, 1)\n assert all([\n abs(s1-s2) <= 1\n for s1, s2\n in zip(seg_aug.shape, expected)\n ])\n assert all([\n abs(s1-s2) <= 1\n for s1, s2\n in zip(seg_aug.arr.shape, expected_small)\n ])\n img_aug_mask = img_aug > 255*0.5\n seg_aug_mask = ia.imresize_single_image(\n seg_aug.arr, img_aug.shape[0:2], interpolation=\"nearest\") > 0\n same = np.sum(img_aug_mask == seg_aug_mask[:, :, 
0])\n assert (same / img_aug_mask.size) >= 0.92\n\n def test_image_with_keep_size(self):\n # with keep_size\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)\n aug.jitter = iap.Deterministic(0.2)\n\n observed = aug.augment_image(self.image)\n\n expected = self.image[int(30*0.2):int(30*0.8),\n int(30*0.2):int(30*0.8)]\n expected = ia.imresize_single_image(\n expected,\n self.image.shape[0:2],\n interpolation=\"cubic\")\n assert observed.shape == self.image.shape\n # differences seem to mainly appear around the border of the inner\n # rectangle, possibly due to interpolation\n assert np.average(\n np.abs(observed.astype(np.int32) - expected.astype(np.int32))\n ) < 30.0\n\n def test_heatmaps_with_keep_size(self):\n # with keep_size, heatmaps\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)\n aug.jitter = iap.Deterministic(0.2)\n\n observed = aug.augment_heatmaps([self.heatmaps])[0]\n\n heatmaps_arr = self.heatmaps.get_arr()\n expected = heatmaps_arr[int(30*0.2):int(30*0.8),\n int(30*0.2):int(30*0.8)]\n expected = ia.imresize_single_image(\n (expected*255).astype(np.uint8),\n self.image.shape[0:2],\n interpolation=\"cubic\")\n expected = (expected / 255.0).astype(np.float32)\n assert observed.shape == self.heatmaps.shape\n _assert_same_min_max(observed, self.heatmaps)\n # differences seem to mainly appear around the border of the inner\n # rectangle, possibly due to interpolation\n assert np.average(np.abs(observed.get_arr() - expected)) < 30.0\n\n def test_segmaps_with_keep_size(self):\n # with keep_size, segmaps\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)\n aug.jitter = iap.Deterministic(0.2)\n\n observed = aug.augment_segmentation_maps([self.segmaps])[0]\n\n segmaps_arr = self.segmaps.get_arr()\n expected = segmaps_arr[int(30*0.2):int(30*0.8),\n int(30*0.2):int(30*0.8)]\n expected = ia.imresize_single_image(\n (expected*255).astype(np.uint8),\n self.image.shape[0:2],\n interpolation=\"cubic\")\n expected = (expected > 255*0.5).astype(np.int32)\n assert observed.shape == self.segmaps.shape\n assert np.average(observed.get_arr() != expected) < 0.05\n\n def test_image_rgb_with_keep_size(self):\n # with keep_size, RGB images\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)\n aug.jitter = iap.Deterministic(0.2)\n imgs = np.tile(self.image[np.newaxis, :, :, np.newaxis], (2, 1, 1, 3))\n\n observed = aug.augment_images(imgs)\n\n for img_idx in sm.xrange(2):\n for c in sm.xrange(3):\n observed_i = observed[img_idx, :, :, c]\n expected = imgs[img_idx,\n int(30*0.2):int(30*0.8),\n int(30*0.2):int(30*0.8),\n c]\n expected = ia.imresize_single_image(\n expected, imgs.shape[1:3], interpolation=\"cubic\")\n assert observed_i.shape == imgs.shape[1:3]\n # differences seem to mainly appear around the border of the\n # inner rectangle, possibly due to interpolation\n assert np.average(\n np.abs(\n observed_i.astype(np.int32) - expected.astype(np.int32)\n )\n ) < 30.0\n\n # --------\n # keypoints\n # --------\n def test_keypoints_without_keep_size(self):\n # keypoint augmentation without keep_size\n # TODO deviations of around 0.4-0.7 in this and the next test (between\n # expected and observed coordinates) -- why?\n kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]\n kpsoi = ia.KeypointsOnImage(kps, shape=self.image.shape)\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n\n observed = aug.augment_keypoints([kpsoi])\n\n kps_expected = [\n ia.Keypoint(x=10-0.2*30, y=10-0.2*30),\n ia.Keypoint(x=14-0.2*30, 
y=11-0.2*30)\n ]\n gen = zip(observed[0].keypoints, kps_expected)\n # TODO deviations of around 0.5 here from expected values, why?\n for kp_observed, kp_expected in gen:\n assert kp_observed.coords_almost_equals(\n kp_expected, max_distance=1.5)\n\n def test_keypoints_with_keep_size(self):\n # keypoint augmentation with keep_size\n kps = [ia.Keypoint(x=10, y=10), ia.Keypoint(x=14, y=11)]\n kpsoi = ia.KeypointsOnImage(kps, shape=self.image.shape)\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)\n aug.jitter = iap.Deterministic(0.2)\n\n observed = aug.augment_keypoints([kpsoi])\n\n kps_expected = [\n ia.Keypoint(x=((10-0.2*30)/(30*0.6))*30,\n y=((10-0.2*30)/(30*0.6))*30),\n ia.Keypoint(x=((14-0.2*30)/(30*0.6))*30,\n y=((11-0.2*30)/(30*0.6))*30)\n ]\n gen = zip(observed[0].keypoints, kps_expected)\n # TODO deviations of around 0.5 here from expected values, why?\n for kp_observed, kp_expected in gen:\n assert kp_observed.coords_almost_equals(\n kp_expected, max_distance=1.5)\n\n def test_image_keypoint_alignment(self):\n img = np.zeros((100, 100), dtype=np.uint8)\n img[25-3:25+3, 25-3:25+3] = 255\n img[50-3:50+3, 25-3:25+3] = 255\n img[75-3:75+3, 25-3:25+3] = 255\n img[25-3:25+3, 75-3:75+3] = 255\n img[50-3:50+3, 75-3:75+3] = 255\n img[75-3:75+3, 75-3:75+3] = 255\n img[50-3:75+3, 50-3:75+3] = 255\n kps = [\n ia.Keypoint(y=25, x=25), ia.Keypoint(y=50, x=25),\n ia.Keypoint(y=75, x=25), ia.Keypoint(y=25, x=75),\n ia.Keypoint(y=50, x=75), ia.Keypoint(y=75, x=75),\n ia.Keypoint(y=50, x=50)\n ]\n kpsoi = ia.KeypointsOnImage(kps, shape=img.shape)\n aug = iaa.PerspectiveTransform(scale=(0.05, 0.15), keep_size=True)\n\n for _ in sm.xrange(10):\n aug_det = aug.to_deterministic()\n imgs_aug = aug_det.augment_images([img, img])\n kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])\n\n for img_aug, kpsoi_aug in zip(imgs_aug, kpsois_aug):\n assert kpsoi_aug.shape == img.shape\n for kp_aug in kpsoi_aug.keypoints:\n x, y = int(np.round(kp_aug.x)), int(np.round(kp_aug.y))\n if 0 <= x < img.shape[1] and 0 <= y < img.shape[0]:\n assert img_aug[y, x] > 10\n\n def test_empty_keypoints(self):\n # test empty keypoints\n kpsoi = ia.KeypointsOnImage([], shape=(20, 10, 3))\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)\n\n observed = aug.augment_keypoints(kpsoi)\n\n assert_cbaois_equal(observed, kpsoi)\n\n # --------\n # abstract test methods for polygons and line strings\n # --------\n @classmethod\n def _test_cbaois_without_keep_size(cls, cba_class, cbaoi_class, augf_name):\n points = np.float32([\n [10, 10],\n [25, 10],\n [25, 25],\n [10, 25]\n ])\n cbaoi = cbaoi_class([cba_class(points)], shape=(30, 30, 3))\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n\n observed = getattr(aug, augf_name)(cbaoi)\n\n assert observed.shape == (30 - 12, 30 - 12, 3)\n assert len(observed.items) == 1\n if hasattr(observed.items[0], \"is_valid\"):\n assert observed.items[0].is_valid\n\n points_expected = np.copy(points)\n points_expected[:, 0] -= 0.2 * 30\n points_expected[:, 1] -= 0.2 * 30\n # TODO deviations of around 0.5 here from expected values, why?\n assert observed.items[0].coords_almost_equals(\n points_expected, max_distance=1.5)\n\n @classmethod\n def _test_cbaois_with_keep_size(cls, cba_class, cbaoi_class, augf_name):\n # polygon augmentation with keep_size\n points = np.float32([\n [10, 10],\n [25, 10],\n [25, 25],\n [10, 25]\n ])\n cbaoi = cbaoi_class([cba_class(points)], shape=(30, 30, 3))\n aug = iaa.PerspectiveTransform(scale=0.2, 
keep_size=True)\n aug.jitter = iap.Deterministic(0.2)\n\n observed = getattr(aug, augf_name)(cbaoi)\n\n assert observed.shape == (30, 30, 3)\n assert len(observed.items) == 1\n if hasattr(observed.items[0], \"is_valid\"):\n assert observed.items[0].is_valid\n\n points_expected = np.copy(points)\n points_expected[:, 0] = (\n (points_expected[:, 0] - 0.2 * 30) / (30 * 0.6)\n ) * 30\n points_expected[:, 1] = (\n (points_expected[:, 1] - 0.2 * 30) / (30 * 0.6)\n ) * 30\n # TODO deviations of around 0.5 here from expected values, why?\n assert observed.items[0].coords_almost_equals(\n points_expected, max_distance=2.5)\n\n @classmethod\n def _test_image_cba_alignment(cls, cba_class, cbaoi_class, augf_name):\n img = np.zeros((100, 100), dtype=np.uint8)\n img[25-3:25+3, 25-3:25+3] = 255\n img[50-3:50+3, 25-3:25+3] = 255\n img[75-3:75+3, 25-3:25+3] = 255\n img[25-3:25+3, 75-3:75+3] = 255\n img[50-3:50+3, 75-3:75+3] = 255\n img[75-3:75+3, 75-3:75+3] = 255\n points = [\n [25, 25],\n [75, 25],\n [75, 50],\n [75, 75],\n [25, 75],\n [25, 50]\n ]\n\n cbaoi = cbaoi_class([cba_class(points)], shape=img.shape)\n aug = iaa.PerspectiveTransform(scale=0.1, keep_size=True)\n for _ in sm.xrange(10):\n aug_det = aug.to_deterministic()\n imgs_aug = aug_det.augment_images([img] * 4)\n cbaois_aug = getattr(aug_det, augf_name)([cbaoi] * 4)\n\n for img_aug, cbaoi_aug in zip(imgs_aug, cbaois_aug):\n assert cbaoi_aug.shape == img.shape\n for cba_aug in cbaoi_aug.items:\n if hasattr(cba_aug, \"is_valid\"):\n assert cba_aug.is_valid\n for x, y in cba_aug.coords:\n if 0 <= x < img.shape[1] and 0 <= y < img.shape[0]:\n bb = ia.BoundingBox(x1=x-2, x2=x+2, y1=y-2, y2=y+2)\n img_ex = bb.extract_from_image(img_aug)\n assert np.any(img_ex > 10)\n\n @classmethod\n def _test_empty_cba(cls, cbaoi, augf_name):\n # test empty polygons\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)\n\n observed = getattr(aug, augf_name)(cbaoi)\n\n assert_cbaois_equal(observed, cbaoi)\n\n # --------\n # polygons\n # --------\n def test_polygons_without_keep_size(self):\n self._test_cbaois_without_keep_size(ia.Polygon, ia.PolygonsOnImage,\n \"augment_polygons\")\n\n def test_polygons_with_keep_size(self):\n self._test_cbaois_with_keep_size(ia.Polygon, ia.PolygonsOnImage,\n \"augment_polygons\")\n\n def test_image_polygon_alignment(self):\n self._test_image_cba_alignment(ia.Polygon, ia.PolygonsOnImage,\n \"augment_polygons\")\n\n def test_empty_polygons(self):\n psoi = ia.PolygonsOnImage([], shape=(20, 10, 3))\n self._test_empty_cba(psoi, \"augment_polygons\")\n\n def test_polygons_under_extreme_scale_values(self):\n # test extreme scales\n # TODO when setting .min_height and .min_width in PerspectiveTransform\n # to 1x1, at least one of the output polygons was invalid and had\n # only 3 instead of the expected 4 points - why?\n for scale in [0.1, 0.2, 0.3, 0.4]:\n with self.subTest(scale=scale):\n exterior = np.float32([\n [10, 10],\n [25, 10],\n [25, 25],\n [10, 25]\n ])\n psoi = ia.PolygonsOnImage([ia.Polygon(exterior)],\n shape=(30, 30, 3))\n aug = iaa.PerspectiveTransform(scale=scale, keep_size=True)\n aug.jitter = iap.Deterministic(scale)\n\n observed = aug.augment_polygons(psoi)\n\n assert observed.shape == (30, 30, 3)\n assert len(observed.polygons) == 1\n assert observed.polygons[0].is_valid\n\n # FIXME this part is currently deactivated due to too large\n # deviations from expectations. 
As the alignment check\n # works, this is probably some error on the test side\n \"\"\"\n exterior_expected = np.copy(exterior)\n exterior_expected[:, 0] = (\n (exterior_expected[:, 0] - scale * 30) / (30*(1-2*scale))\n ) * 30\n exterior_expected[:, 1] = (\n (exterior_expected[:, 1] - scale * 30) / (30*(1-2*scale))\n ) * 30\n poly0 = observed.polygons[0]\n # TODO deviations of around 0.5 here from expected values, why?\n assert poly0.exterior_almost_equals(\n exterior_expected, max_distance=2.0)\n \"\"\"\n\n # --------\n # line strings\n # --------\n def test_line_strings_without_keep_size(self):\n self._test_cbaois_without_keep_size(ia.LineString, ia.LineStringsOnImage,\n \"augment_line_strings\")\n\n def test_line_strings_with_keep_size(self):\n self._test_cbaois_with_keep_size(ia.LineString, ia.LineStringsOnImage,\n \"augment_line_strings\")\n\n def test_image_line_string_alignment(self):\n self._test_image_cba_alignment(ia.LineString, ia.LineStringsOnImage,\n \"augment_line_strings\")\n\n def test_empty_line_strings(self):\n lsoi = ia.LineStringsOnImage([], shape=(20, 10, 3))\n self._test_empty_cba(lsoi, \"augment_line_strings\")\n\n # --------\n # bounding boxes\n # --------\n def test_bounding_boxes_without_keep_size(self):\n # BB augmentation without keep_size\n # TODO deviations of around 0.4-0.7 in this and the next test (between\n # expected and observed coordinates) -- why?\n bbs = [ia.BoundingBox(x1=0, y1=10, x2=20, y2=20)]\n bbsoi = ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n\n observed = aug.augment_bounding_boxes([bbsoi])\n\n bbs_expected = [\n ia.BoundingBox(x1=0-0.2*30, y1=10-0.2*30,\n x2=20-0.2*30, y2=20-0.2*30)\n ]\n gen = zip(observed[0].bounding_boxes, bbs_expected)\n # TODO deviations of around 0.5 here from expected values, why?\n for bb_observed, bb_expected in gen:\n assert bb_observed.coords_almost_equals(\n bb_expected, max_distance=1.5)\n\n def test_bounding_boxes_with_keep_size(self):\n # BB augmentation with keep_size\n bbs = [ia.BoundingBox(x1=0, y1=10, x2=20, y2=20)]\n bbsoi = ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)\n aug.jitter = iap.Deterministic(0.2)\n\n observed = aug.augment_bounding_boxes([bbsoi])\n\n bbs_expected = [\n ia.BoundingBox(\n x1=((0-0.2*30)/(30*0.6))*30,\n y1=((10-0.2*30)/(30*0.6))*30,\n x2=((20-0.2*30)/(30*0.6))*30,\n y2=((20-0.2*30)/(30*0.6))*30\n )\n ]\n gen = zip(observed[0].bounding_boxes, bbs_expected)\n # TODO deviations of around 0.5 here from expected values, why?\n for bb_observed, bb_expected in gen:\n assert bb_observed.coords_almost_equals(\n bb_expected, max_distance=1.5)\n\n def test_image_bounding_box_alignment(self):\n img = np.zeros((100, 100), dtype=np.uint8)\n img[35:35+1, 35:65+1] = 255\n img[65:65+1, 35:65+1] = 255\n img[35:65+1, 35:35+1] = 255\n img[35:65+1, 65:65+1] = 255\n bbs = [\n ia.BoundingBox(y1=35.5, x1=35.5, y2=65.5, x2=65.5),\n ]\n bbsoi = ia.BoundingBoxesOnImage(bbs, shape=img.shape)\n aug = iaa.PerspectiveTransform(scale=(0.05, 0.2), keep_size=True)\n\n for _ in sm.xrange(30):\n imgs_aug, bbsois_aug = aug(\n images=[img, img, img, img],\n bounding_boxes=[bbsoi, bbsoi, bbsoi, bbsoi])\n\n nb_skipped = 0\n for img_aug, bbsoi_aug in zip(imgs_aug, bbsois_aug):\n assert bbsoi_aug.shape == img_aug.shape\n for bb_aug in bbsoi_aug.bounding_boxes:\n if bb_aug.is_fully_within_image(img_aug):\n # top, bottom, left, right\n x1 = 
bb_aug.x1_int\n x2 = bb_aug.x2_int\n y1 = bb_aug.y1_int\n y2 = bb_aug.y2_int\n top_row = img_aug[y1-1:y1+1, x1-1:x2+1]\n btm_row = img_aug[y2-1:y2+1, x1-1:x2+1]\n lft_row = img_aug[y1-1:y2+1, x1-1:x1+1]\n rgt_row = img_aug[y1-1:y2+1, x2-1:x2+1]\n assert np.max(top_row) > 10\n assert np.max(btm_row) > 10\n assert np.max(lft_row) > 10\n assert np.max(rgt_row) > 10\n else:\n nb_skipped += 1\n assert nb_skipped <= 3\n\n def test_bounding_boxes_cover_extreme_points(self):\n # Test that for BBs, the augmented BB x coord is really the minimum\n # of the BB corner x-coords after augmentation and e.g. not just always\n # the augmented top-left corner's coordinate.\n h = w = 200 # height, width\n s = 5 # block size\n j_r = 0.1 # relative amount of jitter\n j = int(h * j_r) # absolute amount of jitter\n\n # Note that PerspectiveTransform currently places four points on the\n # image and back-projects to the image size (roughly).\n # That's why e.g. TopWiderThanBottom has coordinates that seem like\n # the top is thinner than the bottom (after projecting back to the\n # image rectangle, the top becomes wider).\n class _JitterTopWiderThanBottom(object):\n def draw_samples(self, size, random_state):\n return np.float32([\n [\n [j_r, 0.0], # top-left\n [j_r, 0.0], # top-right\n [0.0, 0.0], # bottom-right\n [0.0, 0.0], # bottom-left\n ]\n ])\n\n class _JitterTopThinnerThanBottom(object):\n def draw_samples(self, size, random_state):\n return np.float32([\n [\n [0.0, 0.0], # top-left\n [0.0, 0.0], # top-right\n [j_r, 0.0], # bottom-right\n [j_r, 0.0], # bottom-left\n ]\n ])\n\n class _JitterLeftWiderThanRight(object):\n def draw_samples(self, size, random_state):\n return np.float32([\n [\n [0.0, j_r], # top-left\n [0.0, 0.0], # top-right\n [0.0, 0.0], # bottom-right\n [0.0, j_r], # bottom-left\n ]\n ])\n\n class _JitterLeftThinnerThanRight(object):\n def draw_samples(self, size, random_state):\n return np.float32([\n [\n [0.0, 0.0], # top-left\n [0.0, j_r], # top-right\n [0.0, j_r], # bottom-right\n [0.0, 0.0], # bottom-left\n ]\n ])\n\n jitters = [\n _JitterTopWiderThanBottom(),\n _JitterTopThinnerThanBottom(),\n _JitterLeftWiderThanRight(),\n _JitterLeftThinnerThanRight(),\n ]\n\n # expected coordinates after applying the above jitter\n # coordinates here are given as\n # (ystart, yend), (xstart, xend)\n coords = [\n # top wider than bottom\n [\n [(0+j, s+j+1), (0, s+1)], # top left\n [(0+j, s+j+1), (w-s, w+1)], # top right\n [(h-s-j, h-j+1), (w-s-j, w-j+1)], # bottom right\n [(h-s-j, h-j+1), (0+j, s+j+1)] # bottom left\n ],\n # top thinner than bottom\n [\n [(0+j, s+j+1), (0+j, s+j+1)],\n [(0+j, s+j+1), (w-s-j, w-j+1)],\n [(h-s-j, h-j+1), (w-s, w+1)],\n [(h-s-j, h-j+1), (0, s+1)]\n ],\n # left wider than right\n [\n [(0, s+1), (0+j, s+j+1)],\n [(0+j, s+j+1), (w-s-j, w-j+1)],\n [(h-s-j, h-j+1), (w-s-j, w-j+1)],\n [(h-s, h+1), (0+j, s+j+1)]\n ],\n # left thinner than right\n [\n [(0+j, s+j+1), (0+j, s+j+1)],\n [(0, s+1), (w-s-j, w-j+1)],\n [(h-s, h+1), (w-s-j, w-j+1)],\n [(h-s-j, h-j+1), (0+j, s+j+1)]\n ],\n ]\n\n image = np.zeros((h-1, w-1, 4), dtype=np.uint8)\n image = iaa.pad(image, top=1, right=1, bottom=1, left=1, cval=50)\n image[0+j:s+j+1, 0+j:s+j+1, 0] = 255\n image[0+j:s+j+1, w-s-j:w-j+1, 1] = 255\n image[h-s-j:h-j+1, w-s-j:w-j+1, 2] = 255\n image[h-s-j:h-j+1, 0+j:s+j+1, 3] = 255\n\n bb = ia.BoundingBox(x1=0.0+j,\n y1=0.0+j,\n x2=w-j,\n y2=h-j)\n bbsoi = ia.BoundingBoxesOnImage([bb], shape=image.shape)\n\n i = 0\n for jitter, coords_i in zip(jitters, coords):\n with 
self.subTest(jitter=jitter.__class__.__name__):\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)\n aug.jitter = jitter\n\n image_aug, bbsoi_aug = aug(image=image, bounding_boxes=bbsoi)\n assert image_aug.shape == image.shape\n\n (tl_y1, tl_y2), (tl_x1, tl_x2) = coords_i[0]\n (tr_y1, tr_y2), (tr_x1, tr_x2) = coords_i[1]\n (br_y1, br_y2), (br_x1, br_x2) = coords_i[2]\n (bl_y1, bl_y2), (bl_x1, bl_x2) = coords_i[3]\n\n # We have to be rather tolerant here (>100 instead of e.g.\n # >200), because the transformation seems to be not that\n # accurate and the blobs may be a few pixels off the expected\n # coordinates.\n assert np.max(image_aug[tl_y1:tl_y2, tl_x1:tl_x2, 0]) > 100\n assert np.max(image_aug[tr_y1:tr_y2, tr_x1:tr_x2, 1]) > 100\n assert np.max(image_aug[br_y1:br_y2, br_x1:br_x2, 2]) > 100\n assert np.max(image_aug[bl_y1:bl_y2, bl_x1:bl_x2, 3]) > 100\n\n # We have rather strong tolerances of 7.5 here, partially\n # because the blobs are wide and the true coordinates are in\n # the center of the blobs; partially, because of above\n # mentioned inaccuracy of PerspectiveTransform.\n bb_aug = bbsoi_aug.bounding_boxes[0]\n exp_x1 = min([tl_x1, tr_x1, br_x1, bl_x1])\n exp_x2 = max([tl_x2, tr_x2, br_x2, bl_x2])\n exp_y1 = min([tl_y1, tr_y1, br_y1, bl_y1])\n exp_y2 = max([tl_y2, tr_y2, br_y2, bl_y2])\n assert np.isclose(bb_aug.x1, exp_x1, atol=7.5)\n assert np.isclose(bb_aug.y1, exp_y1, atol=7.5)\n assert np.isclose(bb_aug.x2, exp_x2, atol=7.5)\n assert np.isclose(bb_aug.y2, exp_y2, atol=7.5)\n\n def test_empty_bounding_boxes(self):\n # test empty bounding boxes\n bbsoi = ia.BoundingBoxesOnImage([], shape=(20, 10, 3))\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=True)\n\n observed = aug.augment_bounding_boxes(bbsoi)\n\n assert_cbaois_equal(observed, bbsoi)\n\n # ------------\n # mode\n # ------------\n def test_draw_samples_with_mode_being_int(self):\n aug = iaa.PerspectiveTransform(scale=0.001, mode=cv2.BORDER_REPLICATE)\n\n samples = aug._draw_samples([(10, 10, 3)], iarandom.RNG(0))\n\n assert samples.modes.shape == (1,)\n assert samples.modes[0] == cv2.BORDER_REPLICATE\n\n def test_draw_samples_with_mode_being_string(self):\n aug = iaa.PerspectiveTransform(scale=0.001, mode=\"replicate\")\n\n samples = aug._draw_samples([(10, 10, 3)], iarandom.RNG(0))\n\n assert samples.modes.shape == (1,)\n assert samples.modes[0] == cv2.BORDER_REPLICATE\n\n def test_mode_replicate_copies_values(self):\n aug = iaa.PerspectiveTransform(\n scale=0.001, mode=\"replicate\", cval=0, seed=31)\n img = np.ones((256, 256, 3), dtype=np.uint8) * 255\n\n img_aug = aug.augment_image(img)\n\n assert (img_aug == 255).all()\n\n def test_mode_constant_uses_cval(self):\n aug255 = iaa.PerspectiveTransform(\n scale=0.001, mode=\"constant\", cval=255, seed=31)\n aug0 = iaa.PerspectiveTransform(\n scale=0.001, mode=\"constant\", cval=0, seed=31)\n img = np.ones((256, 256, 3), dtype=np.uint8) * 255\n\n img_aug255 = aug255.augment_image(img)\n img_aug0 = aug0.augment_image(img)\n\n assert (img_aug255 == 255).all()\n # TODO This was originally \"assert not (...)\", but since\n # PerspectiveTransform has become more precise, there are no\n # filled pixels anymore at the edges. That is because PerspT\n # currently only zooms in and not out. 
Filled pixels at the sides\n # were previously due to a bug.\n assert (img_aug0 == 255).all()\n\n # ---------\n # fit_output\n # ---------\n def test_fit_output_with_fixed_jitter(self):\n aug = iaa.PerspectiveTransform(scale=0.2, fit_output=True,\n keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n\n image = np.zeros((40, 40, 3), dtype=np.uint8)\n image[0:3, 0:3, 0] = 255\n image[0:3, 40-3:, 1] = 255\n image[40-3:, 40-3:, 2] = 255\n\n image_aug = aug(image=image)\n\n h, w = image_aug.shape[0:2]\n y0 = np.argmax(image_aug[:, 0, 0])\n x0 = np.argmax(image_aug[0, :, 0])\n y1 = np.argmax(image_aug[:, w-1, 1])\n x1 = np.argmax(image_aug[0, :, 1])\n y2 = np.argmax(image_aug[:, w-1, 2])\n x2 = np.argmax(image_aug[h-1, :, 2])\n\n # different shape\n assert image_aug.shape == image.shape\n\n # corners roughly still at top-left, top-right, bottom-right\n assert 0 <= y0 <= 3\n assert 0 <= x0 <= 3\n assert 0 <= y1 <= 3\n assert image_aug.shape[1]-3 <= x1 <= image_aug.shape[1]\n assert image_aug.shape[1]-3 <= y2 <= image_aug.shape[1]\n assert image_aug.shape[1]-3 <= x2 <= image_aug.shape[1]\n\n # no corner pixels now in the center\n assert np.max(image_aug[8:h-8, 8:w-8, :]) == 0\n\n def test_fit_output_with_random_jitter(self):\n aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,\n keep_size=False)\n\n image = np.zeros((50, 50, 4), dtype=np.uint8)\n image[0:5, 0:5, 0] = 255\n image[0:5, 50-5:, 1] = 255\n image[50-5:, 50-5:, 2] = 255\n image[50-5:, 0:5, 3] = 255\n\n for _ in sm.xrange(10):\n image_aug = aug(image=image)\n\n h, w = image_aug.shape[0:2]\n arr_nochan = np.max(image_aug, axis=2)\n y_idx = np.where(np.max(arr_nochan, axis=1))[0]\n x_idx = np.where(np.max(arr_nochan, axis=0))[0]\n y_min = np.min(y_idx)\n y_max = np.max(y_idx)\n x_min = np.min(x_idx)\n x_max = np.max(x_idx)\n\n tol = 0\n assert 0 <= y_min <= 5+tol\n assert 0 <= x_min <= 5+tol\n assert h-5-tol <= y_max <= h-1\n assert w-5-tol <= x_max <= w-1\n\n def test_fit_output_with_random_jitter__segmentation_maps(self):\n aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,\n keep_size=False)\n\n arr = np.zeros((50, 50, 4), dtype=np.uint8)\n arr[0:5, 0:5, 0] = 1\n arr[0:5, 50-5:, 1] = 1\n arr[50-5:, 50-5:, 2] = 1\n arr[50-5:, 0:5, 3] = 1\n segmap = ia.SegmentationMapsOnImage(arr, shape=(50, 50, 3))\n\n image = np.zeros((49, 49, 3), dtype=np.uint8)\n image = iaa.pad(image, top=1, right=1, bottom=1, left=1, cval=128)\n\n for _ in sm.xrange(10):\n image_aug, segmap_aug = aug(image=image, segmentation_maps=segmap)\n\n h, w = segmap_aug.arr.shape[0:2]\n arr_nochan = np.max(segmap_aug.arr, axis=2)\n y_idx = np.where(np.max(arr_nochan, axis=1))[0]\n x_idx = np.where(np.max(arr_nochan, axis=0))[0]\n y_min = np.min(y_idx)\n y_max = np.max(y_idx)\n x_min = np.min(x_idx)\n x_max = np.max(x_idx)\n\n tol = 0\n assert 0 <= y_min <= 5+tol\n assert 0 <= x_min <= 5+tol\n assert h-5-tol <= y_max <= h-1\n assert w-5-tol <= x_max <= w-1\n\n def test_fit_output_with_fixed_jitter__keypoints(self):\n aug = iaa.PerspectiveTransform(scale=0.1, fit_output=True,\n keep_size=False)\n\n kpsoi = ia.KeypointsOnImage.from_xy_array([\n (0, 0),\n (50, 0),\n (50, 50),\n (0, 50)\n ], shape=(50, 50, 3))\n\n for i in sm.xrange(10):\n kpsoi_aug = aug(keypoints=kpsoi)\n\n h, w = kpsoi_aug.shape[0:2]\n y0, x0 = kpsoi_aug.keypoints[0].y, kpsoi_aug.keypoints[0].x\n y1, x1 = kpsoi_aug.keypoints[1].y, kpsoi_aug.keypoints[1].x\n y2, x2 = kpsoi_aug.keypoints[2].y, kpsoi_aug.keypoints[2].x\n y3, x3 = kpsoi_aug.keypoints[3].y, kpsoi_aug.keypoints[3].x\n\n y_min = 
min([y0, y1, y2, y3])\n y_max = max([y0, y1, y2, y3])\n x_min = min([x0, x1, x2, x3])\n x_max = max([x0, x1, x2, x3])\n tol = 0.5\n assert 0-tol <= y_min <= tol, \"Got y_min=%.4f at %d\" % (y_min, i)\n assert 0-tol <= x_min <= tol, \"Got x_min=%.4f at %d\" % (x_min, i)\n assert h-tol <= y_max <= h+tol, (\n \"Got y_max=%.4f for h=%.2f at %d\" % (y_max, h, i))\n assert w-tol <= x_max <= w+tol, (\n \"Got x_max=%.4f for w=%.2f at %d\" % (x_max, w, i))\n\n # ---------\n # unusual channel numbers\n # ---------\n def test_unusual_channel_numbers(self):\n shapes = [\n (1, 1, 4),\n (1, 1, 5),\n (1, 1, 512),\n (1, 1, 513)\n ]\n\n for shape in shapes:\n with self.subTest(shape=shape):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.PerspectiveTransform(scale=0.01)\n\n image_aug = aug(image=image)\n\n assert np.all(image_aug == 0)\n assert image_aug.dtype.name == \"uint8\"\n assert image_aug.shape == shape\n\n # ---------\n # zero-sized axes\n # ---------\n def test_zero_sized_axes(self):\n shapes = [\n (0, 0),\n (0, 1),\n (1, 0),\n (0, 1, 0),\n (1, 0, 0),\n (0, 1, 1),\n (1, 0, 1)\n ]\n\n for shape in shapes:\n for keep_size in [False, True]:\n with self.subTest(shape=shape, keep_size=keep_size):\n for _ in sm.xrange(3):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.PerspectiveTransform(scale=0.01)\n\n image_aug = aug(image=image)\n\n assert image_aug.dtype.name == \"uint8\"\n assert image_aug.shape == shape\n\n # --------\n # get_parameters\n # --------\n def test_get_parameters(self):\n aug = iaa.PerspectiveTransform(scale=0.1, keep_size=False)\n params = aug.get_parameters()\n assert is_parameter_instance(params[0], iap.Normal)\n assert is_parameter_instance(params[0].scale, iap.Deterministic)\n assert 0.1 - 1e-8 < params[0].scale.value < 0.1 + 1e-8\n assert params[1] is False\n assert params[2].value == 0\n assert params[3].value == \"constant\"\n assert params[4] is False\n\n # --------\n # other dtypes\n # --------\n def test_other_dtypes_bool(self):\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n\n y1 = int(30 * 0.2)\n y2 = int(30 * 0.8)\n x1 = int(30 * 0.2)\n x2 = int(30 * 0.8)\n\n image = np.zeros((30, 30), dtype=bool)\n image[12:18, :] = True\n image[:, 12:18] = True\n expected = image[y1:y2, x1:x2]\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.name == image.dtype.name\n assert image_aug.shape == expected.shape\n assert (np.sum(image_aug == expected) / expected.size) > 0.9\n\n def test_other_dtypes_uint_int(self):\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n\n y1 = int(30 * 0.2)\n y2 = int(30 * 0.8)\n x1 = int(30 * 0.2)\n x2 = int(30 * 0.8)\n\n dtypes = [\"uint8\", \"uint16\", \"int8\", \"int16\"]\n for dtype in dtypes:\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n if np.dtype(dtype).kind == \"i\":\n values = [0, 1, 5, 10, 100, int(0.1 * max_value),\n int(0.2 * max_value), int(0.5 * max_value),\n max_value-100, max_value]\n values = values + [(-1)*value for value in values]\n else:\n values = [0, 1, 5, 10, 100, int(center_value),\n int(0.1 * max_value), int(0.2 * max_value),\n int(0.5 * max_value), max_value-100, max_value]\n\n for value in values:\n with self.subTest(dtype=dtype, value=value):\n image = np.zeros((30, 30), dtype=dtype)\n image[12:18, :] = value\n image[:, 12:18] = value\n expected = image[y1:y2, x1:x2]\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert 
image_aug.shape == expected.shape\n # rather high tolerance of 0.7 here because of\n # interpolation\n assert (\n np.sum(image_aug == expected) / expected.size\n ) > 0.7\n\n def test_other_dtypes_float(self):\n aug = iaa.PerspectiveTransform(scale=0.2, keep_size=False)\n aug.jitter = iap.Deterministic(0.2)\n\n y1 = int(30 * 0.2)\n y2 = int(30 * 0.8)\n x1 = int(30 * 0.2)\n x2 = int(30 * 0.8)\n\n dtypes = [\"float16\", \"float32\", \"float64\"]\n for dtype in dtypes:\n def _isclose(a, b):\n atol = 1e-4 if dtype == \"float16\" else 1e-8\n return np.isclose(a, b, atol=atol, rtol=0)\n\n isize = np.dtype(dtype).itemsize\n values = [0.01, 1.0, 10.0, 100.0, 500 ** (isize - 1),\n 1000 ** (isize - 1)]\n values = values + [(-1) * value for value in values]\n for value in values:\n with self.subTest(dtype=dtype, value=value):\n image = np.zeros((30, 30), dtype=dtype)\n image[12:18, :] = value\n image[:, 12:18] = value\n expected = image[y1:y2, x1:x2]\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert image_aug.shape == expected.shape\n # rather high tolerance of 0.7 here because of\n # interpolation\n assert (\n np.sum(_isclose(image_aug, expected)) / expected.size\n ) > 0.7\n\n def test_pickleable(self):\n aug = iaa.PerspectiveTransform(0.2, seed=1)\n runtest_pickleable_uint8_img(aug, iterations=4, shape=(25, 25, 1))\n\n\nclass _elastic_trans_temp_thresholds(object):\n def __init__(self, alpha, sigma):\n self.alpha = alpha\n self.sigma = sigma\n self.old_alpha = None\n self.old_sigma = None\n\n def __enter__(self):\n self.old_alpha = iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH\n self.old_sigma = iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH\n iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = self.alpha\n iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = self.sigma\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n iaa.ElasticTransformation.KEYPOINT_AUG_ALPHA_THRESH = self.old_alpha\n iaa.ElasticTransformation.KEYPOINT_AUG_SIGMA_THRESH = self.old_sigma\n\n\n# TODO add tests for order\n# TODO improve tests for cval\n# TODO add tests for mode\nclass TestElasticTransformation(unittest.TestCase):\n def setUp(self):\n reseed()\n\n @property\n def image(self):\n img = np.zeros((50, 50), dtype=np.uint8) + 255\n img = np.pad(img, ((100, 100), (100, 100)), mode=\"constant\",\n constant_values=0)\n return img\n\n @property\n def mask(self):\n img = self.image\n mask = img > 0\n return mask\n\n @property\n def heatmaps(self):\n img = self.image\n return HeatmapsOnImage(img.astype(np.float32) / 255.0,\n shape=img.shape)\n\n @property\n def segmaps(self):\n img = self.image\n return SegmentationMapsOnImage((img > 0).astype(np.int32),\n shape=img.shape)\n\n # -----------\n # __init__\n # -----------\n def test___init___bad_datatype_for_alpha_leads_to_failure(self):\n # test alpha having bad datatype\n got_exception = False\n try:\n _ = iaa.ElasticTransformation(alpha=False, sigma=0.25)\n except Exception as exc:\n assert \"Expected \" in str(exc)\n got_exception = True\n assert got_exception\n\n def test___init___alpha_is_tuple(self):\n # test alpha being tuple\n aug = iaa.ElasticTransformation(alpha=(1.0, 2.0), sigma=0.25)\n assert is_parameter_instance(aug.alpha, iap.Uniform)\n assert is_parameter_instance(aug.alpha.a, iap.Deterministic)\n assert is_parameter_instance(aug.alpha.b, iap.Deterministic)\n assert 1.0 - 1e-8 < aug.alpha.a.value < 1.0 + 1e-8\n assert 2.0 - 1e-8 < aug.alpha.b.value < 2.0 + 1e-8\n\n def test___init___sigma_is_tuple(self):\n # 
test sigma being tuple\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=(1.0, 2.0))\n assert is_parameter_instance(aug.sigma, iap.Uniform)\n assert is_parameter_instance(aug.sigma.a, iap.Deterministic)\n assert is_parameter_instance(aug.sigma.b, iap.Deterministic)\n assert 1.0 - 1e-8 < aug.sigma.a.value < 1.0 + 1e-8\n assert 2.0 - 1e-8 < aug.sigma.b.value < 2.0 + 1e-8\n\n def test___init___bad_datatype_for_sigma_leads_to_failure(self):\n # test sigma having bad datatype\n got_exception = False\n try:\n _ = iaa.ElasticTransformation(alpha=0.25, sigma=False)\n except Exception as exc:\n assert \"Expected \" in str(exc)\n got_exception = True\n assert got_exception\n\n def test___init___order_is_all(self):\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=ia.ALL)\n assert is_parameter_instance(aug.order, iap.Choice)\n assert all([order in aug.order.a for order in [0, 1, 2, 3, 4, 5]])\n\n def test___init___order_is_int(self):\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=1)\n assert is_parameter_instance(aug.order, iap.Deterministic)\n assert aug.order.value == 1\n\n def test___init___order_is_list(self):\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=[0, 1, 2])\n assert is_parameter_instance(aug.order, iap.Choice)\n assert all([order in aug.order.a for order in [0, 1, 2]])\n\n def test___init___order_is_stochastic_parameter(self):\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,\n order=iap.Choice([0, 1, 2, 3]))\n assert is_parameter_instance(aug.order, iap.Choice)\n assert all([order in aug.order.a for order in [0, 1, 2, 3]])\n\n def test___init___bad_datatype_for_order_leads_to_failure(self):\n got_exception = False\n try:\n _ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, order=False)\n except Exception as exc:\n assert \"Expected \" in str(exc)\n got_exception = True\n assert got_exception\n\n def test___init___cval_is_all(self):\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=ia.ALL)\n assert is_parameter_instance(aug.cval, iap.Uniform)\n assert is_parameter_instance(aug.cval.a, iap.Deterministic)\n assert is_parameter_instance(aug.cval.b, iap.Deterministic)\n assert aug.cval.a.value == 0\n assert aug.cval.b.value == 255\n\n def test___init___cval_is_int(self):\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=128)\n assert is_parameter_instance(aug.cval, iap.Deterministic)\n assert aug.cval.value == 128\n\n def test___init___cval_is_list(self):\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,\n cval=[16, 32, 64])\n assert is_parameter_instance(aug.cval, iap.Choice)\n assert all([cval in aug.cval.a for cval in [16, 32, 64]])\n\n def test___init___cval_is_stochastic_parameter(self):\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0,\n cval=iap.Choice([16, 32, 64]))\n assert is_parameter_instance(aug.cval, iap.Choice)\n assert all([cval in aug.cval.a for cval in [16, 32, 64]])\n\n def test___init___cval_is_tuple(self):\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=(128, 255))\n assert is_parameter_instance(aug.cval, iap.Uniform)\n assert is_parameter_instance(aug.cval.a, iap.Deterministic)\n assert is_parameter_instance(aug.cval.b, iap.Deterministic)\n assert aug.cval.a.value == 128\n assert aug.cval.b.value == 255\n\n def test___init___bad_datatype_for_cval_leads_to_failure(self):\n got_exception = False\n try:\n _ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, cval=False)\n except Exception as exc:\n assert \"Expected \" in str(exc)\n got_exception = True\n assert 
got_exception\n\n def test___init___mode_is_all(self):\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=ia.ALL)\n assert is_parameter_instance(aug.mode, iap.Choice)\n assert all([\n mode in aug.mode.a\n for mode\n in [\"constant\", \"nearest\", \"reflect\", \"wrap\"]])\n\n def test___init___mode_is_string(self):\n aug = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=\"nearest\")\n assert is_parameter_instance(aug.mode, iap.Deterministic)\n assert aug.mode.value == \"nearest\"\n\n def test___init___mode_is_list(self):\n aug = iaa.ElasticTransformation(\n alpha=0.25, sigma=1.0, mode=[\"constant\", \"nearest\"])\n assert is_parameter_instance(aug.mode, iap.Choice)\n assert all([mode in aug.mode.a for mode in [\"constant\", \"nearest\"]])\n\n def test___init___mode_is_stochastic_parameter(self):\n aug = iaa.ElasticTransformation(\n alpha=0.25, sigma=1.0, mode=iap.Choice([\"constant\", \"nearest\"]))\n assert is_parameter_instance(aug.mode, iap.Choice)\n assert all([mode in aug.mode.a for mode in [\"constant\", \"nearest\"]])\n\n def test___init___bad_datatype_for_mode_leads_to_failure(self):\n got_exception = False\n try:\n _ = iaa.ElasticTransformation(alpha=0.25, sigma=1.0, mode=False)\n except Exception as exc:\n assert \"Expected \" in str(exc)\n got_exception = True\n assert got_exception\n\n # -----------\n # alpha, sigma\n # -----------\n def test_images(self):\n # test basic functionality\n aug = iaa.ElasticTransformation(alpha=5, sigma=0.25)\n\n observed = aug.augment_image(self.image)\n\n mask = self.mask\n # assume that some white/255 pixels have been moved away from the\n # center and replaced by black/0 pixels\n assert np.sum(observed[mask]) < np.sum(self.image[mask])\n # assume that some black/0 pixels have been moved away from the outer\n # area and replaced by white/255 pixels\n assert np.sum(observed[~mask]) > np.sum(self.image[~mask])\n\n def test_images_nonsquare(self):\n # test basic functionality with non-square images\n aug = iaa.ElasticTransformation(alpha=2.0, sigma=0.25, order=3)\n img_nonsquare = np.zeros((50, 100), dtype=np.uint8) + 255\n img_nonsquare = np.pad(img_nonsquare, ((100, 100), (100, 100)),\n mode=\"constant\", constant_values=0)\n mask_nonsquare = (img_nonsquare > 0)\n\n observed = aug.augment_image(img_nonsquare)\n\n assert (\n np.sum(observed[mask_nonsquare])\n < np.sum(img_nonsquare[mask_nonsquare]))\n assert (\n np.sum(observed[~mask_nonsquare])\n > np.sum(img_nonsquare[~mask_nonsquare]))\n\n def test_images_unusual_channel_numbers(self):\n # test unusual channel numbers\n aug = iaa.ElasticTransformation(alpha=5, sigma=0.5)\n for nb_channels in [1, 2, 4, 5, 7, 10, 11]:\n img_c = np.tile(self.image[..., np.newaxis], (1, 1, nb_channels))\n assert img_c.shape == (250, 250, nb_channels)\n\n observed = aug.augment_image(img_c)\n\n assert observed.shape == (250, 250, nb_channels)\n for c in sm.xrange(1, nb_channels):\n assert np.array_equal(observed[..., c], observed[..., 0])\n\n def test_heatmaps(self):\n # test basic functionality, heatmaps\n aug = iaa.ElasticTransformation(alpha=0.5, sigma=0.25)\n observed = aug.augment_heatmaps([self.heatmaps])[0]\n\n mask = self.mask\n assert observed.shape == self.heatmaps.shape\n _assert_same_min_max(observed, self.heatmaps)\n assert (\n np.sum(observed.get_arr()[mask])\n < np.sum(self.heatmaps.get_arr()[mask]))\n assert (\n np.sum(observed.get_arr()[~mask])\n > np.sum(self.heatmaps.get_arr()[~mask]))\n\n def test_segmaps(self):\n # test basic functionality, segmaps\n # alpha=1.5 instead of 0.5 as 
above here, because otherwise nothing\n # is moved\n aug = iaa.ElasticTransformation(alpha=1.5, sigma=0.25)\n\n observed = aug.augment_segmentation_maps([self.segmaps])[0]\n\n mask = self.mask\n assert observed.shape == self.segmaps.shape\n assert (\n np.sum(observed.get_arr()[mask])\n < np.sum(self.segmaps.get_arr()[mask]))\n assert (\n np.sum(observed.get_arr()[~mask])\n > np.sum(self.segmaps.get_arr()[~mask]))\n\n def test_images_weak_vs_strong_alpha(self):\n # test effects of increased alpha strength\n aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)\n aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)\n\n observed1 = aug1.augment_image(self.image)\n observed2 = aug2.augment_image(self.image)\n\n mask = self.mask\n # assume that the inner area has become more black-ish when using high\n # alphas (more white pixels were moved out of the inner area)\n assert np.sum(observed1[mask]) > np.sum(observed2[mask])\n # assume that the outer area has become more white-ish when using high\n # alphas (more black pixels were moved into the inner area)\n assert np.sum(observed1[~mask]) < np.sum(observed2[~mask])\n\n def test_heatmaps_weak_vs_strong_alpha(self):\n # test effects of increased alpha strength, heatmaps\n aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)\n aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)\n\n observed1 = aug1.augment_heatmaps([self.heatmaps])[0]\n observed2 = aug2.augment_heatmaps([self.heatmaps])[0]\n\n mask = self.mask\n assert observed1.shape == self.heatmaps.shape\n assert observed2.shape == self.heatmaps.shape\n _assert_same_min_max(observed1, self.heatmaps)\n _assert_same_min_max(observed2, self.heatmaps)\n assert (\n np.sum(observed1.get_arr()[mask])\n > np.sum(observed2.get_arr()[mask]))\n assert (\n np.sum(observed1.get_arr()[~mask])\n < np.sum(observed2.get_arr()[~mask]))\n\n def test_segmaps_weak_vs_strong_alpha(self):\n # test effects of increased alpha strength, segmaps\n aug1 = iaa.ElasticTransformation(alpha=0.1, sigma=0.25)\n aug2 = iaa.ElasticTransformation(alpha=5.0, sigma=0.25)\n\n observed1 = aug1.augment_segmentation_maps([self.segmaps])[0]\n observed2 = aug2.augment_segmentation_maps([self.segmaps])[0]\n\n mask = self.mask\n assert observed1.shape == self.segmaps.shape\n assert observed2.shape == self.segmaps.shape\n assert (\n np.sum(observed1.get_arr()[mask])\n > np.sum(observed2.get_arr()[mask]))\n assert (\n np.sum(observed1.get_arr()[~mask])\n < np.sum(observed2.get_arr()[~mask]))\n\n def test_images_low_vs_high_sigma(self):\n # test effects of increased sigmas\n aug1 = iaa.ElasticTransformation(alpha=3.0, sigma=0.1)\n aug2 = iaa.ElasticTransformation(alpha=3.0, sigma=3.0)\n\n observed1 = aug1.augment_image(self.image)\n observed2 = aug2.augment_image(self.image)\n\n observed1_std_hori = np.std(\n observed1.astype(np.float32)[:, 1:]\n - observed1.astype(np.float32)[:, :-1])\n observed2_std_hori = np.std(\n observed2.astype(np.float32)[:, 1:]\n - observed2.astype(np.float32)[:, :-1])\n observed1_std_vert = np.std(\n observed1.astype(np.float32)[1:, :]\n - observed1.astype(np.float32)[:-1, :])\n observed2_std_vert = np.std(\n observed2.astype(np.float32)[1:, :]\n - observed2.astype(np.float32)[:-1, :])\n observed1_std = (observed1_std_hori + observed1_std_vert) / 2\n observed2_std = (observed2_std_hori + observed2_std_vert) / 2\n assert observed1_std > observed2_std\n\n def test_images_alpha_is_stochastic_parameter(self):\n # test alpha being iap.Choice\n aug = iaa.ElasticTransformation(alpha=iap.Choice([0.001, 5.0]),\n 
sigma=0.25)\n seen = [0, 0]\n for _ in sm.xrange(100):\n observed = aug.augment_image(self.image)\n diff = np.average(\n np.abs(\n self.image.astype(np.float32)\n - observed.astype(np.float32)\n )\n )\n if diff < 1.0:\n seen[0] += 1\n else:\n seen[1] += 1\n assert seen[0] > 10\n assert seen[1] > 10\n\n def test_sigma_is_stochastic_parameter(self):\n # test sigma being iap.Choice\n for order in [0, 1, 3]:\n with self.subTest(order=order):\n aug = iaa.ElasticTransformation(alpha=50.0,\n sigma=iap.Choice([0.001, 5.0]),\n order=order)\n seen = [0, 0]\n for _ in sm.xrange(100):\n observed = aug.augment_image(self.image)\n\n observed_std_hori = np.std(\n observed.astype(np.float32)[:, 1:]\n - observed.astype(np.float32)[:, :-1])\n observed_std_vert = np.std(\n observed.astype(np.float32)[1:, :]\n - observed.astype(np.float32)[:-1, :])\n observed_std = (observed_std_hori + observed_std_vert) / 2\n\n if observed_std > 25.0:\n seen[0] += 1\n else:\n seen[1] += 1\n assert seen[0] > 10\n assert seen[1] > 10\n\n # -----------\n # cval\n # -----------\n def test_images_cval_is_int_and_order_is_0(self):\n aug = iaa.ElasticTransformation(alpha=30.0, sigma=3.0, mode=\"constant\",\n cval=255, order=0)\n img = np.zeros((100, 100), dtype=np.uint8)\n\n observed = aug.augment_image(img)\n\n assert np.sum(observed == 255) > 0\n assert np.sum(np.logical_and(0 < observed, observed < 255)) == 0\n\n def test_images_cval_is_int_and_order_is_0_weak_alpha(self):\n aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode=\"constant\",\n cval=0, order=0)\n img = np.zeros((100, 100), dtype=np.uint8)\n\n observed = aug.augment_image(img)\n\n assert np.sum(observed == 255) == 0\n\n def test_images_cval_is_int_and_order_is_2(self):\n aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode=\"constant\",\n cval=255, order=2)\n img = np.zeros((100, 100), dtype=np.uint8)\n\n observed = aug.augment_image(img)\n\n assert np.sum(np.logical_and(0 < observed, observed < 255)) > 0\n\n def test_images_cval_is_int_image_hw3(self):\n aug = iaa.ElasticTransformation(alpha=5.0, sigma=3.0, mode=\"constant\",\n cval=255, order=0)\n img = np.zeros((100, 100, 3), dtype=np.uint8)\n\n observed = aug.augment_image(img)\n\n count_255 = np.sum(observed == 255, axis=2)\n mask_not_all_channels_same_intensity = np.logical_and(\n count_255 > 0, count_255 < 3)\n mask_all_channels_same_intensity = (count_255 == 3)\n assert not np.any(mask_not_all_channels_same_intensity)\n assert np.any(mask_all_channels_same_intensity)\n\n def test_heatmaps_ignore_cval(self):\n # cval with heatmaps\n heatmaps = HeatmapsOnImage(\n np.zeros((32, 32, 1), dtype=np.float32), shape=(32, 32, 3))\n aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0,\n mode=\"constant\", cval=255)\n\n observed = aug.augment_heatmaps([heatmaps])[0]\n\n assert observed.shape == heatmaps.shape\n _assert_same_min_max(observed, heatmaps)\n assert np.sum(observed.get_arr() > 0.01) == 0\n\n def test_segmaps_ignore_cval(self):\n # cval with segmaps\n segmaps = SegmentationMapsOnImage(\n np.zeros((32, 32, 1), dtype=np.int32), shape=(32, 32, 3))\n aug = iaa.ElasticTransformation(alpha=3.0, sigma=3.0, mode=\"constant\",\n cval=255)\n\n observed = aug.augment_segmentation_maps([segmaps])[0]\n\n assert observed.shape == segmaps.shape\n assert np.sum(observed.get_arr() > 0) == 0\n\n # -----------\n # keypoints\n # -----------\n def test_keypoints_no_movement_if_alpha_below_threshold(self):\n # for small alpha, should not move if below threshold\n with _elastic_trans_temp_thresholds(alpha=1.0, 
sigma=0.0):\n kps = [\n ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),\n ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),\n ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),\n ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),\n ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),\n ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]\n kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))\n aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)\n\n observed = aug.augment_keypoints([kpsoi])[0]\n\n d = kpsoi.to_xy_array() - observed.to_xy_array()\n d[:, 0] = d[:, 0] ** 2\n d[:, 1] = d[:, 1] ** 2\n d = np.sum(d, axis=1)\n d = np.average(d, axis=0)\n assert d < 1e-8\n\n def test_keypoints_no_movement_if_sigma_below_threshold(self):\n # for small sigma, should not move if below threshold\n with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):\n kps = [\n ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),\n ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),\n ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),\n ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),\n ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),\n ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]\n kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))\n aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)\n\n observed = aug.augment_keypoints([kpsoi])[0]\n\n d = kpsoi.to_xy_array() - observed.to_xy_array()\n d[:, 0] = d[:, 0] ** 2\n d[:, 1] = d[:, 1] ** 2\n d = np.sum(d, axis=1)\n d = np.average(d, axis=0)\n assert d < 1e-8\n\n def test_keypoints_small_movement_for_weak_alpha_if_threshold_zero(self):\n # for small alpha (at sigma 1.0), should barely move\n # if thresholds set to zero\n with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):\n kps = [\n ia.Keypoint(x=1, y=1), ia.Keypoint(x=15, y=25),\n ia.Keypoint(x=5, y=5), ia.Keypoint(x=7, y=4),\n ia.Keypoint(x=48, y=5), ia.Keypoint(x=21, y=37),\n ia.Keypoint(x=32, y=39), ia.Keypoint(x=6, y=8),\n ia.Keypoint(x=12, y=21), ia.Keypoint(x=3, y=45),\n ia.Keypoint(x=45, y=3), ia.Keypoint(x=7, y=48)]\n kpsoi = ia.KeypointsOnImage(kps, shape=(50, 50))\n aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)\n\n observed = aug.augment_keypoints([kpsoi])[0]\n\n d = kpsoi.to_xy_array() - observed.to_xy_array()\n d[:, 0] = d[:, 0] ** 2\n d[:, 1] = d[:, 1] ** 2\n d = np.sum(d, axis=1)\n d = np.average(d, axis=0)\n assert d < 0.5\n\n def test_image_keypoint_alignment(self):\n # test alignment between images and keypoints\n image = np.zeros((120, 70), dtype=np.uint8)\n s = 3\n image[:, 35-s:35+s+1] = 255\n kps = [ia.Keypoint(x=35, y=20),\n ia.Keypoint(x=35, y=40),\n ia.Keypoint(x=35, y=60),\n ia.Keypoint(x=35, y=80),\n ia.Keypoint(x=35, y=100)]\n kpsoi = ia.KeypointsOnImage(kps, shape=image.shape)\n aug = iaa.ElasticTransformation(alpha=70, sigma=5)\n aug_det = aug.to_deterministic()\n\n images_aug = aug_det.augment_images([image, image])\n kpsois_aug = aug_det.augment_keypoints([kpsoi, kpsoi])\n\n count_bad = 0\n for image_aug, kpsoi_aug in zip(images_aug, kpsois_aug):\n assert kpsoi_aug.shape == (120, 70)\n assert len(kpsoi_aug.keypoints) == 5\n for kp_aug in kpsoi_aug.keypoints:\n x, y = int(np.round(kp_aug.x)), int(np.round(kp_aug.y))\n bb = ia.BoundingBox(x1=x-2, x2=x+2+1, y1=y-2, y2=y+2+1)\n img_ex = bb.extract_from_image(image_aug)\n if np.any(img_ex > 10):\n pass # close to expected location\n else:\n count_bad += 1\n assert count_bad <= 1\n\n def test_empty_keypoints(self):\n aug = iaa.ElasticTransformation(alpha=10, sigma=10)\n kpsoi = ia.KeypointsOnImage([], shape=(10, 10, 3))\n\n kpsoi_aug = 
aug.augment_keypoints(kpsoi)\n\n assert len(kpsoi_aug.keypoints) == 0\n assert kpsoi_aug.shape == (10, 10, 3)\n\n # -----------\n # abstract methods for polygons and line strings\n # -----------\n @classmethod\n def _test_cbaois_no_movement_if_alpha_below_threshold(\n cls, cba_class, cbaoi_class, augf_name):\n # for small alpha, should not move if below threshold\n with _elastic_trans_temp_thresholds(alpha=1.0, sigma=0.0):\n cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])\n cbaoi = cbaoi_class([cba], shape=(50, 50))\n aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)\n\n observed = getattr(aug, augf_name)(cbaoi)\n\n assert observed.shape == (50, 50)\n assert len(observed.items) == 1\n assert observed.items[0].coords_almost_equals(cba)\n if hasattr(observed.items[0], \"is_valid\"):\n assert observed.items[0].is_valid\n\n @classmethod\n def _test_cbaois_no_movement_if_sigma_below_threshold(\n cls, cba_class, cbaoi_class, augf_name):\n # for small sigma, should not move if below threshold\n with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):\n cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])\n cbaoi = cbaoi_class([cba], shape=(50, 50))\n aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)\n\n observed = getattr(aug, augf_name)(cbaoi)\n\n assert observed.shape == (50, 50)\n assert len(observed.items) == 1\n assert observed.items[0].coords_almost_equals(cba)\n if hasattr(observed.items[0], \"is_valid\"):\n assert observed.items[0].is_valid\n\n @classmethod\n def _test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(\n cls, cba_class, cbaoi_class, augf_name):\n # for small alpha (at sigma 1.0), should barely move\n # if thresholds set to zero\n with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):\n cba = cba_class([(10, 15), (40, 15), (40, 35), (10, 35)])\n cbaoi = cbaoi_class([cba], shape=(50, 50))\n aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)\n\n observed = getattr(aug, augf_name)(cbaoi)\n\n assert observed.shape == (50, 50)\n assert len(observed.items) == 1\n assert observed.items[0].coords_almost_equals(\n cba, max_distance=0.5)\n if hasattr(observed.items[0], \"is_valid\"):\n assert observed.items[0].is_valid\n\n @classmethod\n def _test_image_cbaoi_alignment(cls, cba_class, cbaoi_class, augf_name):\n # test alignment between images and polygons\n height_step_size = 50\n width_step_size = 30\n height_steps = 2 # don't set >2, otherwise polygon will be broken\n width_steps = 10\n height = (2+height_steps) * height_step_size\n width = (2+width_steps) * width_step_size\n s = 3\n\n image = np.zeros((height, width), dtype=np.uint8)\n\n points = []\n for w in sm.xrange(0, 2+width_steps):\n if w not in [0, width_steps+2-1]:\n x = width_step_size * w\n y = height_step_size\n points.append((x, y))\n image[y-s:y+s+1, x-s:x+s+1] = 255\n for w in sm.xrange(2+width_steps-1, 0, -1):\n if w not in [0, width_steps+2-1]:\n x = width_step_size * w\n y = height_step_size*2\n points.append((x, y))\n image[y-s:y+s+1, x-s:x+s+1] = 255\n\n cba = cba_class(points)\n cbaoi = cbaoi_class([cba], shape=image.shape)\n aug = iaa.ElasticTransformation(alpha=100, sigma=7)\n aug_det = aug.to_deterministic()\n\n images_aug = aug_det.augment_images([image, image])\n cbaois_aug = getattr(aug_det, augf_name)([cbaoi, cbaoi])\n\n count_bad = 0\n for image_aug, cbaoi_aug in zip(images_aug, cbaois_aug):\n assert cbaoi_aug.shape == image.shape\n assert len(cbaoi_aug.items) == 1\n for cba_aug in cbaoi_aug.items:\n if hasattr(cba_aug, \"is_valid\"):\n assert 
cba_aug.is_valid\n for point_aug in cba_aug.coords:\n x, y = point_aug[0], point_aug[1]\n bb = ia.BoundingBox(x1=x-2, x2=x+2, y1=y-2, y2=y+2)\n img_ex = bb.extract_from_image(image_aug)\n if np.any(img_ex > 10):\n pass # close to expected location\n else:\n count_bad += 1\n assert count_bad <= 3\n\n @classmethod\n def _test_empty_cbaois(cls, cbaoi, augf_name):\n aug = iaa.ElasticTransformation(alpha=10, sigma=10)\n\n cbaoi_aug = getattr(aug, augf_name)(cbaoi)\n\n assert_cbaois_equal(cbaoi_aug, cbaoi)\n\n # -----------\n # polygons\n # -----------\n def test_polygons_no_movement_if_alpha_below_threshold(self):\n self._test_cbaois_no_movement_if_alpha_below_threshold(\n ia.Polygon, ia.PolygonsOnImage, \"augment_polygons\")\n\n def test_polygons_no_movement_if_sigma_below_threshold(self):\n self._test_cbaois_no_movement_if_sigma_below_threshold(\n ia.Polygon, ia.PolygonsOnImage, \"augment_polygons\")\n\n def test_polygons_small_movement_for_weak_alpha_if_threshold_zero(self):\n self._test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(\n ia.Polygon, ia.PolygonsOnImage, \"augment_polygons\")\n\n def test_image_polygon_alignment(self):\n self._test_image_cbaoi_alignment(\n ia.Polygon, ia.PolygonsOnImage, \"augment_polygons\")\n\n def test_empty_polygons(self):\n cbaoi = ia.PolygonsOnImage([], shape=(10, 10, 3))\n self._test_empty_cbaois(cbaoi, \"augment_polygons\")\n\n # -----------\n # line strings\n # -----------\n def test_line_strings_no_movement_if_alpha_below_threshold(self):\n self._test_cbaois_no_movement_if_alpha_below_threshold(\n ia.LineString, ia.LineStringsOnImage, \"augment_line_strings\")\n\n def test_line_strings_no_movement_if_sigma_below_threshold(self):\n self._test_cbaois_no_movement_if_sigma_below_threshold(\n ia.LineString, ia.LineStringsOnImage, \"augment_line_strings\")\n\n def test_line_strings_small_movement_for_weak_alpha_if_threshold_zero(self):\n self._test_cbaois_small_movement_for_weak_alpha_if_threshold_zero(\n ia.LineString, ia.LineStringsOnImage, \"augment_line_strings\")\n\n def test_image_line_string_alignment(self):\n self._test_image_cbaoi_alignment(\n ia.LineString, ia.LineStringsOnImage, \"augment_line_strings\")\n\n def test_empty_line_strings(self):\n cbaoi = ia.LineStringsOnImage([], shape=(10, 10, 3))\n self._test_empty_cbaois(cbaoi, \"augment_line_strings\")\n\n # -----------\n # bounding boxes\n # -----------\n def test_bounding_boxes_no_movement_if_alpha_below_threshold(self):\n # for small alpha, should not move if below threshold\n with _elastic_trans_temp_thresholds(alpha=1.0, sigma=0.0):\n bbs = [\n ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),\n ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)\n ]\n bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))\n aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)\n\n observed = aug.augment_bounding_boxes([bbsoi])[0]\n\n d = bbsoi.to_xyxy_array() - observed.to_xyxy_array()\n d = d.reshape((2*2, 2))\n d[:, 0] = d[:, 0] ** 2\n d[:, 1] = d[:, 1] ** 2\n d = np.sum(d, axis=1)\n d = np.average(d, axis=0)\n assert d < 1e-8\n\n def test_bounding_boxes_no_movement_if_sigma_below_threshold(self):\n # for small sigma, should not move if below threshold\n with _elastic_trans_temp_thresholds(alpha=0.0, sigma=1.0):\n bbs = [\n ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),\n ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)\n ]\n bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))\n aug = iaa.ElasticTransformation(alpha=1.0, sigma=0.001)\n\n observed = aug.augment_bounding_boxes([bbsoi])[0]\n\n d = bbsoi.to_xyxy_array() - 
observed.to_xyxy_array()\n d = d.reshape((2*2, 2))\n d[:, 0] = d[:, 0] ** 2\n d[:, 1] = d[:, 1] ** 2\n d = np.sum(d, axis=1)\n d = np.average(d, axis=0)\n assert d < 1e-8\n\n def test_bounding_boxes_small_movement_for_weak_alpha_if_threshold_zero(\n self):\n # for small alpha (at sigma 1.0), should barely move\n # if thresholds set to zero\n with _elastic_trans_temp_thresholds(alpha=0.0, sigma=0.0):\n bbs = [\n ia.BoundingBox(x1=10, y1=12, x2=20, y2=22),\n ia.BoundingBox(x1=20, y1=32, x2=40, y2=42)\n ]\n bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(50, 50))\n aug = iaa.ElasticTransformation(alpha=0.001, sigma=1.0)\n\n observed = aug.augment_bounding_boxes([bbsoi])[0]\n\n d = bbsoi.to_xyxy_array() - observed.to_xyxy_array()\n d = d.reshape((2*2, 2))\n d[:, 0] = d[:, 0] ** 2\n d[:, 1] = d[:, 1] ** 2\n d = np.sum(d, axis=1)\n d = np.average(d, axis=0)\n assert d < 0.5\n\n def test_image_bounding_box_alignment(self):\n # test alignment between images and bounding boxes\n image = np.zeros((100, 100), dtype=np.uint8)\n image[35:35+1, 35:65+1] = 255\n image[65:65+1, 35:65+1] = 255\n image[35:65+1, 35:35+1] = 255\n image[35:65+1, 65:65+1] = 255\n bbs = [\n ia.BoundingBox(x1=35.5, y1=35.5, x2=65.5, y2=65.5)\n ]\n bbsoi = ia.BoundingBoxesOnImage(bbs, shape=image.shape)\n aug = iaa.ElasticTransformation(alpha=70, sigma=5)\n\n images_aug, bbsois_aug = aug(images=[image, image],\n bounding_boxes=[bbsoi, bbsoi])\n\n count_bad = 0\n for image_aug, bbsoi_aug in zip(images_aug, bbsois_aug):\n assert bbsoi_aug.shape == (100, 100)\n assert len(bbsoi_aug.bounding_boxes) == 1\n for bb_aug in bbsoi_aug.bounding_boxes:\n if bb_aug.is_fully_within_image(image_aug):\n # top, bottom, left, right\n x1 = bb_aug.x1_int\n x2 = bb_aug.x2_int\n y1 = bb_aug.y1_int\n y2 = bb_aug.y2_int\n top_row = image_aug[y1-2:y1+2, x1-2:x2+2]\n btm_row = image_aug[y2-2:y2+2, x1-2:x2+2]\n lft_row = image_aug[y1-2:y2+2, x1-2:x1+2]\n rgt_row = image_aug[y1-2:y2+2, x2-2:x2+2]\n assert np.max(top_row) > 10\n assert np.max(btm_row) > 10\n assert np.max(lft_row) > 10\n assert np.max(rgt_row) > 10\n else:\n count_bad += 1\n assert count_bad <= 1\n\n def test_empty_bounding_boxes(self):\n aug = iaa.ElasticTransformation(alpha=10, sigma=10)\n bbsoi = ia.BoundingBoxesOnImage([], shape=(10, 10, 3))\n\n bbsoi_aug = aug.augment_bounding_boxes(bbsoi)\n\n assert len(bbsoi_aug.bounding_boxes) == 0\n assert bbsoi_aug.shape == (10, 10, 3)\n\n # -----------\n # heatmaps alignment\n # -----------\n def test_image_heatmaps_alignment(self):\n # test alignment between images and heatmaps\n for order in [0, 1, 3]:\n with self.subTest(order=order):\n img = np.zeros((80, 80), dtype=np.uint8)\n img[:, 30:50] = 255\n img[30:50, :] = 255\n hm = HeatmapsOnImage(img.astype(np.float32)/255.0, shape=(80, 80))\n aug = iaa.ElasticTransformation(\n alpha=60.0,\n sigma=4.0,\n mode=\"constant\",\n cval=0,\n order=order\n )\n aug_det = aug.to_deterministic()\n\n img_aug = aug_det.augment_image(img)\n hm_aug = aug_det.augment_heatmaps([hm])[0]\n\n img_aug_mask = img_aug > 255*0.1\n hm_aug_mask = hm_aug.arr_0to1 > 0.1\n same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])\n assert hm_aug.shape == (80, 80)\n assert hm_aug.arr_0to1.shape == (80, 80, 1)\n assert (same / img_aug_mask.size) >= 0.97\n\n def test_image_heatmaps_alignment_if_heatmaps_smaller_than_image(self):\n # test alignment between images and heatmaps\n # here with heatmaps that are smaller than the image\n for order in [0, 1, 3]:\n with self.subTest(order=order):\n img = np.zeros((80, 80), dtype=np.uint8)\n 
img[:, 30:50] = 255\n img[30:50, :] = 255\n img_small = ia.imresize_single_image(\n img, (40, 40), interpolation=\"nearest\")\n hm = HeatmapsOnImage(\n img_small.astype(np.float32)/255.0,\n shape=(80, 80))\n aug = iaa.ElasticTransformation(\n alpha=60.0, sigma=4.0, mode=\"constant\", cval=0)\n aug_det = aug.to_deterministic()\n\n img_aug = aug_det.augment_image(img)\n hm_aug = aug_det.augment_heatmaps([hm])[0]\n\n img_aug_mask = img_aug > 255*0.1\n hm_aug_mask = ia.imresize_single_image(\n hm_aug.arr_0to1, (80, 80), interpolation=\"nearest\"\n ) > 0.1\n same = np.sum(img_aug_mask == hm_aug_mask[:, :, 0])\n assert hm_aug.shape == (80, 80)\n assert hm_aug.arr_0to1.shape == (40, 40, 1)\n # TODO this is a fairly low threshold, why is that the case?\n assert (same / img_aug_mask.size) >= 0.9\n\n # -----------\n # segmaps alignment\n # -----------\n def test_image_segmaps_alignment(self):\n # test alignment between images and segmaps\n img = np.zeros((80, 80), dtype=np.uint8)\n img[:, 30:50] = 255\n img[30:50, :] = 255\n segmaps = SegmentationMapsOnImage(\n (img > 0).astype(np.int32),\n shape=(80, 80))\n aug = iaa.ElasticTransformation(\n alpha=60.0, sigma=4.0, mode=\"constant\", cval=0, order=0)\n aug_det = aug.to_deterministic()\n\n img_aug = aug_det.augment_image(img)\n segmaps_aug = aug_det.augment_segmentation_maps([segmaps])[0]\n\n img_aug_mask = img_aug > 255*0.1\n segmaps_aug_mask = segmaps_aug.arr > 0\n same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])\n assert segmaps_aug.shape == (80, 80)\n assert segmaps_aug.arr.shape == (80, 80, 1)\n assert (same / img_aug_mask.size) >= 0.99\n\n def test_image_segmaps_alignment_if_segmaps_smaller_than_image(self):\n # test alignment between images and segmaps\n # here with segmaps that are smaller than the image\n img = np.zeros((80, 80), dtype=np.uint8)\n img[:, 30:50] = 255\n img[30:50, :] = 255\n img_small = ia.imresize_single_image(\n img, (40, 40), interpolation=\"nearest\")\n segmaps = SegmentationMapsOnImage(\n (img_small > 0).astype(np.int32), shape=(80, 80))\n aug = iaa.ElasticTransformation(\n alpha=60.0, sigma=4.0, mode=\"constant\", cval=0, order=0)\n aug_det = aug.to_deterministic()\n\n img_aug = aug_det.augment_image(img)\n segmaps_aug = aug_det.augment_segmentation_maps([segmaps])[0]\n\n img_aug_mask = img_aug > 255*0.1\n segmaps_aug_mask = ia.imresize_single_image(\n segmaps_aug.arr, (80, 80), interpolation=\"nearest\") > 0\n same = np.sum(img_aug_mask == segmaps_aug_mask[:, :, 0])\n assert segmaps_aug.shape == (80, 80)\n assert segmaps_aug.arr.shape == (40, 40, 1)\n assert (same / img_aug_mask.size) >= 0.93\n\n # ---------\n # unusual channel numbers\n # ---------\n def test_unusual_channel_numbers(self):\n shapes = [\n (1, 1, 4),\n (1, 1, 5),\n (1, 1, 512),\n (1, 1, 513)\n ]\n\n for shape in shapes:\n with self.subTest(shape=shape):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.ElasticTransformation(alpha=2.0, sigma=2.0)\n\n image_aug = aug(image=image)\n\n assert np.all(image_aug == 0)\n assert image_aug.dtype.name == \"uint8\"\n assert image_aug.shape == shape\n\n # ---------\n # zero-sized axes\n # ---------\n def test_zero_sized_axes(self):\n shapes = [\n (0, 0),\n (0, 1),\n (1, 0),\n (0, 1, 0),\n (1, 0, 0),\n (0, 1, 1),\n (1, 0, 1)\n ]\n\n for shape in shapes:\n for keep_size in [False, True]:\n with self.subTest(shape=shape, keep_size=keep_size):\n for _ in sm.xrange(3):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.ElasticTransformation(alpha=2.0, sigma=2.0)\n\n image_aug = aug(image=image)\n\n assert 
image_aug.dtype.name == \"uint8\"\n assert image_aug.shape == shape\n\n # -----------\n # get_parameters\n # -----------\n def test_get_parameters(self):\n aug = iaa.ElasticTransformation(\n alpha=0.25, sigma=1.0, order=2, cval=10, mode=\"constant\")\n params = aug.get_parameters()\n assert params[0] is aug.alpha\n assert params[1] is aug.sigma\n assert params[2] is aug.order\n assert params[3] is aug.cval\n assert params[4] is aug.mode\n assert 0.25 - 1e-8 < params[0].value < 0.25 + 1e-8\n assert 1.0 - 1e-8 < params[1].value < 1.0 + 1e-8\n assert params[2].value == 2\n assert params[3].value == 10\n assert params[4].value == \"constant\"\n\n # -----------\n # other dtypes\n # -----------\n def test_other_dtypes_bool(self):\n aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)\n mask = np.zeros((21, 21), dtype=bool)\n mask[7:13, 7:13] = True\n\n image = np.zeros((21, 21), dtype=bool)\n image[mask] = True\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == image.dtype.name\n assert not np.all(image_aug == 1)\n assert np.any(image_aug[~mask] == 1)\n\n def test_other_dtypes_uint_int(self):\n aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)\n mask = np.zeros((21, 21), dtype=bool)\n mask[7:13, 7:13] = True\n\n dtypes = [\"uint8\", \"uint16\", \"uint32\", \"int8\", \"int16\", \"int32\"]\n for dtype in dtypes:\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n image = np.zeros((21, 21), dtype=dtype)\n image[7:13, 7:13] = max_value\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert not np.all(image_aug == max_value)\n assert np.any(image_aug[~mask] == max_value)\n\n def test_other_dtypes_float(self):\n aug = iaa.ElasticTransformation(sigma=0.5, alpha=5, order=0)\n mask = np.zeros((21, 21), dtype=bool)\n mask[7:13, 7:13] = True\n\n for dtype in [\"float16\", \"float32\", \"float64\"]:\n def _isclose(a, b):\n atol = 1e-4 if dtype == \"float16\" else 1e-8\n return np.isclose(a, b, atol=atol, rtol=0)\n\n isize = np.dtype(dtype).itemsize\n values = [\n 0.01,\n 1.0,\n 10.0,\n 100.0,\n 500 ** (isize - 1),\n float(np.float64(1000 ** (isize - 1)))\n ]\n values = values + [(-1) * value for value in values]\n for value in values:\n with self.subTest(dtype=dtype, value=value):\n image = np.zeros((21, 21), dtype=dtype)\n image[7:13, 7:13] = value\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert not np.all(_isclose(image_aug, value))\n assert np.any(_isclose(image_aug[~mask], value))\n\n def test_other_dtypes_bool_all_orders(self):\n mask = np.zeros((50, 50), dtype=bool)\n mask[10:40, 20:30] = True\n mask[20:30, 10:40] = True\n\n for order in [0, 1, 2, 3, 4, 5]:\n aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)\n\n image = np.zeros((50, 50), dtype=bool)\n image[mask] = True\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == image.dtype.name\n assert not np.all(image_aug == 1)\n assert np.any(image_aug[~mask] == 1)\n\n def test_other_dtypes_uint_int_all_orders(self):\n mask = np.zeros((50, 50), dtype=bool)\n mask[10:40, 20:30] = True\n mask[20:30, 10:40] = True\n\n for order in [0, 1, 2, 3, 4, 5]:\n aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)\n\n dtypes = [\"uint8\", \"uint16\", \"uint32\", \"uint64\",\n \"int8\", \"int16\", \"int32\", \"int64\"]\n if order == 0:\n dtypes = [\"uint8\", \"uint16\", \"uint32\",\n \"int8\", \"int16\", \"int32\"]\n for dtype in dtypes:\n with 
self.subTest(dtype=dtype):\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n dynamic_range = max_value - min_value\n\n image = np.zeros((50, 50), dtype=dtype)\n image[mask] = max_value\n image_aug = aug.augment_image(image)\n assert image_aug.dtype.name == dtype\n if order == 0:\n assert not np.all(image_aug == max_value)\n assert np.any(image_aug[~mask] == max_value)\n else:\n atol = 0.1 * dynamic_range\n assert not np.all(\n np.isclose(image_aug,\n max_value,\n rtol=0, atol=atol)\n )\n assert np.any(\n np.isclose(image_aug[~mask],\n max_value,\n rtol=0, atol=atol))\n\n def test_other_dtypes_float_all_orders(self):\n mask = np.zeros((50, 50), dtype=bool)\n mask[10:40, 20:30] = True\n mask[20:30, 10:40] = True\n\n for order in [0, 1, 2, 3, 4, 5]:\n aug = iaa.ElasticTransformation(sigma=1.0, alpha=50, order=order)\n\n dtypes = [\"float16\", \"float32\", \"float64\"]\n for dtype in dtypes:\n with self.subTest(dtype=dtype):\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n def _isclose(a, b):\n atol = 1e-4 if dtype == \"float16\" else 1e-8\n return np.isclose(a, b, atol=atol, rtol=0)\n\n value = (\n 0.1 * max_value\n if dtype != \"float64\"\n else 0.0001 * max_value)\n image = np.zeros((50, 50), dtype=dtype)\n image[mask] = value\n image_aug = aug.augment_image(image)\n if order == 0:\n assert image_aug.dtype.name == dtype\n assert not np.all(\n _isclose(image_aug, value)\n )\n assert np.any(\n _isclose(image_aug[~mask], value)\n )\n else:\n atol = (\n 10\n if dtype == \"float16\"\n else 0.00001 * max_value)\n assert not np.all(\n np.isclose(\n image_aug,\n value,\n rtol=0, atol=atol\n ))\n assert np.any(\n np.isclose(\n image_aug[~mask],\n value,\n rtol=0, atol=atol\n ))\n\n def test_pickleable(self):\n aug = iaa.ElasticTransformation(alpha=(0.2, 1.5), sigma=(1.0, 10.0),\n seed=1)\n runtest_pickleable_uint8_img(aug, iterations=4, shape=(25, 25, 1))\n\n\nclass _TwoValueParam(iap.StochasticParameter):\n def __init__(self, v1, v2):\n super(_TwoValueParam, self).__init__()\n self.v1 = v1\n self.v2 = v2\n\n def _draw_samples(self, size, random_state):\n arr = np.full(size, self.v1, dtype=np.int32)\n arr[1::2] = self.v2\n return arr\n\n\nclass TestRot90(unittest.TestCase):\n @property\n def kp_offset(self):\n # set this to -1 when using integer-based KP rotation instead of\n # subpixel/float-based rotation\n return 0\n\n @property\n def image(self):\n return np.arange(4*4*3).reshape((4, 4, 3)).astype(np.uint8)\n\n @property\n def heatmaps(self):\n return HeatmapsOnImage(self.image[..., 0:1].astype(np.float32) / 255,\n shape=(4, 4, 3))\n\n @property\n def heatmaps_smaller(self):\n return HeatmapsOnImage(\n np.float32([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]), shape=(4, 8, 3))\n\n @property\n def segmaps(self):\n return SegmentationMapsOnImage(\n self.image[..., 0:1].astype(np.int32), shape=(4, 4, 3))\n\n @property\n def segmaps_smaller(self):\n return SegmentationMapsOnImage(\n np.int32([[0, 1, 2], [3, 4, 5]]), shape=(4, 8, 3))\n\n @property\n def kpsoi(self):\n kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=2, y=3)]\n return ia.KeypointsOnImage(kps, shape=(4, 8, 3))\n\n @property\n def psoi(self):\n return ia.PolygonsOnImage(\n [ia.Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])],\n shape=(4, 8, 3)\n )\n\n @property\n def lsoi(self):\n return ia.LineStringsOnImage(\n [ia.LineString([(1, 1), (3, 1), (3, 3), (1, 3)])],\n shape=(4, 8, 3)\n )\n\n @property\n def bbsoi(self):\n return ia.BoundingBoxesOnImage(\n [ia.BoundingBox(x1=1, y1=1, x2=3, 
y2=3)],\n shape=(4, 8, 3)\n )\n\n @property\n def kpsoi_k1(self):\n # without keep size\n kp_offset = self.kp_offset\n expected_k1_kps = [(4-2+kp_offset, 1),\n (4-3+kp_offset, 2)]\n kps = [ia.Keypoint(x, y) for x, y in expected_k1_kps]\n return ia.KeypointsOnImage(kps, shape=(8, 4, 3))\n\n @property\n def kpsoi_k2(self):\n # without keep size\n kp_offset = self.kp_offset\n expected_k1_kps = self.kpsoi_k1.to_xy_array()\n expected_k2_kps = [\n (8-expected_k1_kps[0][1]+kp_offset, expected_k1_kps[0][0]),\n (8-expected_k1_kps[1][1]+kp_offset, expected_k1_kps[1][0])]\n kps = [ia.Keypoint(x, y) for x, y in expected_k2_kps]\n return ia.KeypointsOnImage(kps, shape=(4, 8, 3))\n\n @property\n def kpsoi_k3(self):\n # without keep size\n kp_offset = self.kp_offset\n expected_k2_kps = self.kpsoi_k2.to_xy_array()\n expected_k3_kps = [\n (4-expected_k2_kps[0][1]+kp_offset, expected_k2_kps[0][0]),\n (4-expected_k2_kps[1][1]+kp_offset, expected_k2_kps[1][0])]\n kps = [ia.Keypoint(x, y) for x, y in expected_k3_kps]\n return ia.KeypointsOnImage(kps, shape=(8, 4, 3))\n\n @property\n def psoi_k1(self):\n # without keep size\n kp_offset = self.kp_offset\n expected_k1_polys = [(4-1+kp_offset, 1),\n (4-1+kp_offset, 3),\n (4-3+kp_offset, 3),\n (4-3+kp_offset, 1)]\n return ia.PolygonsOnImage([ia.Polygon(expected_k1_polys)],\n shape=(8, 4, 3))\n\n @property\n def psoi_k2(self):\n # without keep size\n kp_offset = self.kp_offset\n expected_k1_polys = self.psoi_k1.polygons[0].exterior\n expected_k2_polys = [\n (8-expected_k1_polys[0][1]+kp_offset, expected_k1_polys[0][0]),\n (8-expected_k1_polys[1][1]+kp_offset, expected_k1_polys[1][0]),\n (8-expected_k1_polys[2][1]+kp_offset, expected_k1_polys[2][0]),\n (8-expected_k1_polys[3][1]+kp_offset, expected_k1_polys[3][0])]\n return ia.PolygonsOnImage([ia.Polygon(expected_k2_polys)],\n shape=(4, 8, 3))\n\n @property\n def psoi_k3(self):\n # without keep size\n kp_offset = self.kp_offset\n expected_k2_polys = self.psoi_k2.polygons[0].exterior\n expected_k3_polys = [\n (4-expected_k2_polys[0][1]+kp_offset, expected_k2_polys[0][0]),\n (4-expected_k2_polys[1][1]+kp_offset, expected_k2_polys[1][0]),\n (4-expected_k2_polys[2][1]+kp_offset, expected_k2_polys[2][0]),\n (4-expected_k2_polys[3][1]+kp_offset, expected_k2_polys[3][0])]\n return ia.PolygonsOnImage([ia.Polygon(expected_k3_polys)],\n shape=(8, 4, 3))\n\n @property\n def lsoi_k1(self):\n # without keep size\n kp_offset = self.kp_offset\n expected_k1_ls = [(4-1+kp_offset, 1),\n (4-1+kp_offset, 3),\n (4-3+kp_offset, 3),\n (4-3+kp_offset, 1)]\n return ia.LineStringsOnImage([ia.LineString(expected_k1_ls)],\n shape=(8, 4, 3))\n\n @property\n def lsoi_k2(self):\n # without keep size\n kp_offset = self.kp_offset\n expected_k1_ls = self.psoi_k1.items[0].coords\n expected_k2_ls = [\n (8-expected_k1_ls[0][1]+kp_offset, expected_k1_ls[0][0]),\n (8-expected_k1_ls[1][1]+kp_offset, expected_k1_ls[1][0]),\n (8-expected_k1_ls[2][1]+kp_offset, expected_k1_ls[2][0]),\n (8-expected_k1_ls[3][1]+kp_offset, expected_k1_ls[3][0])]\n return ia.LineStringsOnImage([ia.LineString(expected_k2_ls)],\n shape=(4, 8, 3))\n\n @property\n def lsoi_k3(self):\n # without keep size\n kp_offset = self.kp_offset\n expected_k2_ls = self.lsoi_k2.items[0].coords\n expected_k3_ls = [\n (4-expected_k2_ls[0][1]+kp_offset, expected_k2_ls[0][0]),\n (4-expected_k2_ls[1][1]+kp_offset, expected_k2_ls[1][0]),\n (4-expected_k2_ls[2][1]+kp_offset, expected_k2_ls[2][0]),\n (4-expected_k2_ls[3][1]+kp_offset, expected_k2_ls[3][0])]\n return 
ia.LineStringsOnImage([ia.LineString(expected_k3_ls)],\n shape=(8, 4, 3))\n\n @property\n def bbsoi_k1(self):\n # without keep size\n kp_offset = self.kp_offset\n expected_k1_coords = [\n (4-1+kp_offset, 1),\n (4-3+kp_offset, 3)]\n return ia.BoundingBoxesOnImage([\n ia.BoundingBox(\n x1=min(expected_k1_coords[0][0], expected_k1_coords[1][0]),\n y1=min(expected_k1_coords[0][1], expected_k1_coords[1][1]),\n x2=max(expected_k1_coords[1][0], expected_k1_coords[0][0]),\n y2=max(expected_k1_coords[1][1], expected_k1_coords[0][1])\n )], shape=(8, 4, 3))\n\n @property\n def bbsoi_k2(self):\n # without keep size\n kp_offset = self.kp_offset\n coords = self.bbsoi_k1.bounding_boxes[0].coords\n expected_k2_coords = [\n (8-coords[0][1]+kp_offset, coords[0][0]),\n (8-coords[1][1]+kp_offset, coords[1][0])]\n return ia.BoundingBoxesOnImage([\n ia.BoundingBox(\n x1=min(expected_k2_coords[0][0], expected_k2_coords[1][0]),\n y1=min(expected_k2_coords[0][1], expected_k2_coords[1][1]),\n x2=max(expected_k2_coords[1][0], expected_k2_coords[0][0]),\n y2=max(expected_k2_coords[1][1], expected_k2_coords[0][1])\n )],\n shape=(4, 8, 3))\n\n @property\n def bbsoi_k3(self):\n # without keep size\n kp_offset = self.kp_offset\n coords = self.bbsoi_k2.bounding_boxes[0].coords\n expected_k3_coords = [\n (4-coords[0][1]+kp_offset, coords[0][0]),\n (4-coords[1][1]+kp_offset, coords[1][0])]\n return ia.BoundingBoxesOnImage([\n ia.BoundingBox(\n x1=min(expected_k3_coords[0][0], expected_k3_coords[1][0]),\n y1=min(expected_k3_coords[0][1], expected_k3_coords[1][1]),\n x2=max(expected_k3_coords[1][0], expected_k3_coords[0][0]),\n y2=max(expected_k3_coords[1][1], expected_k3_coords[0][1])\n )],\n shape=(8, 4, 3))\n\n def test___init___k_is_list(self):\n aug = iaa.Rot90([1, 3])\n assert is_parameter_instance(aug.k, iap.Choice)\n assert len(aug.k.a) == 2\n assert aug.k.a[0] == 1\n assert aug.k.a[1] == 3\n\n def test___init___k_is_all(self):\n aug = iaa.Rot90(ia.ALL)\n assert is_parameter_instance(aug.k, iap.Choice)\n assert len(aug.k.a) == 4\n assert aug.k.a == [0, 1, 2, 3]\n\n def test_images_k_is_0_and_4(self):\n for k in [0, 4]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n img_aug = aug.augment_image(self.image)\n\n assert img_aug.dtype.name == \"uint8\"\n assert np.array_equal(img_aug, self.image)\n\n def test_heatmaps_k_is_0_and_4(self):\n for k in [0, 4]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n hms_aug = aug.augment_heatmaps([self.heatmaps])[0]\n\n assert (hms_aug.arr_0to1.dtype.name\n == self.heatmaps.arr_0to1.dtype.name)\n assert np.allclose(hms_aug.arr_0to1, self.heatmaps.arr_0to1)\n assert hms_aug.shape == self.heatmaps.shape\n\n def test_segmaps_k_is_0_and_4(self):\n for k in [0, 4]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n segmaps_aug = aug.augment_segmentation_maps(\n [self.segmaps]\n )[0]\n\n assert (\n segmaps_aug.arr.dtype.name\n == self.segmaps.arr.dtype.name)\n assert np.allclose(segmaps_aug.arr, self.segmaps.arr)\n assert segmaps_aug.shape == self.segmaps.shape\n\n def test_keypoints_k_is_0_and_4(self):\n for k in [0, 4]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]\n\n assert_cbaois_equal(kpsoi_aug, self.kpsoi)\n\n def test_polygons_k_is_0_and_4(self):\n for k in [0, 4]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n psoi_aug = aug.augment_polygons(self.psoi)\n\n assert_cbaois_equal(psoi_aug, self.psoi)\n\n def 
test_line_strings_k_is_0_and_4(self):\n for k in [0, 4]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n lsoi_aug = aug.augment_line_strings(self.lsoi)\n\n assert_cbaois_equal(lsoi_aug, self.lsoi)\n\n def test_bounding_boxes_k_is_0_and_4(self):\n for k in [0, 4]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)\n\n assert_cbaois_equal(bbsoi_aug, self.bbsoi)\n\n def test_images_k_is_1_and_5(self):\n for k in [1, 5]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n img_aug = aug.augment_image(self.image)\n\n assert img_aug.dtype.name == \"uint8\"\n assert np.array_equal(img_aug,\n np.rot90(self.image, 1, axes=(1, 0)))\n\n def test_heatmaps_k_is_1_and_5(self):\n for k in [1, 5]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n hms_aug = aug.augment_heatmaps([self.heatmaps])[0]\n\n assert (hms_aug.arr_0to1.dtype.name\n == self.heatmaps.arr_0to1.dtype.name)\n assert np.allclose(\n hms_aug.arr_0to1,\n np.rot90(self.heatmaps.arr_0to1, 1, axes=(1, 0)))\n assert hms_aug.shape == (4, 4, 3)\n\n def test_heatmaps_smaller_than_image_k_is_1_and_5(self):\n for k in [1, 5]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n hms_smaller_aug = aug.augment_heatmaps(\n [self.heatmaps_smaller]\n )[0]\n\n assert (\n hms_smaller_aug.arr_0to1.dtype.name\n == self.heatmaps_smaller.arr_0to1.dtype.name)\n assert np.allclose(\n hms_smaller_aug.arr_0to1,\n np.rot90(self.heatmaps_smaller.arr_0to1, 1, axes=(1, 0)))\n assert hms_smaller_aug.shape == (8, 4, 3)\n\n def test_segmaps_k_is_1_and_5(self):\n for k in [1, 5]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n segmaps_aug = aug.augment_segmentation_maps(\n [self.segmaps]\n )[0]\n\n assert (\n segmaps_aug.arr.dtype.name\n == self.segmaps.arr.dtype.name)\n assert np.allclose(\n segmaps_aug.arr,\n np.rot90(self.segmaps.arr, 1, axes=(1, 0)))\n assert segmaps_aug.shape == (4, 4, 3)\n\n def test_segmaps_smaller_than_image_k_is_1_and_5(self):\n for k in [1, 5]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n segmaps_smaller_aug = aug.augment_segmentation_maps(\n self.segmaps_smaller)\n\n assert (\n segmaps_smaller_aug.arr.dtype.name\n == self.segmaps_smaller.arr.dtype.name)\n assert np.allclose(\n segmaps_smaller_aug.arr,\n np.rot90(self.segmaps_smaller.arr, 1, axes=(1, 0)))\n assert segmaps_smaller_aug.shape == (8, 4, 3)\n\n def test_keypoints_k_is_1_and_5(self):\n for k in [1, 5]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]\n\n assert_cbaois_equal(kpsoi_aug, self.kpsoi_k1)\n\n def test_polygons_k_is_1_and_5(self):\n for k in [1, 5]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n psoi_aug = aug.augment_polygons(self.psoi)\n\n assert_cbaois_equal(psoi_aug, self.psoi_k1)\n\n def test_line_strings_k_is_1_and_5(self):\n for k in [1, 5]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n lsoi_aug = aug.augment_line_strings(self.lsoi)\n\n assert_cbaois_equal(lsoi_aug, self.lsoi_k1)\n\n def test_bounding_boxes_k_is_1_and_5(self):\n for k in [1, 5]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)\n\n assert_cbaois_equal(bbsoi_aug, self.bbsoi_k1)\n\n def test_images_k_is_2(self):\n aug = iaa.Rot90(2, keep_size=False)\n img = self.image\n\n img_aug = aug.augment_image(img)\n\n assert img_aug.dtype.name == \"uint8\"\n 
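# editorial note: np.rot90 with axes=(1, 0) reverses numpy's default\n # counter-clockwise rotation, so the expected image below is the input\n # rotated clockwise, which is the direction these Rot90 tests assume\n 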
assert np.array_equal(img_aug, np.rot90(img, 2, axes=(1, 0)))\n\n def test_heatmaps_k_is_2(self):\n aug = iaa.Rot90(2, keep_size=False)\n hms = self.heatmaps\n\n hms_aug = aug.augment_heatmaps([hms])[0]\n\n assert hms_aug.arr_0to1.dtype.name == hms.arr_0to1.dtype.name\n assert np.allclose(\n hms_aug.arr_0to1,\n np.rot90(hms.arr_0to1, 2, axes=(1, 0)))\n assert hms_aug.shape == (4, 4, 3)\n\n def test_heatmaps_smaller_than_image_k_is_2(self):\n aug = iaa.Rot90(2, keep_size=False)\n hms_smaller = self.heatmaps_smaller\n\n hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]\n\n assert (hms_smaller_aug.arr_0to1.dtype.name\n == hms_smaller.arr_0to1.dtype.name)\n assert np.allclose(\n hms_smaller_aug.arr_0to1,\n np.rot90(hms_smaller.arr_0to1, 2, axes=(1, 0)))\n assert hms_smaller_aug.shape == (4, 8, 3)\n\n def test_segmaps_k_is_2(self):\n aug = iaa.Rot90(2, keep_size=False)\n segmaps = self.segmaps\n\n segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]\n\n assert segmaps_aug.arr.dtype.name == segmaps.arr.dtype.name\n assert np.allclose(\n segmaps_aug.arr,\n np.rot90(segmaps.arr, 2, axes=(1, 0)))\n assert segmaps_aug.shape == (4, 4, 3)\n\n def test_segmaps_smaller_than_image_k_is_2(self):\n aug = iaa.Rot90(2, keep_size=False)\n segmaps_smaller = self.segmaps_smaller\n\n segmaps_smaller_aug = aug.augment_segmentation_maps(segmaps_smaller)\n\n assert (segmaps_smaller_aug.arr.dtype.name\n == segmaps_smaller.arr.dtype.name)\n assert np.allclose(\n segmaps_smaller_aug.arr,\n np.rot90(segmaps_smaller.arr, 2, axes=(1, 0)))\n assert segmaps_smaller_aug.shape == (4, 8, 3)\n\n def test_keypoints_k_is_2(self):\n aug = iaa.Rot90(2, keep_size=False)\n\n kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]\n\n assert_cbaois_equal(kpsoi_aug, self.kpsoi_k2)\n\n def test_polygons_k_is_2(self):\n aug = iaa.Rot90(2, keep_size=False)\n\n psoi_aug = aug.augment_polygons(self.psoi)\n\n assert_cbaois_equal(psoi_aug, self.psoi_k2)\n\n def test_line_strings_k_is_2(self):\n aug = iaa.Rot90(2, keep_size=False)\n\n lsoi_aug = aug.augment_line_strings(self.lsoi)\n\n assert_cbaois_equal(lsoi_aug, self.lsoi_k2)\n\n def test_bounding_boxes_k_is_2(self):\n aug = iaa.Rot90(2, keep_size=False)\n\n bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)\n\n assert_cbaois_equal(bbsoi_aug, self.bbsoi_k2)\n\n def test_images_k_is_3_and_minus1(self):\n img = self.image\n for k in [3, -1]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n img_aug = aug.augment_image(img)\n\n assert img_aug.dtype.name == \"uint8\"\n assert np.array_equal(img_aug, np.rot90(img, 3, axes=(1, 0)))\n\n def test_heatmaps_k_is_3_and_minus1(self):\n hms = self.heatmaps\n for k in [3, -1]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n hms_aug = aug.augment_heatmaps([hms])[0]\n\n assert (hms_aug.arr_0to1.dtype.name\n == hms.arr_0to1.dtype.name)\n assert np.allclose(\n hms_aug.arr_0to1,\n np.rot90(hms.arr_0to1, 3, axes=(1, 0)))\n assert hms_aug.shape == (4, 4, 3)\n\n def test_heatmaps_smaller_than_image_k_is_3_and_minus1(self):\n hms_smaller = self.heatmaps_smaller\n for k in [3, -1]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]\n\n assert (hms_smaller_aug.arr_0to1.dtype.name\n == hms_smaller.arr_0to1.dtype.name)\n assert np.allclose(\n hms_smaller_aug.arr_0to1,\n np.rot90(hms_smaller.arr_0to1, 3, axes=(1, 0)))\n assert hms_smaller_aug.shape == (8, 4, 3)\n\n def test_segmaps_k_is_3_and_minus1(self):\n segmaps = self.segmaps\n for k in [3, 
-1]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n segmaps_aug = aug.augment_segmentation_maps([segmaps])[0]\n\n assert (segmaps_aug.arr.dtype.name\n == segmaps.arr.dtype.name)\n assert np.allclose(\n segmaps_aug.arr,\n np.rot90(segmaps.arr, 3, axes=(1, 0)))\n assert segmaps_aug.shape == (4, 4, 3)\n\n def test_segmaps_smaller_than_image_k_is_3_and_minus1(self):\n segmaps_smaller = self.segmaps_smaller\n for k in [3, -1]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n segmaps_smaller_aug = aug.augment_segmentation_maps(\n segmaps_smaller)\n\n assert (segmaps_smaller_aug.arr.dtype.name\n == segmaps_smaller.arr.dtype.name)\n assert np.allclose(\n segmaps_smaller_aug.arr,\n np.rot90(segmaps_smaller.arr, 3, axes=(1, 0)))\n assert segmaps_smaller_aug.shape == (8, 4, 3)\n\n def test_keypoints_k_is_3_and_minus1(self):\n for k in [3, -1]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n kpsoi_aug = aug.augment_keypoints([self.kpsoi])[0]\n\n assert_cbaois_equal(kpsoi_aug, self.kpsoi_k3)\n\n def test_polygons_k_is_3_and_minus1(self):\n for k in [3, -1]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n psoi_aug = aug.augment_polygons(self.psoi)\n\n assert_cbaois_equal(psoi_aug, self.psoi_k3)\n\n def test_line_strings_k_is_3_and_minus1(self):\n for k in [3, -1]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n lsoi_aug = aug.augment_line_strings(self.lsoi)\n\n assert_cbaois_equal(lsoi_aug, self.lsoi_k3)\n\n def test_bounding_boxes_k_is_3_and_minus1(self):\n for k in [3, -1]:\n with self.subTest(k=k):\n aug = iaa.Rot90(k, keep_size=False)\n\n bbsoi_aug = aug.augment_bounding_boxes(self.bbsoi)\n\n assert_cbaois_equal(bbsoi_aug, self.bbsoi_k3)\n\n def test_images_k_is_1_verify_without_using_numpy_rot90(self):\n # verify once without np.rot90\n aug = iaa.Rot90(k=1, keep_size=False)\n image = np.uint8([[1, 0, 0],\n [0, 2, 0]])\n\n img_aug = aug.augment_image(image)\n\n expected = np.uint8([[0, 1], [2, 0], [0, 0]])\n assert np.array_equal(img_aug, expected)\n\n def test_images_k_is_1_keep_size_is_true(self):\n # keep_size=True, k=1\n aug = iaa.Rot90(1, keep_size=True)\n img_nonsquare = np.arange(5*4*3).reshape((5, 4, 3)).astype(np.uint8)\n\n img_aug = aug.augment_image(img_nonsquare)\n\n assert img_aug.dtype.name == \"uint8\"\n assert np.array_equal(\n img_aug,\n ia.imresize_single_image(\n np.rot90(img_nonsquare, 1, axes=(1, 0)),\n (5, 4)\n )\n )\n\n def test_heatmaps_k_is_1_keep_size_is_true(self):\n aug = iaa.Rot90(1, keep_size=True)\n hms = self.heatmaps\n\n hms_aug = aug.augment_heatmaps([hms])[0]\n\n assert hms_aug.arr_0to1.dtype.name == hms.arr_0to1.dtype.name\n assert np.allclose(\n hms_aug.arr_0to1,\n np.rot90(hms.arr_0to1, 1, axes=(1, 0)))\n assert hms_aug.shape == (4, 4, 3)\n\n def test_heatmaps_smaller_than_image_k_is_1_keep_size_is_true(self):\n aug = iaa.Rot90(1, keep_size=True)\n hms_smaller = self.heatmaps_smaller\n\n hms_smaller_aug = aug.augment_heatmaps([hms_smaller])[0]\n\n hms_smaller_rot = np.rot90(hms_smaller.arr_0to1, 1, axes=(1, 0))\n hms_smaller_rot = np.clip(\n ia.imresize_single_image(\n hms_smaller_rot, (2, 3), interpolation=\"cubic\"\n ),\n 0.0, 1.0)\n assert (hms_smaller_aug.arr_0to1.dtype.name\n == hms_smaller.arr_0to1.dtype.name)\n assert np.allclose(hms_smaller_aug.arr_0to1, hms_smaller_rot)\n assert hms_smaller_aug.shape == (4, 8, 3)\n\n def test_segmaps_k_is_1_keep_size_is_true(self):\n aug = iaa.Rot90(1, keep_size=True)\n segmaps = self.segmaps\n\n segmaps_aug = 
aug.augment_segmentation_maps([segmaps])[0]\n\n assert (segmaps_aug.arr.dtype.name\n == segmaps.arr.dtype.name)\n assert np.allclose(segmaps_aug.arr,\n np.rot90(segmaps.arr, 1, axes=(1, 0)))\n assert segmaps_aug.shape == (4, 4, 3)\n\n def test_segmaps_smaller_than_image_k_is_1_keep_size_is_true(self):\n aug = iaa.Rot90(1, keep_size=True)\n segmaps_smaller = self.segmaps_smaller\n\n segmaps_smaller_aug = aug.augment_segmentation_maps(segmaps_smaller)\n\n segmaps_smaller_rot = np.rot90(segmaps_smaller.arr, 1, axes=(1, 0))\n segmaps_smaller_rot = ia.imresize_single_image(\n segmaps_smaller_rot, (2, 3), interpolation=\"nearest\")\n assert (segmaps_smaller_aug.arr.dtype.name\n == segmaps_smaller.arr.dtype.name)\n assert np.allclose(segmaps_smaller_aug.arr, segmaps_smaller_rot)\n assert segmaps_smaller_aug.shape == (4, 8, 3)\n\n def test_keypoints_k_is_1_keep_size_is_true(self):\n aug = iaa.Rot90(1, keep_size=True)\n kp_offset = self.kp_offset\n kpsoi = self.kpsoi\n\n kpsoi_aug = aug.augment_keypoints([kpsoi])[0]\n\n expected = [(4-2+kp_offset, 1), (4-3+kp_offset, 2)]\n expected = [(8*x/4, 4*y/8) for x, y in expected]\n assert kpsoi_aug.shape == (4, 8, 3)\n for kp_aug, kp in zip(kpsoi_aug.keypoints, expected):\n assert np.allclose([kp_aug.x, kp_aug.y], [kp[0], kp[1]])\n\n def test_polygons_k_is_1_keep_size_is_true(self):\n aug = iaa.Rot90(1, keep_size=True)\n psoi = self.psoi\n kp_offset = self.kp_offset\n\n psoi_aug = aug.augment_polygons(psoi)\n\n expected = [(4-1+kp_offset, 1), (4-1+kp_offset, 3),\n (4-3+kp_offset, 3), (4-3+kp_offset, 1)]\n expected = [(8*x/4, 4*y/8) for x, y in expected]\n assert psoi_aug.shape == (4, 8, 3)\n assert len(psoi_aug.polygons) == 1\n assert psoi_aug.polygons[0].is_valid\n assert psoi_aug.polygons[0].exterior_almost_equals(expected)\n\n def test_line_strings_k_is_1_keep_size_is_true(self):\n aug = iaa.Rot90(1, keep_size=True)\n lsoi = self.lsoi\n kp_offset = self.kp_offset\n\n lsoi_aug = aug.augment_line_strings(lsoi)\n\n expected = [(4-1+kp_offset, 1), (4-1+kp_offset, 3),\n (4-3+kp_offset, 3), (4-3+kp_offset, 1)]\n expected = [(8*x/4, 4*y/8) for x, y in expected]\n assert lsoi_aug.shape == (4, 8, 3)\n assert len(lsoi_aug.items) == 1\n assert lsoi_aug.items[0].coords_almost_equals(expected)\n\n def test_bounding_boxes_k_is_1_keep_size_is_true(self):\n aug = iaa.Rot90(1, keep_size=True)\n bbsoi = self.bbsoi\n kp_offset = self.kp_offset\n\n bbsoi_aug = aug.augment_bounding_boxes(bbsoi)\n\n expected = [(4-1+kp_offset, 1),\n (4-3+kp_offset, 3)]\n expected = [(8*x/4, 4*y/8) for x, y in expected]\n expected = np.float32([\n [min(expected[0][0], expected[1][0]),\n min(expected[0][1], expected[1][1])],\n [max(expected[0][0], expected[1][0]),\n max(expected[0][1], expected[1][1])]\n ])\n assert bbsoi_aug.shape == (4, 8, 3)\n assert len(bbsoi_aug.bounding_boxes) == 1\n assert bbsoi_aug.bounding_boxes[0].coords_almost_equals(expected)\n\n def test_images_k_is_list(self):\n aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)\n img = self.image\n\n imgs_aug = aug.augment_images([img] * 4)\n\n assert np.array_equal(imgs_aug[0], np.rot90(img, 1, axes=(1, 0)))\n assert np.array_equal(imgs_aug[1], np.rot90(img, 2, axes=(1, 0)))\n assert np.array_equal(imgs_aug[2], np.rot90(img, 1, axes=(1, 0)))\n assert np.array_equal(imgs_aug[3], np.rot90(img, 2, axes=(1, 0)))\n\n def test_heatmaps_smaller_than_image_k_is_list(self):\n def _rot_hm(hm, k):\n return np.rot90(hm.arr_0to1, k, axes=(1, 0))\n\n aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)\n hms_smaller = 
self.heatmaps_smaller\n\n hms_aug = aug.augment_heatmaps([hms_smaller] * 4)\n\n assert hms_aug[0].shape == (8, 4, 3)\n assert hms_aug[1].shape == (4, 8, 3)\n assert hms_aug[2].shape == (8, 4, 3)\n assert hms_aug[3].shape == (4, 8, 3)\n assert np.allclose(hms_aug[0].arr_0to1, _rot_hm(hms_smaller, 1))\n assert np.allclose(hms_aug[1].arr_0to1, _rot_hm(hms_smaller, 2))\n assert np.allclose(hms_aug[2].arr_0to1, _rot_hm(hms_smaller, 1))\n assert np.allclose(hms_aug[3].arr_0to1, _rot_hm(hms_smaller, 2))\n\n def test_segmaps_smaller_than_image_k_is_list(self):\n def _rot_sm(segmap, k):\n return np.rot90(segmap.arr, k, axes=(1, 0))\n\n aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)\n segmaps_smaller = self.segmaps_smaller\n\n segmaps_aug = aug.augment_segmentation_maps([segmaps_smaller] * 4)\n\n assert segmaps_aug[0].shape == (8, 4, 3)\n assert segmaps_aug[1].shape == (4, 8, 3)\n assert segmaps_aug[2].shape == (8, 4, 3)\n assert segmaps_aug[3].shape == (4, 8, 3)\n assert np.allclose(segmaps_aug[0].arr, _rot_sm(segmaps_smaller, 1))\n assert np.allclose(segmaps_aug[1].arr, _rot_sm(segmaps_smaller, 2))\n assert np.allclose(segmaps_aug[2].arr, _rot_sm(segmaps_smaller, 1))\n assert np.allclose(segmaps_aug[3].arr, _rot_sm(segmaps_smaller, 2))\n\n def test_keypoints_k_is_list(self):\n aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)\n kpsoi = self.kpsoi\n\n kpsoi_aug = aug.augment_keypoints([kpsoi] * 4)\n\n assert_cbaois_equal(kpsoi_aug[0], self.kpsoi_k1)\n assert_cbaois_equal(kpsoi_aug[1], self.kpsoi_k2)\n assert_cbaois_equal(kpsoi_aug[2], self.kpsoi_k1)\n assert_cbaois_equal(kpsoi_aug[3], self.kpsoi_k2)\n\n def test_polygons_k_is_list(self):\n aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)\n psoi = self.psoi\n\n psoi_aug = aug.augment_polygons([psoi] * 4)\n\n assert_cbaois_equal(psoi_aug[0], self.psoi_k1)\n assert_cbaois_equal(psoi_aug[1], self.psoi_k2)\n assert_cbaois_equal(psoi_aug[2], self.psoi_k1)\n assert_cbaois_equal(psoi_aug[3], self.psoi_k2)\n\n def test_line_strings_k_is_list(self):\n aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)\n lsoi = self.lsoi\n\n lsoi_aug = aug.augment_line_strings([lsoi] * 4)\n\n assert_cbaois_equal(lsoi_aug[0], self.lsoi_k1)\n assert_cbaois_equal(lsoi_aug[1], self.lsoi_k2)\n assert_cbaois_equal(lsoi_aug[2], self.lsoi_k1)\n assert_cbaois_equal(lsoi_aug[3], self.lsoi_k2)\n\n def test_bounding_boxes_k_is_list(self):\n aug = iaa.Rot90(_TwoValueParam(1, 2), keep_size=False)\n bbsoi = self.bbsoi\n\n bbsoi_aug = aug.augment_bounding_boxes([bbsoi] * 4)\n\n assert_cbaois_equal(bbsoi_aug[0], self.bbsoi_k1)\n assert_cbaois_equal(bbsoi_aug[1], self.bbsoi_k2)\n assert_cbaois_equal(bbsoi_aug[2], self.bbsoi_k1)\n assert_cbaois_equal(bbsoi_aug[3], self.bbsoi_k2)\n\n def test_empty_keypoints(self):\n aug = iaa.Rot90(k=1, keep_size=False)\n kpsoi = ia.KeypointsOnImage([], shape=(4, 8, 3))\n\n kpsoi_aug = aug.augment_keypoints(kpsoi)\n\n expected = self.kpsoi_k1\n expected.keypoints = []\n assert_cbaois_equal(kpsoi_aug, expected)\n\n def test_empty_polygons(self):\n aug = iaa.Rot90(k=1, keep_size=False)\n psoi = ia.PolygonsOnImage([], shape=(4, 8, 3))\n\n psoi_aug = aug.augment_polygons(psoi)\n\n expected = self.psoi_k1\n expected.polygons = []\n assert_cbaois_equal(psoi_aug, expected)\n\n def test_empty_line_strings(self):\n aug = iaa.Rot90(k=1, keep_size=False)\n lsoi = ia.LineStringsOnImage([], shape=(4, 8, 3))\n\n lsoi_aug = aug.augment_line_strings(lsoi)\n\n expected = self.lsoi_k1\n expected.line_strings = []\n assert_cbaois_equal(lsoi_aug, 
expected)\n\n def test_empty_bounding_boxes(self):\n aug = iaa.Rot90(k=1, keep_size=False)\n bbsoi = ia.BoundingBoxesOnImage([], shape=(4, 8, 3))\n\n bbsoi_aug = aug.augment_bounding_boxes(bbsoi)\n\n expected = self.bbsoi_k1\n expected.bounding_boxes = []\n assert_cbaois_equal(bbsoi_aug, expected)\n\n def test_unusual_channel_numbers(self):\n shapes = [\n (1, 1, 4),\n (1, 1, 5),\n (1, 1, 512),\n (1, 1, 513)\n ]\n\n for shape in shapes:\n with self.subTest(shape=shape):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.Rot90(k=1)\n\n image_aug = aug(image=image)\n\n shape_expected = tuple([shape[1], shape[0]] + list(shape[2:]))\n assert np.all(image_aug == 0)\n assert image_aug.dtype.name == \"uint8\"\n assert image_aug.shape == shape_expected\n\n def test_zero_sized_axes_k_0_or_2(self):\n shapes = [\n (0, 0),\n (0, 1),\n (1, 0),\n (0, 1, 0),\n (1, 0, 0),\n (0, 1, 1),\n (1, 0, 1)\n ]\n\n for shape in shapes:\n for keep_size in [False, True]:\n with self.subTest(shape=shape, keep_size=keep_size):\n for _ in sm.xrange(10):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.Rot90([0, 2], keep_size=keep_size)\n\n image_aug = aug(image=image)\n\n assert image_aug.shape == shape\n\n def test_zero_sized_axes_k_1_or_3_no_keep_size(self):\n shapes = [\n (0, 0),\n (0, 1),\n (1, 0),\n (0, 1, 0),\n (1, 0, 0),\n (0, 1, 1),\n (1, 0, 1)\n ]\n\n for shape in shapes:\n with self.subTest(shape=shape):\n for _ in sm.xrange(10):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.Rot90([1, 3], keep_size=False)\n\n image_aug = aug(image=image)\n\n shape_expected = tuple([shape[1], shape[0]]\n + list(shape[2:]))\n assert image_aug.shape == shape_expected\n\n def test_zero_sized_axes_k_1_or_3_keep_size(self):\n shapes = [\n (0, 0),\n (0, 1),\n (1, 0),\n (0, 1, 0),\n (1, 0, 0),\n (0, 1, 1),\n (1, 0, 1)\n ]\n\n for shape in shapes:\n with self.subTest(shape=shape):\n for _ in sm.xrange(10):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.Rot90([1, 3], keep_size=True)\n\n image_aug = aug(image=image)\n\n assert image_aug.shape == image.shape\n\n def test_get_parameters(self):\n aug = iaa.Rot90([1, 3], keep_size=False)\n assert aug.get_parameters()[0] == aug.k\n assert aug.get_parameters()[1] is False\n\n def test_other_dtypes_bool(self):\n aug = iaa.Rot90(2)\n\n image = np.zeros((3, 3), dtype=bool)\n image[0, 0] = True\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == image.dtype.name\n assert np.all(image_aug[0, 0] == 0)\n assert np.all(image_aug[2, 2] == 1)\n\n def test_other_dtypes_uint_int(self):\n aug = iaa.Rot90(2)\n\n dtypes = [\"uint8\", \"uint16\", \"uint32\", \"uint64\",\n \"int8\", \"int16\", \"int32\", \"int64\"]\n for dtype in dtypes:\n with self.subTest(dtype=dtype):\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n image = np.zeros((3, 3), dtype=dtype)\n image[0, 0] = max_value\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert np.all(image_aug[0, 0] == 0)\n assert np.all(image_aug[2, 2] == max_value)\n\n def test_other_dtypes_float(self):\n aug = iaa.Rot90(2)\n\n try:\n high_res_dt = np.float128\n dtypes = [\"float16\", \"float32\", \"float64\", \"float128\"]\n except AttributeError:\n high_res_dt = np.float64\n dtypes = [\"float16\", \"float32\", \"float64\"]\n\n for dtype in dtypes:\n def _allclose(a, b):\n atol = 1e-4 if dtype == \"float16\" else 1e-8\n return np.allclose(a, b, atol=atol, rtol=0)\n\n isize = np.dtype(dtype).itemsize\n values = [\n 0,\n 1.0,\n 10.0,\n 100.0,\n 
high_res_dt(500 ** (isize-1)),\n high_res_dt(1000 ** (isize-1))\n ]\n values = values + [(-1) * value for value in values]\n for value in values:\n with self.subTest(dtype=dtype, value=value):\n image = np.zeros((3, 3), dtype=dtype)\n image[0, 0] = value\n\n image_aug = aug.augment_image(image)\n\n assert image_aug.dtype.name == dtype\n assert _allclose(image_aug[0, 0], 0)\n assert _allclose(image_aug[2, 2], high_res_dt(value))\n\n def test_pickleable(self):\n aug = iaa.Rot90([0, 1, 2, 3], seed=1)\n runtest_pickleable_uint8_img(aug, iterations=5)\n\n\nclass TestWithPolarWarping(unittest.TestCase):\n def setUp(self):\n reseed()\n\n def test___init___single_augmenter_as_child(self):\n aug = iaa.WithPolarWarping(iaa.Noop())\n assert isinstance(aug.children, iaa.Sequential)\n assert isinstance(aug.children[0], iaa.Noop)\n\n def test___init___list_of_augmenters_as_child(self):\n aug = iaa.WithPolarWarping([iaa.Noop(), iaa.Noop()])\n assert isinstance(aug.children, iaa.Sequential)\n assert isinstance(aug.children[0], iaa.Noop)\n assert isinstance(aug.children[1], iaa.Noop)\n\n def test_images_no_change(self):\n image = np.mod(np.arange(10*20*3), 255).astype(np.uint8)\n image = image.reshape((10, 20, 3))\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n image_aug = aug(image=image)\n\n avg_dist = np.average(\n np.abs(\n image_aug.astype(np.int32)[2:-2, 2:-2]\n - image.astype(np.int32)[2:-2, 2:-2]\n )\n )\n assert image_aug.shape == (10, 20, 3)\n assert avg_dist < 7.0\n\n def test_heatmaps_no_change(self):\n hm = np.linspace(0, 1.0, 10*20, dtype=np.float32).reshape((10, 20, 1))\n hm = ia.HeatmapsOnImage(hm, shape=(10, 20, 3))\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n hm_aug = aug(heatmaps=hm)\n\n avg_dist = np.average(\n np.abs(\n hm_aug.get_arr()[2:-2, 2:-2]\n - hm.get_arr()[2:-2, 2:-2]\n )\n )\n assert hm_aug.shape == (10, 20, 3)\n assert avg_dist < 0.0125\n\n def test_segmentation_maps_no_change(self):\n sm = np.zeros((10, 20, 1), dtype=np.int32)\n sm[1, 0:5] = 1\n sm[3:5, 3:5] = 2\n sm[7:9, :] = 3\n sm = ia.SegmentationMapsOnImage(sm, shape=(10, 20, 3))\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n sm_aug = aug(segmentation_maps=sm)\n\n p_same = np.average(\n sm_aug.get_arr()[2:-2, 2:-2]\n == sm.get_arr()[2:-2, 2:-2]\n )\n assert sm_aug.shape == (10, 20, 3)\n assert p_same > 0.95\n\n def test_keypoints_no_change(self):\n kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=5, y=5),\n ia.Keypoint(x=5, y=9)]\n kpsoi = ia.KeypointsOnImage(kps, shape=(10, 20, 3))\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n kpsoi_aug = aug(keypoints=kpsoi)\n\n assert kpsoi_aug.shape == (10, 20, 3)\n assert np.allclose(kpsoi_aug.to_xy_array(), kpsoi.to_xy_array(),\n atol=0.01)\n\n def test_bounding_boxes_no_change(self):\n bbs = [\n ia.BoundingBox(x1=1, y1=2, x2=3, y2=4, label=\"foo\"),\n ia.BoundingBox(x1=3, y1=5, x2=7, y2=10),\n ]\n bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(10, 20, 3))\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n bbsoi_aug = aug(bounding_boxes=bbsoi)\n\n assert bbsoi_aug.items[0].label == \"foo\"\n assert bbsoi_aug.items[1].label is None\n assert bbsoi_aug.shape == (10, 20, 3)\n assert np.allclose(bbsoi_aug.to_xy_array(), bbsoi.to_xy_array(),\n atol=0.01)\n\n def test_polygons_no_change(self):\n ps = [\n ia.Polygon([(0, 2), (4, 2), (4, 4)], label=\"foo\"),\n ia.Polygon([(0, 0), (5, 0), (5, 5), (0, 5)])\n ]\n psoi = ia.PolygonsOnImage(ps, shape=(10, 20, 3))\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n psoi_aug = aug(polygons=psoi)\n\n assert psoi_aug.items[0].label == \"foo\"\n assert 
psoi_aug.items[1].label is None\n assert psoi_aug.shape == (10, 20, 3)\n assert np.allclose(psoi_aug.to_xy_array(), psoi.to_xy_array(),\n atol=0.01)\n\n def test_line_strings_no_change(self):\n ls = [\n ia.LineString([(0, 2), (4, 2), (4, 4)]),\n ia.LineString([(0, 0), (5, 0), (5, 5), (0, 5)])\n ]\n lsoi = ia.LineStringsOnImage(ls, shape=(10, 20, 3))\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n lsoi_aug = aug(line_strings=lsoi)\n\n assert lsoi_aug.shape == (10, 20, 3)\n assert np.allclose(lsoi_aug.to_xy_array(), lsoi.to_xy_array(),\n atol=0.01)\n\n def test_bounding_boxes_and_polygons_provided_no_change(self):\n bbs = [\n ia.BoundingBox(x1=1, y1=2, x2=3, y2=4, label=\"foo\"),\n ia.BoundingBox(x1=3, y1=5, x2=7, y2=10),\n ]\n bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(10, 20, 3))\n ps = [\n ia.Polygon([(0, 2), (4, 2), (4, 4)], label=\"foo\"),\n ia.Polygon([(0, 0), (5, 0), (5, 5), (0, 5)])\n ]\n psoi = ia.PolygonsOnImage(ps, shape=(10, 20, 3))\n\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n aug = aug.to_deterministic()\n bbsoi_aug = aug.augment_bounding_boxes(bbsoi)\n psoi_aug = aug.augment_polygons(psoi)\n\n assert bbsoi_aug.items[0].label == \"foo\"\n assert bbsoi_aug.items[1].label is None\n assert bbsoi_aug.shape == (10, 20, 3)\n assert np.allclose(bbsoi_aug.to_xy_array(), bbsoi.to_xy_array(),\n atol=0.01)\n\n assert psoi_aug.items[0].label == \"foo\"\n assert psoi_aug.items[1].label is None\n assert psoi_aug.shape == (10, 20, 3)\n assert np.allclose(psoi_aug.to_xy_array(), psoi.to_xy_array(),\n atol=0.01)\n\n def test_images_translation_x(self):\n image = np.zeros((50, 70, 3), dtype=np.uint8)\n image[20-1:20+1, 30-1:30+1, 0] = 255\n image[30-1:30+1, 40-1:40+1, 1] = 255\n aug = iaa.WithPolarWarping(iaa.Affine(translate_px={\"x\": 15}))\n\n image_aug = aug(image=image)\n\n x1 = np.argmax(np.max(image_aug[..., 0], axis=0))\n y1 = np.argmax(np.max(image_aug[..., 0], axis=1))\n x2 = np.argmax(np.max(image_aug[..., 1], axis=0))\n y2 = np.argmax(np.max(image_aug[..., 1], axis=1))\n\n # translation on x axis in polar representation should move all points\n # a bit away from the center\n min_diff = 4\n assert image_aug.shape == (50, 70, 3)\n assert x1 < 30 - min_diff\n assert y1 < 20 - min_diff\n assert x2 > 40 + min_diff\n assert y2 > 30 + min_diff\n\n def test_heatmaps_translation_x(self):\n hm = np.zeros((50, 70, 2), dtype=np.float32)\n hm[20-1:20+1, 30-1:30+1, 0] = 1.0\n hm[30-1:30+1, 40-1:40+1, 1] = 1.0\n hm = ia.HeatmapsOnImage(hm, shape=(50, 70, 3))\n aug = iaa.WithPolarWarping(iaa.Affine(translate_px={\"x\": 15}))\n\n hm_aug = aug(heatmaps=hm)\n\n hm_aug_arr = hm_aug.get_arr()\n x1 = np.argmax(np.max(hm_aug_arr[..., 0], axis=0))\n y1 = np.argmax(np.max(hm_aug_arr[..., 0], axis=1))\n x2 = np.argmax(np.max(hm_aug_arr[..., 1], axis=0))\n y2 = np.argmax(np.max(hm_aug_arr[..., 1], axis=1))\n\n # translation on x axis in polar representation should move all points\n # a bit away from the center\n min_diff = 4\n assert hm_aug_arr.shape == (50, 70, 2)\n assert hm_aug.shape == (50, 70, 3)\n assert x1 < 30 - min_diff\n assert y1 < 20 - min_diff\n assert x2 > 40 + min_diff\n assert y2 > 30 + min_diff\n\n def test_segmentation_maps_translation_x(self):\n sm = np.zeros((50, 70, 2), dtype=np.int32)\n sm[20-1:20+1, 30-1:30+1, 0] = 1\n sm[30-1:30+1, 40-1:40+1, 1] = 2\n sm = ia.SegmentationMapsOnImage(sm, shape=(50, 70, 3))\n aug = iaa.WithPolarWarping(iaa.Affine(translate_px={\"x\": 15}))\n\n sm_aug = aug(segmentation_maps=sm)\n\n sm_aug_arr = sm_aug.get_arr()\n x1 = np.argmax(np.max(sm_aug_arr[..., 0], 
axis=0))\n y1 = np.argmax(np.max(sm_aug_arr[..., 0], axis=1))\n x2 = np.argmax(np.max(sm_aug_arr[..., 1], axis=0))\n y2 = np.argmax(np.max(sm_aug_arr[..., 1], axis=1))\n\n # translation on x axis in polar representation should move all points\n # a bit away from the center\n min_diff = 4\n assert sm_aug_arr.shape == (50, 70, 2)\n assert sm_aug.shape == (50, 70, 3)\n assert x1 < 30 - min_diff\n assert y1 < 20 - min_diff\n assert x2 > 40 + min_diff\n assert y2 > 30 + min_diff\n\n def test_keypoints_translation_x(self):\n cbas = [ia.Keypoint(y=20, x=30), ia.Keypoint(y=30, x=40)]\n cbaoi = ia.KeypointsOnImage(cbas, shape=(50, 70, 3))\n aug = iaa.WithPolarWarping(iaa.Affine(translate_px={\"x\": 15}))\n\n cbaoi_aug = aug(keypoints=cbaoi)\n\n x1 = cbaoi_aug.items[0].x\n y1 = cbaoi_aug.items[0].y\n x2 = cbaoi_aug.items[1].x\n y2 = cbaoi_aug.items[1].y\n\n # translation on x axis in polar representation should move all points\n # a bit away from the center\n min_diff = 4\n assert cbaoi_aug.shape == (50, 70, 3)\n assert x1 < 30 - min_diff\n assert y1 < 20 - min_diff\n assert x2 > 40 + min_diff\n assert y2 > 30 + min_diff\n\n def test_bounding_boxes_translation_x(self):\n cbas = [ia.BoundingBox(y1=20, x1=30, y2=20+2, x2=30+2),\n ia.BoundingBox(y1=30, x1=40, y2=30+2, x2=40+2)]\n cbaoi = ia.BoundingBoxesOnImage(cbas, shape=(50, 70, 3))\n aug = iaa.WithPolarWarping(iaa.Affine(translate_px={\"x\": 15}))\n\n cbaoi_aug = aug(bounding_boxes=cbaoi)\n\n x1 = cbaoi_aug.items[0].x1\n y1 = cbaoi_aug.items[0].y1\n x2 = cbaoi_aug.items[1].x2\n y2 = cbaoi_aug.items[1].y2\n\n # translation on x axis in polar representation should move all points\n # a bit away from the center\n min_diff = 4\n assert cbaoi_aug.shape == (50, 70, 3)\n assert x1 < 30 - min_diff\n assert y1 < 20 - min_diff\n assert x2 > 40 + min_diff\n assert y2 > 30 + min_diff\n\n def test_polygons_translation_x(self):\n cbas = [ia.Polygon([(30, 20), (30+2, 20), (30+2, 20+2)]),\n ia.Polygon([(40, 30), (40+2, 30), (40+2, 30+2)])]\n cbaoi = ia.PolygonsOnImage(cbas, shape=(50, 70, 3))\n aug = iaa.WithPolarWarping(iaa.Affine(translate_px={\"x\": 15}))\n\n cbaoi_aug = aug(polygons=cbaoi)\n\n x1 = cbaoi_aug.items[0].coords[0][0]\n y1 = cbaoi_aug.items[0].coords[0][1]\n x2 = cbaoi_aug.items[1].coords[2][0]\n y2 = cbaoi_aug.items[1].coords[2][1]\n\n # translation on x axis in polar representation should move all points\n # a bit away from the center\n min_diff = 4\n assert cbaoi_aug.shape == (50, 70, 3)\n assert x1 < 30 - min_diff\n assert y1 < 20 - min_diff\n assert x2 > 40 + min_diff\n assert y2 > 30 + min_diff\n\n def test_line_strings_translation_x(self):\n cbas = [ia.LineString([(30, 20), (30+2, 20), (30+2, 20+2)]),\n ia.LineString([(40, 30), (40+2, 30), (40+2, 30+2)])]\n cbaoi = ia.LineStringsOnImage(cbas, shape=(50, 70, 3))\n aug = iaa.WithPolarWarping(iaa.Affine(translate_px={\"x\": 15}))\n\n cbaoi_aug = aug(line_strings=cbaoi)\n\n x1 = cbaoi_aug.items[0].coords[0][0]\n y1 = cbaoi_aug.items[0].coords[0][1]\n x2 = cbaoi_aug.items[1].coords[2][0]\n y2 = cbaoi_aug.items[1].coords[2][1]\n\n # translation on x axis in polar representation should move all points\n # a bit away from the center\n min_diff = 4\n assert cbaoi_aug.shape == (50, 70, 3)\n assert x1 < 30 - min_diff\n assert y1 < 20 - min_diff\n assert x2 > 40 + min_diff\n assert y2 > 30 + min_diff\n\n def test_image_heatmap_alignment(self):\n image = np.zeros((80, 100, 3), dtype=np.uint8)\n image[40-10:40+10, 50-10:50+10, :] = 255\n hm = np.zeros((40, 50, 1), dtype=np.float32)\n hm[20-5:20+5, 
25-5:25+5, :] = 1.0\n hm = ia.HeatmapsOnImage(hm, shape=image.shape)\n aug = iaa.WithPolarWarping(iaa.Affine(translate_px={\"x\": 10}))\n\n image_aug, hm_aug = aug(image=image, heatmaps=hm)\n\n hm_aug_arr = hm_aug.get_arr()\n hm_aug_arr_rs = ia.imresize_single_image(hm_aug_arr, (80, 100),\n interpolation=\"nearest\")\n overlap = np.average(\n (image_aug[..., 0] > 200)\n == (hm_aug_arr_rs[..., 0] > 0.9)\n )\n assert image_aug.shape == (80, 100, 3)\n assert hm_aug.shape == (80, 100, 3)\n assert hm_aug_arr.shape == (40, 50, 1)\n assert overlap > 0.96\n\n def test_image_segmentation_map_alignment(self):\n image = np.zeros((80, 100, 3), dtype=np.uint8)\n image[40-10:40+10, 50-10:50+10, :] = 255\n sm = np.zeros((40, 50, 1), dtype=np.int32)\n sm[20-5:20+5, 25-5:25+5, :] = 1\n sm = ia.SegmentationMapsOnImage(sm, shape=image.shape)\n aug = iaa.WithPolarWarping(iaa.Affine(translate_px={\"x\": 10}))\n\n image_aug, sm_aug = aug(image=image, segmentation_maps=sm)\n\n sm_aug_arr = sm_aug.get_arr()\n sm_aug_arr_rs = ia.imresize_single_image(sm_aug_arr, (80, 100),\n interpolation=\"nearest\")\n overlap = np.average(\n (image_aug[..., 0] > 200)\n == (sm_aug_arr_rs[..., 0] == 1)\n )\n assert image_aug.shape == (80, 100, 3)\n assert sm_aug.shape == (80, 100, 3)\n assert sm_aug_arr.shape == (40, 50, 1)\n assert overlap > 0.96\n\n def test_image_keypoint_alignment(self):\n image = np.zeros((80, 100, 3), dtype=np.uint8)\n image[40-10:40-10+3, 50-10:50-10+3, :] = 255\n image[40+10:40+10+3, 50+10:50+10+3, :] = 255\n\n kps = [ia.Keypoint(y=40-10+1.5, x=50-10+1.5),\n ia.Keypoint(y=40+10+1.5, x=50+10+1.5)]\n kpsoi = ia.KeypointsOnImage(kps, shape=image.shape)\n aug = iaa.WithPolarWarping(iaa.Affine(translate_px={\"x\": 10}))\n\n image_aug, kpsoi_aug = aug(image=image, keypoints=kpsoi)\n\n kp1 = kpsoi_aug.items[0]\n kp2 = kpsoi_aug.items[1]\n kp1_intensity = image_aug[int(kp1.y), int(kp1.x), 0]\n kp2_intensity = image_aug[int(kp2.y), int(kp2.x), 0]\n assert image_aug.shape == (80, 100, 3)\n assert kpsoi_aug.shape == (80, 100, 3)\n assert kp1_intensity > 200\n assert kp2_intensity > 200\n\n def test_image_is_noncontiguous(self):\n image = np.mod(np.arange(10*20*3), 255).astype(np.uint8)\n image = image.reshape((10, 20, 3))\n image_cp = np.fliplr(np.copy(image))\n image = np.fliplr(image)\n assert image.flags[\"C_CONTIGUOUS\"] is False\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n image_aug = aug(image=image)\n\n avg_dist = np.average(\n np.abs(\n image_aug.astype(np.int32)[2:-2, 2:-2]\n - image_cp.astype(np.int32)[2:-2, 2:-2]\n )\n )\n assert image_aug.shape == (10, 20, 3)\n assert avg_dist < 7.0\n\n def test_image_is_view(self):\n image = np.mod(np.arange(10*20*3), 255).astype(np.uint8)\n image = image.reshape((10, 20, 3))\n image_cp = np.copy(image)[2:, 2:, :]\n image = image[2:, 2:, :]\n assert image.flags[\"OWNDATA\"] is False\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n image_aug = aug(image=image)\n\n avg_dist = np.average(\n np.abs(\n image_aug.astype(np.int32)[2:-2, 2:-2]\n - image_cp.astype(np.int32)[2:-2, 2:-2]\n )\n )\n assert image_aug.shape == (8, 18, 3)\n assert avg_dist < 7.0\n\n def test_propagation_hooks(self):\n image = np.mod(np.arange(30*30), 255).astype(np.uint8)\n image = image.reshape((30, 30))\n aug = iaa.WithPolarWarping(iaa.Add(50))\n\n def _propagator(images, augmenter, parents, default):\n return False if augmenter is aug else default\n\n hooks = ia.HooksImages(propagator=_propagator)\n\n observed1 = aug.augment_image(image)\n observed2 = aug.augment_image(image, hooks=hooks)\n\n 
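# editorial note: _propagator returns False for this augmenter, so the\n # hooks-based call does not propagate to the child Add(50); observed2\n # should therefore stay far from image_plus50, which overlap_2_add checks\n 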
image_plus50 = np.clip(image.astype(np.int32)+50, 0, 255)\n diff1 = np.abs(observed1[2:-2].astype(np.int32)\n - image_plus50[2:-2].astype(np.int32))\n diff2 = np.abs(observed2[2:-2].astype(np.int32)\n - image_plus50[2:-2].astype(np.int32))\n overlap_1_add = np.average(diff1 <= 1)\n overlap_2_add = np.average(diff2 <= 2)\n assert overlap_1_add >= 0.9\n assert overlap_2_add < 0.01\n\n def test_unusual_channel_numbers(self):\n with assertWarns(self, iaa.SuspiciousSingleImageShapeWarning):\n shapes = [\n (5, 5, 4),\n (5, 5, 5),\n (5, 5, 512),\n (5, 5, 513)\n ]\n\n for shape in shapes:\n with self.subTest(shape=shape):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n image_aug = aug(image=image)\n\n shape_expected = tuple([shape[1], shape[0]] + list(shape[2:]))\n assert np.all(image_aug == 0)\n assert image_aug.dtype.name == \"uint8\"\n assert image_aug.shape == shape_expected\n\n def test_zero_sized_axes(self):\n shapes = [\n (0, 0),\n (0, 1),\n (1, 0),\n (0, 1, 0),\n (1, 0, 0),\n (0, 1, 1),\n (1, 0, 1)\n ]\n\n for shape in shapes:\n with self.subTest(shape=shape):\n image = np.zeros(shape, dtype=np.uint8)\n kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],\n shape=image.shape)\n sm_arr = np.zeros((3, 3), dtype=np.int32)\n sm_arr[1, 1] = 1\n sm = ia.SegmentationMapsOnImage(sm_arr, shape=image.shape)\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n aug_det = aug.to_deterministic()\n image_aug = aug_det(image=image)\n kpsoi_aug = aug_det(keypoints=kpsoi)\n sm_aug = aug_det(segmentation_maps=sm)\n\n assert image_aug.dtype.name == \"uint8\"\n assert image_aug.shape == shape\n assert np.allclose(kpsoi_aug.to_xy_array(),\n kpsoi.to_xy_array())\n assert kpsoi_aug.shape == shape\n assert np.array_equal(sm_aug.get_arr(), sm_arr)\n assert sm_aug.shape == shape\n\n def test_other_dtypes_bool(self):\n aug = iaa.WithPolarWarping(iaa.Noop())\n arr = np.zeros((20, 20), dtype=bool)\n arr[10-3:10+3, 10-3:10+3] = True\n\n arr_aug = aug(image=arr)\n\n overlap = np.average(arr_aug == arr)\n assert arr_aug.shape == (20, 20)\n assert arr_aug.dtype.name == \"bool\"\n assert overlap > 0.95\n\n def test_other_dtypes_uint_int(self):\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n dtypes = [\"uint8\", \"uint16\",\n \"int8\", \"int16\", \"int32\",]\n for dtype in dtypes:\n with self.subTest(dtype=dtype):\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n center_value = int(center_value)\n\n image = np.zeros((30, 10), dtype=dtype)\n image[0:10, :] = min_value\n image[10:20, :] = center_value\n image[20:30, :] = max_value\n image = iaa.pad(image, top=2, right=2, bottom=2, left=2,\n cval=0)\n\n image_aug = aug.augment_image(image)\n image_aug = image_aug[2:-2, 2:-2]\n\n overlap_min = np.average(image_aug[0:10] == min_value)\n overlap_cv = np.average(image_aug[10:20] == center_value)\n overlap_max = np.average(image_aug[20:30] == max_value)\n assert image_aug.dtype.name == dtype\n assert overlap_min > 0.9\n assert overlap_cv > 0.9\n assert overlap_max > 0.9\n\n def test_other_dtypes_float(self):\n def _avg_close(arr_aug, expected_val):\n atol = 1e-8\n return np.average(np.isclose(arr_aug, expected_val,\n rtol=0, atol=atol))\n\n aug = iaa.WithPolarWarping(iaa.Noop())\n\n dtypes = [\"float16\", \"float32\", \"float64\"]\n for dtype in dtypes:\n with self.subTest(dtype=dtype):\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n\n image = np.zeros((70, 10), dtype=dtype)\n image[0:10, :] = min_value\n 
image[10:20, :] = center_value\n image[20:30, :] = max_value\n image[30:40, :] = -1.0\n image[40:50, :] = 1.0\n image[50:60, :] = -100.0\n image[60:70, :] = 100.0\n image = iaa.pad(image, top=2, right=2, bottom=2, left=2,\n cval=0)\n\n image_aug = aug.augment_image(image)\n image_aug = image_aug[2:-2, 2:-2]\n\n overlap1 = _avg_close(image_aug[0:10], min_value)\n overlap2 = _avg_close(image_aug[10:20], center_value)\n overlap3 = _avg_close(image_aug[20:30], max_value)\n overlap4 = _avg_close(image_aug[30:40], -1.0)\n overlap5 = _avg_close(image_aug[40:50], 1.0)\n overlap6 = _avg_close(image_aug[50:60], -100.0)\n overlap7 = _avg_close(image_aug[60:70], 100.0)\n assert image_aug.dtype.name == dtype\n assert overlap1 > 0.9\n assert overlap2 > 0.9\n assert overlap3 > 0.9\n assert overlap4 > 0.9\n assert overlap5 > 0.9\n assert overlap6 > 0.9\n assert overlap7 > 0.9\n\n def test_get_parameters(self):\n aug = iaa.WithPolarWarping(iaa.Noop())\n params = aug.get_parameters()\n assert len(params) == 0\n\n def test_get_children_lists(self):\n children = iaa.Sequential([iaa.Noop()])\n aug = iaa.WithPolarWarping(children)\n assert aug.get_children_lists() == [children]\n\n def test_to_deterministic(self):\n child = iaa.Identity()\n aug = iaa.WithPolarWarping([child])\n\n aug_det = aug.to_deterministic()\n\n assert aug_det.deterministic\n assert aug_det.random_state is not aug.random_state\n assert aug_det.children.deterministic\n assert aug_det.children[0].deterministic\n\n def test___repr___and___str__(self):\n children = iaa.Sequential([iaa.Noop()])\n aug = iaa.WithPolarWarping(children, name=\"WithPolarWarpingTest\")\n expected = (\n \"WithPolarWarping(\"\n \"name=WithPolarWarpingTest, \"\n \"children=%s, \"\n \"deterministic=False\"\n \")\" % (str(children),))\n\n assert aug.__repr__() == expected\n assert aug.__str__() == expected\n\n def test_pickleable(self):\n aug = iaa.WithPolarWarping(\n iaa.Affine(translate_px=(0, 10), seed=1),\n seed=2)\n runtest_pickleable_uint8_img(aug, iterations=5, shape=(25, 25, 1))\n\n\nclass Test_apply_jigsaw(unittest.TestCase):\n def test_no_movement(self):\n dtypes = [\n \"bool\",\n \"uint8\", \"uint16\", \"uint32\", \"uint64\",\n \"int8\", \"int16\", \"int32\", \"int64\",\n \"float16\", \"float32\", \"float64\"\n ]\n\n try:\n dtypes.append(np.dtype(\"float128\"))\n except TypeError:\n pass # float128 not known on system\n\n for dtype in dtypes:\n with self.subTest(dtype=dtype):\n arr = np.arange(20*20*1).reshape((20, 20, 1))\n if dtype == \"bool\":\n mask = np.logical_or(\n arr % 4 == 0,\n arr % 7 == 0)\n arr[mask] = 1\n arr[~mask] = 0\n arr = arr.astype(dtype)\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n arr[0, 0] = min_value\n arr[0, 1] = max_value\n\n destinations = np.arange(5*5).reshape((5, 5))\n\n observed = iaa.apply_jigsaw(arr, destinations)\n\n if arr.dtype.kind != \"f\":\n assert np.array_equal(observed, arr)\n else:\n atol = 1e-4 if dtype == \"float16\" else 1e-8\n assert np.allclose(observed, arr, rtol=0, atol=atol)\n\n def test_no_movement_zero_sized_axes(self):\n sizes = [\n (0, 1),\n (1, 0),\n (0, 0)\n ]\n\n dtype = \"uint8\"\n for size in sizes:\n with self.subTest(size=size):\n arr = np.zeros(size, dtype=dtype)\n destinations = np.arange(1*1).reshape((1, 1))\n\n observed = iaa.apply_jigsaw(arr, destinations)\n\n assert np.array_equal(observed, arr)\n\n def _test_two_cells_moved__n_channels(self, nb_channels):\n dtypes = [\n \"bool\",\n \"uint8\", \"uint16\", \"uint32\", \"uint64\",\n \"int8\", \"int16\", 
\"int32\", \"int64\",\n \"float16\", \"float32\", \"float64\"\n ]\n\n try:\n dtypes.append(np.dtype(\"float128\").name)\n except TypeError:\n pass # float128 not known by user system\n\n for dtype in dtypes:\n with self.subTest(dtype=dtype):\n c = 1 if nb_channels is None else nb_channels\n arr = np.arange(20*20*c)\n if dtype == \"bool\":\n mask = np.logical_or(\n arr % 4 == 0,\n arr % 7 == 0)\n arr[mask] = 1\n arr[~mask] = 0\n if nb_channels is not None:\n arr = arr.reshape((20, 20, c))\n else:\n arr = arr.reshape((20, 20))\n arr = arr.astype(dtype)\n min_value, center_value, max_value = \\\n iadt.get_value_range_of_dtype(dtype)\n arr[0, 0] = min_value\n arr[0, 1] = max_value\n\n destinations = np.arange(5*5).reshape((5, 5))\n destinations[0, 0] = 4 # cell 0 will be filled with 4\n destinations[0, 4] = 0 # cell 4 will be filled with 0\n destinations[0, 1] = 6 # cell 1 will be filled with 6\n destinations[1, 1] = 1 # cell 6 will be filled with 1\n\n observed = iaa.apply_jigsaw(arr, destinations)\n\n cell_0_obs = observed[0:4, 0:4]\n cell_0_exp = arr[0:4, 16:20]\n cell_4_obs = observed[0:4, 16:20]\n cell_4_exp = arr[0:4, 0:4]\n cell_1_obs = observed[0:4, 4:8]\n cell_1_exp = arr[4:8, 4:8]\n cell_6_obs = observed[4:8, 4:8]\n cell_6_exp = arr[0:4, 4:8]\n cell_2_obs = observed[0:4, 8:12]\n cell_2_exp = arr[0:4, 8:12]\n if arr.dtype.kind != \"f\":\n assert np.array_equal(cell_0_obs, cell_0_exp)\n assert np.array_equal(cell_4_obs, cell_4_exp)\n assert np.array_equal(cell_1_obs, cell_1_exp)\n assert np.array_equal(cell_6_obs, cell_6_exp)\n assert np.array_equal(cell_2_obs, cell_2_exp)\n else:\n atol = 1e-4 if dtype == \"float16\" else 1e-8\n kwargs = {\"rtol\": 0, \"atol\": atol}\n assert np.allclose(cell_0_obs, cell_0_exp, **kwargs)\n assert np.allclose(cell_4_obs, cell_4_exp, **kwargs)\n assert np.allclose(cell_1_obs, cell_1_exp, **kwargs)\n assert np.allclose(cell_6_obs, cell_6_exp, **kwargs)\n assert np.allclose(cell_2_obs, cell_2_exp, **kwargs)\n\n assert observed.shape == arr.shape\n assert observed.dtype.name == dtype\n\n def test_two_cells_moved__no_channels(self):\n self._test_two_cells_moved__n_channels(None)\n\n def test_two_cells_moved__1_channel(self):\n self._test_two_cells_moved__n_channels(1)\n\n def test_two_cells_moved__3_channels(self):\n self._test_two_cells_moved__n_channels(3)\n\n\nclass Test_apply_jigsaw_to_coords(unittest.TestCase):\n def test_no_movement(self):\n arr = np.float32([\n (0.0, 0.0),\n (5.0, 5.0),\n (25.0, 50.5),\n (10.01, 21.0)\n ])\n destinations = np.arange(10*10).reshape((10, 10))\n\n observed = iaa.apply_jigsaw_to_coords(arr, destinations, (50, 100))\n\n assert np.allclose(observed, arr)\n\n def test_with_movement(self):\n arr = np.float32([\n (0.0, 0.0), # in cell (0,0) = idx 0\n (5.0, 5.0), # in cell (0,0) = idx 0\n (25.0, 50.5), # in cell (5,2) = idx 52\n (10.01, 21.0) # in cell (2,1) = idx 12\n ])\n destinations = np.arange(10*10).reshape((10, 10))\n destinations[0, 0] = 1\n destinations[0, 1] = 0\n destinations[5, 2] = 7\n destinations[0, 7] = 52\n\n observed = iaa.apply_jigsaw_to_coords(arr, destinations, (100, 100))\n\n expected = np.float32([\n (10.0, 0.0),\n (15.0, 5.0),\n (75.0, 0.5),\n (10.01, 21.0)\n ])\n assert np.allclose(observed, expected)\n\n def test_with_movement_non_square_image(self):\n arr = np.float32([\n (0.5, 0.6), # in cell (0,0) = idx 0\n (180.7, 90.8), # in cell (9,9) = idx 99\n ])\n destinations = np.arange(10*10).reshape((10, 10))\n destinations[0, 0] = 99\n destinations[9, 9] = 0\n\n observed = iaa.apply_jigsaw_to_coords(arr, 
destinations, (100, 200))\n\n expected = np.float32([\n (180+0.5, 90+0.6),\n (0+0.7, 0+0.8)\n ])\n assert np.allclose(observed, expected)\n\n def test_empty_coords(self):\n arr = np.zeros((0, 2), dtype=np.float32)\n destinations = np.arange(10*10).reshape((10, 10))\n\n observed = iaa.apply_jigsaw_to_coords(arr, destinations, (100, 100))\n\n assert np.allclose(observed, arr)\n\n\nclass Test_generate_jigsaw_destinations(unittest.TestCase):\n def test_max_steps_0(self):\n rng = iarandom.RNG(0)\n max_steps = 0\n rows = 10\n cols = 20\n\n observed = iaa.generate_jigsaw_destinations(rows, cols, max_steps, rng,\n connectivity=8)\n\n assert np.array_equal(\n observed,\n np.arange(rows*cols).reshape((rows, cols)))\n\n def test_max_steps_1(self):\n rng = iarandom.RNG(0)\n max_steps = 1\n rows = 10\n cols = 20\n\n observed = iaa.generate_jigsaw_destinations(rows, cols, max_steps, rng,\n connectivity=8)\n\n yy = (observed // cols).reshape((rows, cols))\n xx = np.mod(observed, cols).reshape((rows, cols))\n yy_expected = np.tile(np.arange(rows).reshape((rows, 1)), (1, cols))\n xx_expected = np.tile(np.arange(cols).reshape((1, cols)), (rows, 1))\n\n yy_diff = yy_expected - yy\n xx_diff = xx_expected - xx\n dist = np.sqrt(yy_diff ** 2 + xx_diff ** 2)\n\n assert np.min(dist) <= 0.01\n assert np.any(dist >= np.sqrt(2) - 1e-4)\n assert np.max(dist) <= np.sqrt(2) + 1e-4\n\n def test_max_steps_1_connectivity_4(self):\n rng = iarandom.RNG(0)\n max_steps = 1\n rows = 10\n cols = 20\n\n observed = iaa.generate_jigsaw_destinations(rows, cols, max_steps, rng,\n connectivity=4)\n\n yy = (observed // cols).reshape((rows, cols))\n xx = np.mod(observed, cols).reshape((rows, cols))\n yy_expected = np.tile(np.arange(rows).reshape((rows, 1)), (1, cols))\n xx_expected = np.tile(np.arange(cols).reshape((1, cols)), (rows, 1))\n\n yy_diff = yy_expected - yy\n xx_diff = xx_expected - xx\n dist = np.sqrt(yy_diff ** 2 + xx_diff ** 2)\n\n assert np.min(dist) <= 0.01\n assert np.any(dist >= 0.99)\n assert np.max(dist) <= 1.01\n\n\nclass TestJigsaw(unittest.TestCase):\n def setUp(self):\n reseed()\n\n def test___init___defaults(self):\n aug = iaa.Jigsaw(nb_rows=1, nb_cols=2)\n assert aug.nb_rows.value == 1\n assert aug.nb_cols.value == 2\n assert aug.max_steps.value == 1\n assert aug.allow_pad is True\n\n def test___init___custom(self):\n aug = iaa.Jigsaw(nb_rows=1, nb_cols=2, max_steps=3, allow_pad=False)\n assert aug.nb_rows.value == 1\n assert aug.nb_cols.value == 2\n assert aug.max_steps.value == 3\n assert aug.allow_pad is False\n\n def test__draw_samples(self):\n aug = iaa.Jigsaw(nb_rows=(1, 5), nb_cols=(1, 6), max_steps=(1, 3))\n batch = mock.Mock()\n batch.nb_rows = 100\n\n samples = aug._draw_samples(batch, iarandom.RNG(0))\n\n assert len(np.unique(samples.nb_rows)) > 1\n assert len(np.unique(samples.nb_cols)) > 1\n assert len(np.unique(samples.max_steps)) > 1\n assert np.all(samples.nb_rows >= 1)\n assert np.all(samples.nb_rows <= 5)\n assert np.all(samples.nb_cols >= 1)\n assert np.all(samples.nb_cols <= 6)\n assert np.all(samples.max_steps >= 1)\n assert np.all(samples.max_steps <= 3)\n\n all_same = True\n first = samples.destinations[0]\n for dest in samples.destinations:\n this_same = (dest.shape == first.shape\n and np.array_equal(dest, first))\n all_same = all_same and this_same\n assert not all_same\n\n def test_images_without_shifts(self):\n aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=0)\n image = np.mod(np.arange(20*20*3), 255).astype(np.uint8)\n image = image.reshape((20, 20, 3))\n\n image_aug = 
aug(image=image)\n\n assert image_aug.dtype.name == \"uint8\"\n assert image_aug.shape == (20, 20, 3)\n assert np.array_equal(image_aug, image)\n\n def test_heatmaps_without_shifts(self):\n aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=0)\n arr = np.linspace(0, 1.0, 20*20*1).astype(np.float32)\n arr = arr.reshape((20, 20, 1))\n heatmap = ia.HeatmapsOnImage(arr, shape=(20, 20, 3))\n\n heatmap_aug = aug(heatmaps=heatmap)\n\n assert heatmap_aug.shape == (20, 20, 3)\n assert np.allclose(heatmap_aug.arr_0to1, heatmap.arr_0to1)\n\n def test_segmaps_without_shifts(self):\n aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=0)\n arr = np.zeros((20, 20, 1), dtype=np.int32)\n arr[0:10, :] = 1\n arr[10:20, 10:20] = 2\n arr = arr.reshape((20, 20, 1))\n segmap = ia.SegmentationMapsOnImage(arr, shape=(20, 20, 3))\n\n segmap_aug = aug(segmentation_maps=segmap)\n\n assert segmap_aug.shape == (20, 20, 3)\n assert np.array_equal(segmap_aug.arr, segmap.arr)\n\n def test_keypoints_without_shifts(self):\n aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=0)\n kpsoi = ia.KeypointsOnImage.from_xy_array([\n (0, 0),\n (5.5, 3.5),\n (12.1, 23.5)\n ], shape=(20, 20, 3))\n\n kpsoi_aug = aug(keypoints=kpsoi)\n\n assert kpsoi_aug.shape == (20, 20, 3)\n assert np.allclose(kpsoi_aug.to_xy_array(), kpsoi.to_xy_array())\n\n def test_images_with_shifts(self):\n # these rows/cols/max_steps parameters are mostly ignored due to the\n # mocked _draw_samples method below\n aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=1)\n image = np.mod(np.arange(19*19*3), 255).astype(np.uint8)\n image = image.reshape((19, 19, 3))\n destinations = np.array([\n [3, 1],\n [2, 0]\n ], dtype=np.int32)\n\n old_func = aug._draw_samples\n\n def _mocked_draw_samples(batch, random_state):\n samples = old_func(batch, random_state)\n return geometriclib._JigsawSamples(\n nb_rows=samples.nb_rows,\n nb_cols=samples.nb_cols,\n max_steps=samples.max_steps,\n destinations=[destinations])\n\n aug._draw_samples = _mocked_draw_samples\n\n image_aug = aug(image=image)\n\n expected = iaa.pad(image, bottom=1, right=1, cval=0)\n expected = iaa.apply_jigsaw(expected, destinations)\n assert np.array_equal(image_aug, expected)\n\n def test_heatmaps_with_shifts(self):\n # these rows/cols/max_steps parameters are mostly ignored due to the\n # mocked _draw_samples method below\n aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=1)\n arr = np.linspace(0, 1.0, 18*18*1).astype(np.float32)\n arr = arr.reshape((18, 18, 1))\n heatmap = ia.HeatmapsOnImage(arr, shape=(19, 19, 3))\n destinations = np.array([\n [3, 1],\n [2, 0]\n ], dtype=np.int32)\n\n old_func = aug._draw_samples\n\n def _mocked_draw_samples(batch, random_state):\n samples = old_func(batch, random_state)\n return geometriclib._JigsawSamples(\n nb_rows=samples.nb_rows,\n nb_cols=samples.nb_cols,\n max_steps=samples.max_steps,\n destinations=[destinations])\n\n aug._draw_samples = _mocked_draw_samples\n\n heatmap_aug = aug(heatmaps=heatmap)\n\n expected = ia.imresize_single_image(arr, (19, 19),\n interpolation=\"cubic\")\n expected = np.clip(expected, 0, 1.0)\n expected = iaa.pad(expected, bottom=1, right=1, cval=0.0)\n expected = iaa.apply_jigsaw(expected, destinations)\n expected = ia.imresize_single_image(expected, (18, 18),\n interpolation=\"cubic\")\n expected = np.clip(expected, 0, 1.0)\n assert np.allclose(heatmap_aug.arr_0to1, expected)\n\n def test_segmaps_with_shifts(self):\n # these rows/cols/max_steps parameters are mostly ignored due to the\n # mocked _draw_samples method below\n aug = 
iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=1)\n arr = np.zeros((18, 18, 1), dtype=np.int32)\n arr[0:10, :] = 1\n arr[10:18, 10:18] = 2\n arr = arr.reshape((18, 18, 1))\n segmap = ia.SegmentationMapsOnImage(arr, shape=(19, 19, 3))\n destinations = np.array([\n [3, 1],\n [2, 0]\n ], dtype=np.int32)\n\n old_func = aug._draw_samples\n\n def _mocked_draw_samples(batch, random_state):\n samples = old_func(batch, random_state)\n return geometriclib._JigsawSamples(\n nb_rows=samples.nb_rows,\n nb_cols=samples.nb_cols,\n max_steps=samples.max_steps,\n destinations=[destinations])\n\n aug._draw_samples = _mocked_draw_samples\n\n segmap_aug = aug(segmentation_maps=segmap)\n\n expected = ia.imresize_single_image(arr, (19, 19),\n interpolation=\"nearest\")\n expected = iaa.pad(expected, bottom=1, right=1, cval=0)\n expected = iaa.apply_jigsaw(expected, destinations)\n expected = ia.imresize_single_image(expected, (18, 18),\n interpolation=\"nearest\")\n assert np.array_equal(segmap_aug.arr, expected)\n\n def test_keypoints_with_shifts(self):\n # these rows/cols/max_steps parameters are mostly ignored due to the\n # mocked _draw_samples method below\n aug = iaa.Jigsaw(nb_rows=5, nb_cols=5, max_steps=1)\n kpsoi = ia.KeypointsOnImage.from_xy_array([\n (0, 0),\n (5.5, 3.5),\n (4.0, 12.5),\n (11.1, 11.2),\n (12.1, 23.5)\n ], shape=(18, 18, 3))\n destinations = np.array([\n [3, 1],\n [2, 0]\n ], dtype=np.int32)\n\n old_func = aug._draw_samples\n\n def _mocked_draw_samples(batch, random_state):\n samples = old_func(batch, random_state)\n return geometriclib._JigsawSamples(\n nb_rows=samples.nb_rows,\n nb_cols=samples.nb_cols,\n max_steps=samples.max_steps,\n destinations=[destinations])\n\n aug._draw_samples = _mocked_draw_samples\n\n kpsoi_aug = aug(keypoints=kpsoi)\n\n expected = kpsoi.deepcopy()\n expected.shape = (20, 20, 3)\n # (0.0, 0.0) to cell at bottom-right, 1px pad at top and left\n expected.keypoints[0].x = 10.0 + (0.0 - 0.0) + 1.0\n expected.keypoints[0].y = 10.0 + (0.0 - 0.0) + 1.0\n # (5.5, 3.5) to cell at bottom-right, 1px pad at top and left\n expected.keypoints[1].x = 10.0 + (5.5 - 0.0) + 1.0\n expected.keypoints[1].y = 10.0 + (3.5 - 0.0) + 1.0\n # (4.0, 12.5) not moved to other cell, but 1px pad at top and left\n expected.keypoints[2].x = 4.0 + 1.0\n expected.keypoints[2].y = 12.5 + 1.0\n # (11.1, 11.2) to cell at top-left, 1px pad at top and left\n expected.keypoints[3].x = 0.0 + (11.1 - 10.0) + 1.0\n expected.keypoints[3].y = 0.0 + (11.2 - 10.0) + 1.0\n # (12.1, 23.5) not moved to other cell, but 1px pad at top and left\n expected.keypoints[4].x = 12.1 + 1.0\n expected.keypoints[4].y = 23.5 + 1.0\n assert kpsoi_aug.shape == (20, 20, 3)\n assert np.allclose(kpsoi_aug.to_xy_array(), expected.to_xy_array())\n\n def test_images_and_heatmaps_aligned(self):\n nb_changed = 0\n rs = iarandom.RNG(0)\n for _ in np.arange(10):\n aug = iaa.Jigsaw(nb_rows=(2, 5), nb_cols=(2, 5), max_steps=(0, 3))\n image_small = rs.integers(0, 10, size=(10, 15)).astype(np.float32)\n image_small = image_small / 10.0\n image = ia.imresize_single_image(image_small, (20, 30),\n interpolation=\"cubic\")\n image = np.clip(image, 0, 1.0)\n hm = ia.HeatmapsOnImage(image_small, shape=(20, 30))\n\n images_aug, hms_aug = aug(images=[image, image, image],\n heatmaps=[hm, hm, hm])\n\n for image_aug, hm_aug in zip(images_aug, hms_aug):\n # TODO added squeeze here because get_arr() falsely returns\n # (H,W,1) for 2D inputs\n arr = np.squeeze(hm_aug.get_arr())\n image_aug_rs = 
ia.imresize_single_image(\n image_aug.astype(np.float32),\n arr.shape[0:2],\n interpolation=\"cubic\")\n image_aug_rs = np.clip(image_aug_rs, 0, 1.0)\n overlap = np.average(np.isclose(image_aug_rs, arr))\n\n assert overlap > 0.99\n if not np.array_equal(arr, hm.get_arr()):\n nb_changed += 1\n assert nb_changed > 5\n\n def test_images_and_segmaps_aligned(self):\n nb_changed = 0\n rs = iarandom.RNG(0)\n for _ in np.arange(10):\n aug = iaa.Jigsaw(nb_rows=(2, 5), nb_cols=(2, 5), max_steps=(0, 3))\n image_small = rs.integers(0, 10, size=(10, 15))\n image = ia.imresize_single_image(image_small, (20, 30),\n interpolation=\"nearest\")\n image = image.astype(np.uint8)\n segm = ia.SegmentationMapsOnImage(image_small, shape=(20, 30))\n\n images_aug, sms_aug = aug(images=[image, image, image],\n segmentation_maps=[segm, segm, segm])\n\n for image_aug, sm_aug in zip(images_aug, sms_aug):\n arr = sm_aug.get_arr()\n image_aug_rs = ia.imresize_single_image(\n image_aug, arr.shape[0:2], interpolation=\"nearest\")\n overlap = np.average(image_aug_rs == arr)\n\n assert overlap > 0.99\n if not np.array_equal(arr, segm.arr):\n nb_changed += 1\n assert nb_changed > 5\n\n def test_images_and_keypoints_aligned(self):\n for i in np.arange(20):\n aug = iaa.Jigsaw(nb_rows=(1, 3), nb_cols=(1, 3), max_steps=(2, 5),\n seed=i)\n # make sure that these coords are not exactly at a grid cell\n # border with any possibly sampled height/width in grid cells\n y = 17.5\n x = 25.5\n kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=x, y=y)],\n shape=(20, 30))\n image = np.zeros((20, 30), dtype=np.uint8)\n image[int(y), int(x)] = 255\n\n images_aug, kpsois_aug = aug(images=[image, image, image],\n keypoints=[kpsoi, kpsoi, kpsoi])\n\n for image_aug, kpsoi_aug in zip(images_aug, kpsois_aug):\n x_aug = kpsoi_aug.keypoints[0].x\n y_aug = kpsoi_aug.keypoints[0].y\n idx = np.argmax(image_aug)\n y_aug_img, x_aug_img = np.unravel_index(idx,\n image_aug.shape)\n dist = np.sqrt((x_aug - x_aug_img)**2 + (y_aug - y_aug_img)**2)\n # best possible distance is about 0.7 as KP coords are in cell\n # center and sampled coords are at cell top left\n assert dist < 0.8\n\n def test_no_error_for_1x1_grids(self):\n aug = iaa.Jigsaw(nb_rows=1, nb_cols=1, max_steps=2)\n image = np.mod(np.arange(19*19*3), 255).astype(np.uint8)\n image = image.reshape((19, 19, 3))\n kpsoi = ia.KeypointsOnImage.from_xy_array([\n (0, 0),\n (5.5, 3.5),\n (4.0, 12.5),\n (11.1, 11.2),\n (12.1, 23.5)\n ], shape=(19, 19, 3))\n\n image_aug, kpsoi_aug = aug(image=image, keypoints=kpsoi)\n\n assert np.array_equal(image_aug, image)\n assert np.allclose(kpsoi_aug.to_xy_array(), kpsoi.to_xy_array())\n\n def test_zero_sized_axes(self):\n shapes = [\n (0, 0),\n (0, 1),\n (1, 0),\n (0, 1, 0),\n (1, 0, 0),\n (0, 1, 1),\n (1, 0, 1)\n ]\n\n for shape in shapes:\n with self.subTest(shape=shape):\n for _ in sm.xrange(3):\n image = np.zeros(shape, dtype=np.uint8)\n aug = iaa.Jigsaw(nb_rows=2, nb_cols=2, max_steps=2)\n\n image_aug = aug(image=image)\n\n # (2, 2, [C]) here, because rows/cols are padded to be\n # multiple of nb_rows and nb_cols\n shape_exp = tuple([2, 2] + list(shape[2:]))\n assert image_aug.dtype.name == \"uint8\"\n assert np.array_equal(image_aug,\n np.zeros(shape_exp, dtype=np.uint8))\n\n def test_get_parameters(self):\n aug = iaa.Jigsaw(nb_rows=1, nb_cols=2)\n params = aug.get_parameters()\n assert params[0] is aug.nb_rows\n assert params[1] is aug.nb_cols\n assert params[2] is aug.max_steps\n assert params[3] is True\n\n def test_pickleable(self):\n aug = iaa.Jigsaw(nb_rows=(1, 
4), nb_cols=(1, 4), max_steps=(1, 3))\n runtest_pickleable_uint8_img(aug, iterations=20, shape=(32, 32, 3))\n"
] |
[
[
"numpy.maximum",
"numpy.clip",
"numpy.tile",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.sqrt",
"numpy.linspace",
"numpy.dtype",
"numpy.all",
"numpy.max",
"numpy.round",
"numpy.any",
"numpy.allclose",
"numpy.pad",
"numpy.clip",
"numpy.fliplr",
"numpy.arange",
"numpy.uint8",
"numpy.unique",
"numpy.full",
"numpy.copy",
"numpy.std",
"numpy.argmax",
"numpy.float32",
"numpy.zeros",
"numpy.unravel_index",
"numpy.isclose",
"numpy.rot90",
"numpy.nonzero",
"numpy.min",
"numpy.logical_or",
"numpy.logical_and",
"numpy.array",
"numpy.sum",
"numpy.array_equal",
"numpy.int32",
"numpy.tile",
"numpy.ones",
"numpy.float64",
"numpy.mod",
"numpy.average"
]
] |
StevePhan97/depth-estimation
|
[
"5500ea4b415ee9eef20ccb78a906dea577bde773"
] |
[
"dataloaders/dataloader.py"
] |
[
"import os\nimport os.path\nimport numpy as np\nimport torch.utils.data as data\nimport h5py\nimport dataloaders.transforms as transforms\n\ndef h5_loader(path, tranpose=False):\n h5f = h5py.File(path, \"r\")\n rgb = np.array(h5f['rgb'])\n rgb = np.transpose(rgb, (1, 2, 0))\n depth = np.array(h5f['depth'])\n return rgb, depth\n\n# def rgb2grayscale(rgb):\n# return rgb[:,:,0] * 0.2989 + rgb[:,:,1] * 0.587 + rgb[:,:,2] * 0.114\n\nclass MyDataloader(data.Dataset):\n modality_names = ['rgb']\n\n def is_image_file(self, filename):\n IMG_EXTENSIONS = ['.h5']\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n def find_classes(self, dir):\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n def make_dataset(self, dir, class_to_idx):\n images = []\n dir = os.path.expanduser(dir)\n for target in sorted(os.listdir(dir)):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n for root, _, fnames in sorted(os.walk(d)):\n for fname in sorted(fnames):\n if self.is_image_file(fname):\n path = os.path.join(root, fname)\n item = (path, class_to_idx[target])\n # Path_to_class, index of class\n images.append(item)\n return images\n\n color_jitter = transforms.ColorJitter(0.4, 0.4, 0.4)\n\n def __init__(self, root, split, modality='rgb', loader=h5_loader):\n classes, class_to_idx = self.find_classes(root)\n imgs = self.make_dataset(root, class_to_idx)\n assert len(imgs)>0, \"Found 0 images in subfolders of: \" + root + \"\\n\"\n # print(\"Found {} images in {} folder.\".format(len(imgs), split))\n self.root = root\n self.imgs = imgs\n self.classes = classes\n self.class_to_idx = class_to_idx\n if split == 'train':\n self.transform = self.train_transform\n elif split == 'holdout':\n self.transform = self.val_transform\n elif split == 'val':\n self.transform = self.val_transform\n else:\n raise (RuntimeError(\"Invalid dataset split: \" + split + \"\\n\"\n \"Supported dataset splits are: train, val\"))\n self.loader = loader\n\n assert (modality in self.modality_names), \"Invalid modality split: \" + modality + \"\\n\" + \\\n \"Supported dataset splits are: \" + ''.join(self.modality_names)\n self.modality = modality\n\n def train_transform(self, rgb, depth):\n raise (RuntimeError(\"train_transform() is not implemented. \"))\n\n def val_transform(self, rgb, depth):\n raise (RuntimeError(\"val_transform() is not implemented.\"))\n\n def __getraw__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (rgb, depth) the raw data.\n \"\"\"\n path, target = self.imgs[index]\n rgb, depth = self.loader(path)\n return rgb, depth\n\n def __getitem__(self, index):\n rgb, depth = self.__getraw__(index)\n if rgb.ndim < 2 and depth.ndim != 2:\n print(\"Wrong DEPTH \",depth)\n return None\n if self.transform is not None:\n rgb_np, depth_np = self.transform(rgb, depth)\n else:\n raise(RuntimeError(\"transform not defined\"))\n\n # color normalization\n # rgb_tensor = normalize_rgb(rgb_tensor)\n # rgb_np = normalize_np(rgb_np)\n\n if self.modality == 'rgb':\n input_np = rgb_np\n\n to_tensor = transforms.ToTensor()\n input_tensor = to_tensor(input_np)\n while input_tensor.dim() < 3:\n input_tensor = input_tensor.unsqueeze(0)\n depth_tensor = to_tensor(depth_np)\n depth_tensor = depth_tensor.unsqueeze(0)\n\n return input_tensor, depth_tensor\n\n def __len__(self):\n return len(self.imgs)\n"
] |
[
[
"numpy.array",
"numpy.transpose"
]
] |
objectiv/objectiv-analytics
|
[
"86ec1508f71c2d61ea7d67479800e4dc417a46e1",
"86ec1508f71c2d61ea7d67479800e4dc417a46e1"
] |
[
"modelhub/tests_modelhub/functional/modelhub/logistic_regression_test_utils.py",
"bach/tests/functional/bach/test_df_scale.py"
] |
[
"\"\"\"\nCopyright 2021 Objectiv B.V.\n\"\"\"\n\n# Any import from modelhub initializes all the types, do not remove\n\n\n\nfrom typing import Iterable\nimport bach\nfrom sklearn.linear_model import LogisticRegression\nfrom modelhub import ModelHub\nimport numpy as np\n\n\nclass TestLR:\n \"\"\"\n Tests if model used on Bach gives the same outcome as model used on sklearn.\n Note that it only works for solvers that provide consistent outcomes.\n \"\"\"\n\n def __init__(self, X: bach.DataFrame, y: bach.Series, **kwargs):\n \"\"\"\n :param X: the independent variables.\n :param y: the target variable.\n :param **kwargs: any parameters that sklearn's LogisticRegression takes for instantiating the model.\n These parameters are used also for the model hub version of Logistic Regression.\n \"\"\"\n\n self.X = X\n self.y = y\n\n self.X_p = self.X.to_pandas()\n self.y_p = self.y.to_pandas()\n\n modelhub = ModelHub()\n\n self.sklearn_lr = LogisticRegression(**kwargs)\n self.modelhub_lr = modelhub.get_logistic_regression(**kwargs)\n\n self.sklearn_lr.fit(self.X_p, self.y_p)\n self.modelhub_lr.fit(self.X, self.y)\n\n def test_fitted_model(self):\n for key, value in self.sklearn_lr.__dict__.items():\n modelhub_value = getattr(self.modelhub_lr, key)\n print(f'testing {key}')\n print(f'modelhub value: {modelhub_value}\\nsklearn value : {value}\\n')\n result = modelhub_value == value\n if isinstance(result, Iterable):\n result = result.all()\n assert result\n\n def test_method(self, method_name, X=False, y=False, **kwargs):\n \"\"\"\n tests if modelhub outcome of method is the same as sklearn.\n \"\"\"\n\n if method_name not in ['decision_function',\n 'predict',\n 'predict_proba',\n 'score']:\n raise NotImplementedError(f\"method {method_name} not supported\")\n\n sklearn_args = tuple()\n modelhub_args = tuple()\n\n if X:\n sklearn_args = (self.X_p,)\n modelhub_args = (self.X,)\n if y:\n sklearn_args = (self.X_p, self.y_p)\n modelhub_args = (self.X, self.y)\n\n sklearn_method = getattr(self.sklearn_lr, method_name)\n modelhub_method = getattr(self.modelhub_lr, method_name)\n sklearn_data = sklearn_method(*sklearn_args, **kwargs)\n modelhub_data = modelhub_method(*modelhub_args, **kwargs)\n\n if method_name == 'predict_proba':\n # extract the probabilities from sklearn's predicted probabilities for the True class only:\n sklearn_data = sklearn_data[:, np.where(self.sklearn_lr.classes_)[0][0]]\n\n if method_name == 'score':\n equals = np.isclose(sklearn_data, modelhub_data)\n else:\n modelhub_data = modelhub_data.to_numpy()\n equals = np.isclose(sklearn_data, modelhub_data).all()\n\n assert equals, f\"modelhub_data: {modelhub_data} != sklearn_data: {sklearn_data}\"\n\n print(f\"test ok\")\n print(f'modelhub value: {modelhub_data}\\nsklearn value : {sklearn_data}\\n')\n\n",
"from sklearn.preprocessing import StandardScaler, MinMaxScaler\n\nfrom tests.functional.bach.test_data_and_utils import get_df_with_test_data, assert_equals_data, \\\n get_bt_with_test_data\nimport numpy as np\n\n\ndef test_standard_scale(engine) -> None:\n numerical_cols = ['skating_order', 'inhabitants', 'founding']\n all_cols = ['city'] + numerical_cols\n bt = get_df_with_test_data(engine, full_data_set=True)[all_cols]\n\n pdf = bt.to_pandas()\n\n so_values = bt.skating_order.to_numpy()\n so_avg = np.mean(so_values)\n so_std = np.var(so_values)\n so_scale = so_std ** 0.5\n\n inhbt_values = bt.inhabitants.to_numpy()\n inhbt_avg = np.mean(inhbt_values)\n inhbt_std = np.var(inhbt_values)\n inhbt_scale = inhbt_std ** 0.5\n\n fnd_values = bt.founding.to_numpy()\n fnd_avg = np.mean(fnd_values)\n fnd_std = np.var(fnd_values)\n fnd_scale = fnd_std ** 0.5\n\n expected_w_mean_std = StandardScaler(with_mean=True, with_std=True).fit_transform(pdf[numerical_cols])\n result_w_mean_std = bt.scale()\n\n np.testing.assert_almost_equal(expected_w_mean_std, result_w_mean_std[numerical_cols].to_numpy(), decimal=4)\n\n expected_data_w_mean_std = [\n [1, 'Ljouwert', (1 - so_avg) / so_scale, (93485 - inhbt_avg) / inhbt_scale, (1285 - fnd_avg) / fnd_scale],\n [2, 'Snits', (2 - so_avg) / so_scale, (33520 - inhbt_avg) / inhbt_scale, (1456 - fnd_avg) / fnd_scale],\n [3, 'Drylts', (3 - so_avg) / so_scale, (3055 - inhbt_avg) / inhbt_scale, (1268 - fnd_avg) / fnd_scale],\n [4, 'Sleat', (4 - so_avg) / so_scale, (700 - inhbt_avg) / inhbt_scale, (1426 - fnd_avg) / fnd_scale],\n [5, 'Starum', (5 - so_avg) / so_scale, (960 - inhbt_avg) / inhbt_scale, (1061 - fnd_avg) / fnd_scale],\n [6, 'Hylpen', (6 - so_avg) / so_scale, (870 - inhbt_avg) / inhbt_scale, (1225 - fnd_avg) / fnd_scale],\n [7, 'Warkum', (7 - so_avg) / so_scale, (4440 - inhbt_avg) / inhbt_scale, (1399 - fnd_avg) / fnd_scale],\n [8, 'Boalsert', (8 - so_avg) / so_scale, (10120 - inhbt_avg) / inhbt_scale, (1455 - fnd_avg) / fnd_scale],\n [9, 'Harns', (9 - so_avg) / so_scale, (14740 - inhbt_avg) / inhbt_scale, (1234 - fnd_avg) / fnd_scale],\n [10, 'Frjentsjer', (10 - so_avg) / so_scale, (12760 - inhbt_avg) / inhbt_scale, (1374 - fnd_avg) / fnd_scale],\n [11, 'Dokkum', (11 - so_avg) / so_scale, (12675 - inhbt_avg) / inhbt_scale, (1298 - fnd_avg) / fnd_scale],\n ]\n assert_equals_data(\n result_w_mean_std,\n expected_columns=['_index_skating_order', 'city', 'skating_order', 'inhabitants', 'founding'],\n expected_data=expected_data_w_mean_std,\n round_decimals=True,\n )\n\n expected_w_std = StandardScaler(with_mean=False, with_std=True).fit_transform(pdf[numerical_cols])\n result_w_std = bt.scale(with_mean=False, with_std=True)\n\n np.testing.assert_almost_equal(expected_w_std, result_w_std[numerical_cols].to_numpy(), decimal=4)\n\n expected_data_w_std = [\n [1, 'Ljouwert', 1 / so_scale, 93485 / inhbt_scale, 1285 / fnd_scale],\n [2, 'Snits', 2 / so_scale, 33520 / inhbt_scale, 1456 / fnd_scale],\n [3, 'Drylts', 3 / so_scale, 3055 / inhbt_scale, 1268 / fnd_scale],\n [4, 'Sleat', 4 / so_scale, 700 / inhbt_scale, 1426 / fnd_scale],\n [5, 'Starum', 5 / so_scale, 960 / inhbt_scale, 1061 / fnd_scale],\n [6, 'Hylpen', 6 / so_scale, 870 / inhbt_scale, 1225 / fnd_scale],\n [7, 'Warkum', 7 / so_scale, 4440 / inhbt_scale, 1399 / fnd_scale],\n [8, 'Boalsert', 8 / so_scale, 10120 / inhbt_scale, 1455 / fnd_scale],\n [9, 'Harns', 9 / so_scale, 14740 / inhbt_scale, 1234 / fnd_scale],\n [10, 'Frjentsjer', 10 / so_scale, 12760 / inhbt_scale, 1374 / fnd_scale],\n [11, 'Dokkum', 11 / 
so_scale, 12675 / inhbt_scale, 1298 / fnd_scale],\n ]\n assert_equals_data(\n result_w_std,\n expected_columns=['_index_skating_order', 'city', 'skating_order', 'inhabitants', 'founding'],\n expected_data=expected_data_w_std,\n round_decimals=True,\n )\n\n expected_w_mean = StandardScaler(with_mean=True, with_std=False).fit_transform(pdf[numerical_cols])\n result_w_mean = bt.scale(with_mean=True, with_std=False)\n\n np.testing.assert_almost_equal(expected_w_mean, result_w_mean[numerical_cols].to_numpy(), decimal=4)\n\n expected_data_w_mean = [\n [1, 'Ljouwert', 1 - so_avg, 93485 - inhbt_avg, 1285 - fnd_avg],\n [2, 'Snits', 2 - so_avg, 33520 - inhbt_avg, 1456 - fnd_avg],\n [3, 'Drylts', 3 - so_avg, 3055 - inhbt_avg, 1268 - fnd_avg],\n [4, 'Sleat', 4 - so_avg, 700 - inhbt_avg, 1426 - fnd_avg],\n [5, 'Starum', 5 - so_avg, 960 - inhbt_avg, 1061 - fnd_avg],\n [6, 'Hylpen', 6 - so_avg, 870 - inhbt_avg, 1225 - fnd_avg],\n [7, 'Warkum', 7 - so_avg, 4440 - inhbt_avg, 1399 - fnd_avg],\n [8, 'Boalsert', 8 - so_avg, 10120 - inhbt_avg, 1455 - fnd_avg],\n [9, 'Harns', 9 - so_avg, 14740 - inhbt_avg, 1234 - fnd_avg],\n [10, 'Frjentsjer', 10 - so_avg, 12760 - inhbt_avg, 1374 - fnd_avg],\n [11, 'Dokkum', 11 - so_avg, 12675 - inhbt_avg, 1298 - fnd_avg],\n ]\n assert_equals_data(\n result_w_mean,\n expected_columns=['_index_skating_order', 'city', 'skating_order', 'inhabitants', 'founding'],\n expected_data=expected_data_w_mean,\n round_decimals=True,\n )\n\n\ndef test_min_max_scale() -> None:\n numerical_cols = ['skating_order', 'inhabitants', 'founding']\n all_cols = ['city'] + numerical_cols\n bt = get_bt_with_test_data(full_data_set=True)[all_cols]\n pdf = bt.to_pandas()\n\n min_so = 1\n max_so = 11\n diff_so = max_so - min_so\n\n min_inh = 700\n max_inh = 93485\n diff_inh = max_inh - min_inh\n\n min_fnd = 1061\n max_fnd = 1456\n diff_fnd = max_fnd - min_fnd\n\n expected_default = MinMaxScaler().fit_transform(pdf[numerical_cols])\n result_default = bt.minmax_scale().sort_index()\n\n np.testing.assert_almost_equal(expected_default, result_default[numerical_cols].to_numpy(), decimal=4)\n\n expected_data_default = [\n [1, 'Ljouwert', (1 - min_so) / diff_so, (93485 - min_inh) / diff_inh, (1285 - min_fnd) / diff_fnd],\n [2, 'Snits', (2 - min_so) / diff_so, (33520 - min_inh) / diff_inh, (1456 - min_fnd) / diff_fnd],\n [3, 'Drylts', (3 - min_so) / diff_so, (3055 - min_inh) / diff_inh, (1268 - min_fnd) / diff_fnd],\n [4, 'Sleat', (4 - min_so) / diff_so, (700 - min_inh) / diff_inh, (1426 - min_fnd) / diff_fnd],\n [5, 'Starum', (5 - min_so) / diff_so, (960 - min_inh) / diff_inh, (1061 - min_fnd) / diff_fnd],\n [6, 'Hylpen', (6 - min_so) / diff_so, (870 - min_inh) / diff_inh, (1225 - min_fnd) / diff_fnd],\n [7, 'Warkum', (7 - min_so) / diff_so, (4440 - min_inh) / diff_inh, (1399 - min_fnd) / diff_fnd],\n [8, 'Boalsert', (8 - min_so) / diff_so, (10120 - min_inh) / diff_inh, (1455 - min_fnd) / diff_fnd],\n [9, 'Harns', (9 - min_so) / diff_so, (14740 - min_inh) / diff_inh, (1234 - min_fnd) / diff_fnd],\n [10, 'Frjentsjer', (10 - min_so) / diff_so, (12760 - min_inh) / diff_inh, (1374 - min_fnd) / diff_fnd],\n [11, 'Dokkum', (11 - min_so) / diff_so, (12675 - min_inh) / diff_inh, (1298 - min_fnd) / diff_fnd],\n ]\n assert_equals_data(\n result_default,\n expected_columns=['_index_skating_order', 'city', 'skating_order', 'inhabitants', 'founding'],\n expected_data=expected_data_default,\n round_decimals=True,\n )\n\n expected_w_fr = MinMaxScaler(feature_range=(2, 4)).fit_transform(pdf[numerical_cols])\n result_w_fr = 
bt.minmax_scale(feature_range=(2, 4)).sort_index()\n np.testing.assert_almost_equal(expected_w_fr, result_w_fr[numerical_cols].to_numpy(), decimal=4)\n\n expected_data_fr = []\n for row in expected_data_default:\n expected_data_fr.append([val if idx < 2 else val * 2 + 2 for idx, val in enumerate(row)])\n\n assert_equals_data(\n result_w_fr,\n expected_columns=['_index_skating_order', 'city', 'skating_order', 'inhabitants', 'founding'],\n expected_data=expected_data_fr,\n round_decimals=True,\n )\n"
] |
[
[
"numpy.where",
"sklearn.linear_model.LogisticRegression",
"numpy.isclose"
],
[
"numpy.var",
"sklearn.preprocessing.StandardScaler",
"numpy.mean",
"sklearn.preprocessing.MinMaxScaler"
]
] |
mkcor/covid-dashboard
|
[
"d4b2d6e15400a5f2ad729d0d38ace15f1c01c1bf"
] |
[
"make_figures.py"
] |
[
"\"\"\"\nUtility functions to generate plotly figures from dataframe. Called in app.py\n\"\"\"\n\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport numpy as np\nimport pandas as pd\nimport plotly.io as pio\nfrom plotly.validators.scatter.marker import SymbolValidator\n\nfrom data_input import normalize_by_population\n\npio.templates.default = \"plotly_white\"\n\nFIRST_LINE_HEIGHT = 600\n\nLABEL_FONT_SIZE = 18\n\n\ndef make_map(df, df_fatalities, df_recovered):\n \"\"\"\n Build figure with map of total number of cases\n\n Parameters\n ----------\n df: pandas DataFrame\n Our cases to plot\n pop: pandas DataFrame\n The population, used to normalize\n \"\"\"\n normalized_values = normalize_by_population(df)\n # Plot per Million individual\n normalized_values *= 1e6\n hovertemplate = ('<b>Country</b>:%{customdata[0]}<br>' +\n '<b>Active cases per million</b>: %{customdata[1]:.1f}<br>' +\n '<b>Active cases</b>: %{customdata[2]}<br>' +\n '<b>Fatalities</b>: %{customdata[3]}<br>' +\n '<b>Recovered</b>: %{customdata[4]}' \n )\n fig = px.choropleth(df, locations='iso',\n color=np.log10(normalized_values),\n custom_data=[df['country_region'], normalized_values,\n df['value'], df_fatalities['value'],\n df_recovered['value']],\n color_continuous_scale='Plasma_r',\n labels={'color': 'Active<br>cases<br>per<br>Million'})\n fig.update_layout(title='Click on map to add/remove a country',\n coloraxis_colorbar_tickprefix='1.e',\n coloraxis_colorbar_len=0.6,\n coloraxis_colorbar_title_font_size=LABEL_FONT_SIZE,\n margin=dict(l=0.03, r=0, b=0),\n height=FIRST_LINE_HEIGHT,\n geo_projection_scale=1.26)\n fig.update_traces(\n hovertemplate=hovertemplate,\n )\n return fig\n\n\ndef make_timeplot(df_measure, df_prediction):\n \"\"\"\n Build figure showing evolution of number of cases vs. 
time for all countries.\n The visibility of traces is set to 0 so that the interactive app will\n toggle the visibility.\n\n Parameters\n ----------\n df_measure: pandas DataFrame\n DataFrame of measured cases, created by :func:`data_input.get_data`, of wide format.\n\n df_prediction: pandas DataFrame\n DataFrame of predictions, with similar structure as df_measure\n \"\"\"\n # mode = 'confirmed'\n mode = 'active'\n df_measure_confirmed = df_measure[mode]\n colors = px.colors.qualitative.Dark24\n n_colors = len(colors)\n fig = go.Figure()\n hovertemplate_measure = '<b>%{meta}</b><br>%{x}<br>%{y:.0f}<extra></extra>'\n hovertemplate_prediction = '<b>%{meta}<br>prediction</b><br>%{x}<br>%{y:.0f}<extra></extra>'\n for i, country in enumerate(df_measure_confirmed.columns):\n fig.add_trace(go.Scatter(x=df_measure_confirmed.index,\n y=df_measure_confirmed[country],\n name=country[1], mode='markers+lines',\n marker_symbol = SymbolValidator().values[i],\n marker_color=colors[i%n_colors],\n line_color=colors[i%n_colors],\n meta=country[1],\n hovertemplate=hovertemplate_measure,\n visible=False))\n prediction = df_prediction['prediction']\n upper_bound = df_prediction['upper_bound']\n lower_bound = df_prediction['lower_bound']\n for i, country in enumerate(prediction.columns):\n # Do not plot predictions for a country with less than 50 cases\n if df_measure_confirmed[country][-1] < 50:\n continue\n fig.add_trace(go.Scatter(x=prediction.index,\n y=prediction[country],\n name='+' + country[1], mode='lines',\n line_dash='dash',\n line_color=colors[i%n_colors],\n showlegend=False,\n meta=country[1],\n hovertemplate=hovertemplate_prediction,\n visible=False))\n fig.add_trace(go.Scatter(x=upper_bound.index,\n y=upper_bound[country],\n name='+' + country[1], mode='lines',\n line_dash='dot',\n line_color=colors[i%n_colors],\n showlegend=False,\n visible=False,\n hoverinfo='skip',\n line_width=.8))\n fig.add_trace(go.Scatter(x=lower_bound.index,\n y=lower_bound[country],\n name='+' + country[1], mode='lines',\n line_dash='dot',\n line_color=colors[i%n_colors],\n showlegend=False,\n visible=False,\n hoverinfo='skip',\n line_width=.8))\n\n last_day = df_measure_confirmed.index.max()\n day = pd.DateOffset(days=1)\n fig.update_layout(title='',\n xaxis=dict(rangeslider_visible=True,\n range=(last_day - 10 * day,\n last_day + 4 * day)))\n fig.update_layout(\n showlegend=True,\n updatemenus=[\n dict(\n type = \"buttons\",\n direction = \"left\",\n buttons=list([\n dict(\n args=[{\"visible\": [False,]*len(df_measure_confirmed.columns)}],\n label=\"Reset\",\n method=\"update\",\n ),\n dict(\n args=[\"yaxis\", {'type':'log'}],\n label=\"log\",\n method=\"relayout\",\n ),\n dict(\n args=[\"yaxis\", {'type':'linear'}],\n label=\"lin\",\n method=\"relayout\",\n ),\n\n ]),\n pad={\"r\": 10, \"t\": 0, \"b\": 0},\n showactive=True,\n x=0.05,\n xanchor=\"left\",\n y=1.05,\n yanchor=\"top\",\n font_color='black',\n ),\n ],\n xaxis_tickfont_size=LABEL_FONT_SIZE - 4,\n yaxis_tickfont_size=LABEL_FONT_SIZE - 4,\n height=FIRST_LINE_HEIGHT,\n margin=dict(t=0, b=0.02),\n # The legend position + font size\n # See https://plot.ly/python/legend/#style-legend\n legend=dict(x=.05, y=.8, font_size=LABEL_FONT_SIZE,\n title=\"Active cases in\"),\n)\n return fig\n\n\nif __name__ == '__main__':\n from data_input import get_all_data, tidy_most_recent\n\n df, df_prediction = get_all_data()\n # most recent date, tidy format (one column for countries)\n df_tidy = tidy_most_recent(df)\n df_tidy_fatalities = tidy_most_recent(df, 'death')\n 
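# these per-country snapshots feed the fatalities/recovered hover fields of make_map\n 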
df_tidy_recovered = tidy_most_recent(df, 'recovered')\n\n fig1 = make_map(df_tidy, df_tidy_fatalities, df_tidy_recovered)\n fig2 = make_timeplot(df, df_prediction)\n"
] |
[
[
"numpy.log10",
"pandas.DateOffset"
]
] |
enric1994/PseudoLabeling
|
[
"f3122ae40773e40f4e9ff2e51b7e7daf7c6bb38d"
] |
[
"utils_pseudoLab/TwoSampler.py"
] |
[
"# Code obtained from:\n# https://github.com/CuriousAI/mean-teacher/blob/bd4313d5691f3ce4c30635e50fa207f49edf16fe/pytorch/mean_teacher/data.py\n\nimport itertools\nimport logging\nimport os.path\n\nfrom PIL import Image\nimport numpy as np\nfrom torch.utils.data.sampler import Sampler\n\n\n\nclass TwoStreamBatchSampler(Sampler):\n \"\"\"Iterate two sets of indices\n An 'epoch' is one iteration through the primary indices.\n During the epoch, the secondary indices are iterated through\n as many times as needed.\n \"\"\"\n def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):\n self.primary_indices = primary_indices\n self.secondary_indices = secondary_indices\n self.secondary_batch_size = secondary_batch_size\n self.primary_batch_size = batch_size - secondary_batch_size\n\n assert len(self.primary_indices) >= self.primary_batch_size > 0\n assert len(self.secondary_indices) >= self.secondary_batch_size > 0\n\n def __iter__(self):\n primary_iter = iterate_once(self.primary_indices)\n secondary_iter = iterate_eternally(self.secondary_indices)\n return (\n primary_batch + secondary_batch\n for (primary_batch, secondary_batch)\n in zip(grouper(primary_iter, self.primary_batch_size),\n grouper(secondary_iter, self.secondary_batch_size))\n )\n\n def __len__(self):\n return len(self.primary_indices) // self.primary_batch_size\n\n\ndef iterate_once(iterable):\n return np.random.permutation(iterable)\n\n\ndef iterate_eternally(indices):\n def infinite_shuffles():\n while True:\n yield np.random.permutation(indices)\n return itertools.chain.from_iterable(infinite_shuffles())\n\n\ndef grouper(iterable, n):\n \"Collect data into fixed-length chunks or blocks\"\n # grouper('ABCDEFG', 3) --> ABC DEF\"\n args = [iter(iterable)] * n\n return zip(*args)\n"
] |
[
[
"numpy.random.permutation"
]
] |
Stonen2/MachineLearning
|
[
"f886c28876b7226d46ce4465c70b88a0f2fffa4e"
] |
[
"Project_4/GAParallel.py"
] |
[
"# this is the parallelized version of GA.py. Parallelized by Matteo Bjornsson, original code written by Nick Stone\n################################################################################\n\nimport random\nimport Performance\nfrom NeuralNetwork import NeuralNetwork\nimport DataUtility\nimport numpy as np\nimport copy\nimport multiprocessing\nimport traceback\n\nclass individual:\n \n def __init__(self):\n self.fitness = float('inf')\n #How big should each chromosome be? My initial assumption is the number of feature vectors across the board \n self.chromosome = [] # some numpy == weights\n self.Size = 0 \n\n\n def InitChromie(self,Feature_Size):\n #Loop through each index up until the number of features and just set it to 0 \n self.chromosome = [Feature_Size]\n for i in range(len(self.chromosome)): \n self.chromosome[i] = 0 \n self.chromosome = np.array(self.chromosome)\n self.Size = Feature_Size\n\n def setfit(self,fit): \n self.fitness = fit \n def getfit(self): \n return self.fitness \n def SetChromie(self,Chromos): \n self.chromosome = Chromos\n def SetSize(self,si): \n self.Size = si \n def getsize(self): \n return self.Size\n def getChromie(self): \n return self.chromosome \n\n def ReturnChromie(self):\n return self.chromosome\n\n def printChromie(self): \n for i in self.chromosome: \n print(i)\n\nclass GA:\n #####################\n # Initialize the population etc\n ####################\n #\n def __init__(self, hyperparameters:dict , Total_Weight:int ,NN):\n \n self.maxGen = hyperparameters[\"maxGen\"]\n self.pop_size = hyperparameters[\"pop_size\"]\n self.mutation_rate = hyperparameters[\"mutation_rate\"]\n self.mutation_range = hyperparameters[\"mutation_range\"]\n self.crossover_rate = hyperparameters[\"crossover_rate\"]\n self.generation = 0 \n #SEt the size to be the number of features \n self.Chromosome_Size = Total_Weight\n #Take in a neural Network \n self.nn = NN \n self.globalfit = list() \n \n #init general population \n #On the creation of a genetic algorithm, we should create a series of random weights in a numpy array that can be fed into the neural network. \n #Create an individual object and set the chromosome weight randomly for each of the individuals in the population (pop size)\n self.population = list()\n for i in range(self.pop_size): \n #Create a new individual object \n temp = individual()\n #Set the array size \n temp.SetSize(Total_Weight)\n #Initialize an empty list of weights 0s\n temp.InitChromie(Total_Weight)\n #Now randomly generate values to start for each of these sizes \n temp.SetChromie(self.GenerateWeights())\n #Add the individual to the list of total population \n self.population.append(temp)\n\n # random weight values, weight matrix is numpy array, matches network architecture\n # use similar weight init function as from NN\n self.bestChromie = self.population[0]\n\n\n #Generating the initial weights \n def GenerateWeights(self): \n # initialize weights randomly, close to 0\n # generate the matrices that hold the input weights for each layer. Maybe return a list of matrices?\n # will need 1 weight matrix for 0 hidden layers, 2 for 1 hidden layer, 3 for 2 hidden layer. 
\n layer_nodes = - 1\n layer_inputs = 1 \n weights = np.random.uniform(layer_nodes, layer_inputs,self.Chromosome_Size)\n return weights\n\n\n\n ########################################\n # Evaluate the fitness of an individual\n ########################################\n def fitness(self,) -> float:\n #Fitness Function will be Mean squared Error\n for i in self.population: \n fitscore = self.nn.fitness(i.getChromie()) \n i.setfit(fitscore)\n \n ########################################\n # Evaluate the fitness of an individual\n ########################################\n def pfitness(self,) -> float:\n print(\"FITNESS\")\n #Fitness Function will be Mean squared Error\n for i in self.population: \n fitscore = self.nn.fitness(i.getChromie()) \n print(fitscore)\n i.setfit(fitscore)\n \n\n ##################################\n # pick a subset of POP based ranked selection\n #####################################\n def selection(self):\n\n self.population = sorted(self.population, key=lambda individual: individual.fitness)\n bestChromie = self.population[0]\n self.globalfit.append(bestChromie.fitness)\n if bestChromie.fitness < self.bestChromie.fitness:\n self.bestChromie = bestChromie\n pop = self.pop_size\n\n # RANKED ROULETTE SELECTION\n newPopulation = list()\n Subset = int(pop / 2 )\n Subset = Subset + 1 \n for j in range(Subset): \n choice = random.random()\n sum = 0\n for i in range(pop):\n sum += 2/pop * (pop - (i+1))/(pop - 1)\n if sum > choice:\n newPopulation.append(self.population[i])\n break\n\n self.population = newPopulation\n\n \n ####################################\n # make new generation based on parent selection by swapping chromosomes \n ####################################\n def crossover(self): \n self.generation = self.generation + 1\n NewPop = list() \n #{01 12 23 34 }\n #TODO: pick crossover mechanism (uniform?)\n for i in range(len(self.population)-1): \n\n NewChromoC1 = list()\n NewChromoC2 = list() \n\n Parent1 = self.population[i]\n Parent2 = self.population[i+1]\n \n Child1 = individual()\n Child2 = individual()\n \n Child1.InitChromie(Parent1.getsize())\n Child2.InitChromie(Parent2.getsize())\n \n for i in range(Parent1.getsize()):\n score = random.random()\n if score > self.crossover_rate: \n bit = Parent1.getChromie()\n bit = bit[i]\n bit2 = Parent2.getChromie()\n bit2 = bit2[i]\n else: \n bit = Parent2.getChromie()\n bit = bit[i]\n bit2 = Parent1.getChromie()\n bit2 = bit2[i]\n NewChromoC1.append(bit)\n NewChromoC2.append(bit2)\n NewChromoC1 = np.array(NewChromoC1)\n NewChromoC2 = np.array(NewChromoC2)\n Child1.SetChromie(NewChromoC1)\n Child2.SetChromie(NewChromoC2)\n NewPop.append(Child1)\n NewPop.append(Child2)\n self.population = NewPop\n \n while(len(self.population) > self.pop_size): \n Kill = random.randint(0,len(self.population))\n self.population.remove(self.population[Kill])\n self.mutate()\n\n\n \n\n ###################################\n # introduce random change to each individual in the generation\n ###############################\n def mutate(self):\n for i in self.population:\n perc = random.random()\n if perc > self.mutation_rate: \n continue \n else: \n bit = random.randint(0,len(i.getChromie())-1)\n temp = i.getChromie()\n temp[bit] = random.uniform(-self.mutation_range,self.mutation_range)\n i.SetChromie(temp) \n \n\n\ndef driver(q, ds: str, data_package: list, regression: bool, perf: Performance, hidden_layers: list, hyper_params: dict, count: int, total_counter:int, total: int):\n print(\"Job \", ds, count, \"started\")\n try:\n # init all 
test data values\n test_data, test_labels, training_data, training_labels, output_size, input_size = data_package\n layers = [input_size] + hidden_layers + [output_size]\n\n # init neural network\n nn = NeuralNetwork(input_size, hidden_layers, regression, output_size)\n nn.set_input_data(training_data, training_labels)\n\n total_weights = 0 \n for i in range(len(layers)-1):\n total_weights += layers[i] * layers[i+1]\n \n #self, hyperparameters:dict , Total_Weight:int ,NN\n ga = GA(hyper_params,total_weights, nn)\n # plt.ion\n for gen in range(ga.maxGen): \n ga.fitness()\n ga.selection()\n ga.crossover()\n \n # get best overall solution and set the NN weights\n bestSolution = ga.bestChromie.getChromie()\n bestWeights = ga.nn.weight_transform(bestSolution)\n ga.nn.weights = bestWeights\n\n # pass the test data through the trained NN\n results = classify(test_data, test_labels, regression, ga, perf)\n # headers = [\"Data set\", \"layers\", \"pop\", \"Beta\", \"CR\", \"generations\", \"loss1\", \"loss2\"]\n\n Meta = [\n ds, \n len(hidden_layers), \n hyper_params[\"maxGen\"], \n hyper_params[\"pop_size\"], \n hyper_params[\"mutation_rate\"],\n hyper_params[\"mutation_range\"],\n hyper_params[\"crossover_rate\"]\n ]\n results_performance = perf.LossFunctionPerformance(regression, results) \n data_point = Meta + results_performance\n data_point_string = ','.join([str(x) for x in data_point])\n # put the result on the multiprocessing queue\n q.put(data_point_string)\n print(f\"{ds} {count}/{int(total/6)}. {total_counter}/{total}\")\n except Exception as e:\n print('Caught exception in worker thread')\n\n # This prints the type, value, and stack trace of the\n # current exception being handled.\n traceback.print_exc()\n\n print()\n raise e\n\ndef generate_data_package(fold: int, tenfolds: list, regression: bool, du: DataUtility):\n test_data, test_labels = copy.deepcopy(tenfolds[fold])\n remaining_data = [x[0] for i, x in enumerate(copy.deepcopy(tenfolds)) if i!=fold]\n remaining_labels = [y[1] for i, y in enumerate(copy.deepcopy(tenfolds)) if i!=fold]\n #Store off a set of the remaining dataset \n training_data = np.concatenate(remaining_data, axis=1) \n #Store the remaining data set labels \n training_labels = np.concatenate(remaining_labels, axis=1)\n \n if regression == True:\n #The number of output nodes is 1 \n output_size = 1\n #else it is a classification data set \n else:\n #Count the number of classes in the label data set \n output_size = du.CountClasses(training_labels)\n #Get the test data labels in one hot encoding \n test_labels = du.ConvertLabels(test_labels, output_size)\n #Get the Labels into a One hot encoding \n training_labels = du.ConvertLabels(training_labels, output_size)\n\n input_size = training_data.shape[0]\n return [test_data, test_labels, training_data, training_labels, output_size, input_size]\n\ndef classify(test_data: np.ndarray, test_labels: np.ndarray, regression: bool, ga: GA, perf: Performance):\n estimates = ga.nn.classify(test_data, test_labels)\n if regression == False: \n #Decode the One Hot encoding Value \n estimates = ga.nn.PickLargest(estimates)\n ground_truth = ga.nn.PickLargest(test_labels)\n else: \n estimates = estimates.tolist()\n ground_truth = test_labels.tolist()[0]\n estimates = estimates[0]\n results = perf.ConvertResultsDataStructure(ground_truth, estimates)\n return results\n\n# this function takes the results from the queue that all async jobs write to, and\n# writes the jobs to disk. 
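Funneling all result rows through one writer process avoids concurrent appends to the CSV from the worker pool. 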
This function is meant to be started as it's own process.\n# param q is the multiprocess Manager queue object shared by all jobs. \ndef data_writer(q, filename):\n while True:\n with open(filename, 'a') as f:\n data_string = q.get()\n if data_string == 'kill':\n f.write('\\n')\n break\n f.write(data_string + '\\n')\n\nif __name__ == '__main__':\n\n headers = [\"Data set\", \"layers\", \"maxGen\", \"pop_size\", \"mutation_rate\", \"mutation_range\", \"crossover_rate\", \"loss1\", \"loss2\"]\n filename = 'GA_results.csv'\n\n Per = Performance.Results()\n Per.PipeToFile([], headers, filename)\n\n data_sets = [\"soybean\", \"glass\",\"Cancer\",\"forestfires\", \"machine\", \"abalone\"] \n\n regression_data_set = {\n \"soybean\": False,\n \"Cancer\": False,\n \"glass\": False,\n \"forestfires\": True,\n \"machine\": True,\n \"abalone\": True\n }\n categorical_attribute_indices = {\n \"soybean\": [],\n \"Cancer\": [],\n \"glass\": [],\n \"forestfires\": [],\n \"machine\": [],\n \"abalone\": []\n }\n\n tuned_0_hl = {\n \"soybean\": {\n \"mutation_rate\": .2,\n \"crossover_rate\": .2,\n \"hidden_layer\": []\n },\n \"Cancer\": {\n \"mutation_rate\": .8,\n \"crossover_rate\": .5,\n \"hidden_layer\": []\n },\n \"glass\": {\n \"mutation_rate\": .5,\n \"crossover_rate\": .2,\n \"hidden_layer\": []\n },\n \"forestfires\": {\n \"mutation_rate\": .2,\n \"crossover_rate\": .5,\n \"hidden_layer\": []\n },\n \"machine\": {\n \"mutation_rate\": .2,\n \"crossover_rate\": .2,\n \"hidden_layer\": []\n },\n \"abalone\": {\n \"mutation_rate\": .5,\n \"crossover_rate\": .5,\n \"hidden_layer\": []\n }\n }\n\n tuned_1_hl = {\n \"soybean\": {\n \"mutation_rate\": .2,\n \"crossover_rate\": .2,\n \"hidden_layer\": [7]\n },\n \"Cancer\": {\n \"mutation_rate\": .5,\n \"crossover_rate\": .2,\n \"hidden_layer\": [4]\n },\n \"glass\": {\n \"mutation_rate\": .2,\n \"crossover_rate\": .2,\n \"hidden_layer\": [8]\n },\n \"forestfires\": {\n \"mutation_rate\": .5,\n \"crossover_rate\": .2,\n \"hidden_layer\": [8]\n },\n \"machine\": {\n \"mutation_rate\": .5,\n \"crossover_rate\": .2,\n \"hidden_layer\": [4]\n },\n \"abalone\": {\n \"mutation_rate\": .8,\n \"crossover_rate\": .2,\n \"hidden_layer\": [8]\n }\n }\n\n tuned_2_hl = {\n \"soybean\": {\n \"mutation_rate\": .2,\n \"crossover_rate\": .2,\n \"hidden_layer\": [7,12]\n },\n \"Cancer\": {\n \"mutation_rate\": .5,\n \"crossover_rate\": .2,\n \"hidden_layer\": [4,4]\n },\n \"glass\": {\n \"mutation_rate\": .5,\n \"crossover_rate\": .5,\n \"hidden_layer\": [8,6]\n },\n \"forestfires\": {\n \"mutation_rate\": .2,\n \"crossover_rate\": .5,\n \"hidden_layer\": [8,8]\n },\n \"machine\": {\n \"mutation_rate\": .5,\n \"crossover_rate\": .5,\n \"hidden_layer\": [7,2]\n },\n \"abalone\": {\n \"mutation_rate\": .2,\n \"crossover_rate\": .2,\n \"hidden_layer\": [6,8]\n }\n }\n ##############################################\n # START MULTIPROCESS JOB POOL\n ##############################################\n manager = multiprocessing.Manager()\n q = manager.Queue()\n writer = multiprocessing.Process(target=data_writer, args=(q,filename))\n writer.start()\n\n pool = multiprocessing.Pool()\n ##############################################\n\n du = DataUtility.DataUtility(categorical_attribute_indices, regression_data_set)\n total_counter = 0\n for data_set in data_sets:\n if data_set != \"abalone\": continue\n regression = regression_data_set[data_set]\n tuned_parameters = [tuned_0_hl[data_set], tuned_1_hl[data_set], tuned_2_hl[data_set]]\n\n data_set_counter = 0\n # ten fold data and labels 
is a list of [data, labels] pairs, where \n # data and labels are numpy arrays:\n tenfold_data_and_labels = du.Dataset_and_Labels(data_set)\n\n for j in range(10):\n data_package = generate_data_package(fold=j, tenfolds=tenfold_data_and_labels, regression=regression, du=du)\n\n for z in range(3):\n if z != 2: continue\n hidden_layers = tuned_parameters[z][\"hidden_layer\"]\n\n # these are the parameters that were tuned:\n ############################################\n # popss =[100] # paper suggests 10 * total weight\n # bet = [.5,.8,.2] # note suggested from paper: [.5 , 1]\n # cr = [.1, .3, .8] # note suggested from paper: cr from [0,.3], [.8, 1] if not converging\n # maxgen = [500]\n\n total_trials = 180\n\n hyperparameters = {\n \"maxGen\":500,\n \"pop_size\":500,\n \"mutation_rate\": tuned_parameters[z][\"mutation_rate\"],\n \"mutation_range\": 10,\n \"crossover_rate\": tuned_parameters[z][\"crossover_rate\"] \n }\n\n\n pool.apply_async(driver, args=(\n q, # queue\n data_set, \n data_package,\n regression,\n Per,\n hidden_layers,\n hyperparameters,\n data_set_counter,\n total_counter,\n total_trials\n ))\n data_set_counter += 1\n total_counter += 1\n\n ##############################\n # CLOSE THE MULTIPROCESS POOL\n ##############################\n pool.close()\n pool.join()\n q.put('kill')\n writer.join()\n\n"
] |
[
[
"numpy.concatenate",
"numpy.random.uniform",
"numpy.array"
]
] |
BUVANEASH/AdaConv
|
[
"2e714b47f1e4051e3f7b58bbee00052404e51f6a"
] |
[
"styletransfer/conv.py"
] |
[
"import tensorflow as tf\n\nclass Conv2D(tf.keras.layers.Layer):\n \"\"\" Conv2D\n \"\"\"\n def __init__(self, **kwargs):\n super(Conv2D, self).__init__(name = kwargs.get('name','Conv2D'))\n self.filters = kwargs.get('filters')\n self.kernels = kwargs.get('kernels',3)\n self.strides = kwargs.get('strides',1)\n self.padding_values = tf.constant([[0,0], \n [(self.kernels[0]-1)//2,(self.kernels[0]-1)//2], \n [(self.kernels[1]-1)//2,(self.kernels[1]-1)//2], \n [0,0]])\n self.conv2d = tf.keras.layers.Conv2D(filters = self.filters,\n kernel_size = self.kernels, \n strides = self.strides, \n padding = \"VALID\", \n name = 'Conv2D')\n\n def call(self, inputs, training = False):\n \n x = inputs\n x = tf.pad(x, self.padding_values, mode = \"REFLECT\")\n x = self.conv2d(x, training = training)\n \n return x\n\nclass SubPixelConv2D(tf.keras.layers.Layer):\n \"\"\" SubPixelConv2D\n \"\"\"\n def __init__(self, **kwargs):\n super(SubPixelConv2D, self).__init__(name = kwargs.get('name','SubPixelConv2D'))\n self.filters = kwargs.get('filters')\n self.r = kwargs.get('r',2)\n self.kernels = kwargs.get('kernels',3)\n self.strides = kwargs.get('strides',1)\n self.padding_values = tf.constant([[0,0], \n [(self.kernels[0]-1)//2,(self.kernels[0]-1)//2], \n [(self.kernels[1]-1)//2,(self.kernels[1]-1)//2], \n [0,0]])\n self.conv2d = tf.keras.layers.Conv2D(filters = self.filters * self.r * self.r, \n kernel_size = self.kernels, \n strides = self.strides, \n padding = \"VALID\", \n name = 'Conv2D')\n\n def call(self, inputs, training = False):\n \n x = inputs\n x = tf.pad(x, self.padding_values, mode = \"REFLECT\")\n x = self.conv2d(x, training = training)\n x = tf.nn.depth_to_space(x, block_size = self.r)\n \n return x"
] |
[
[
"tensorflow.keras.layers.Conv2D",
"tensorflow.constant",
"tensorflow.pad",
"tensorflow.nn.depth_to_space"
]
] |
johncoltrane1/saferGPMLE
|
[
"b86fbd329eaad0b6374a1b28cae43b2a7f81eb61"
] |
[
"safergpy/code/report/nll_boxplot.py"
] |
[
"import matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport numpy as np\nimport sys\n\n\n# --- README ---\n\n'''\nThis script generates boxplots for NLL differences of default & healed\nas obtained with LOO\n'''\n\n# --- To Run ---\n\n'''\nSyntax :\n\npython nll_boxplot.py bench_num scheme1 scheme2 dataset_name dimension\n\nExample :\n\npython3 nll_boxplot.py 2 gpy_mle0133 gpy_mle3021 g10 3d\n'''\n\n\n# --- Methods ---\n\nbench_num = sys.argv[1]\nmethod = sys.argv[2]\nmethod1 = sys.argv[3]\ndataset = [str(sys.argv[4]), sys.argv[5]]\nprint('generating box plots for : \\n', [method, method1])\n\n# --- File name parsing utilities ---\n\n\ndef get_problem_and_dimension(file):\n splited_file_name = file.split('_')\n\n problem = \"_\".join(file.split('_')[0:(len(splited_file_name) - 1)])\n d = splited_file_name[len(splited_file_name) - 1].replace('.csv', '')\n\n return problem, d\n\n# --- Let's do the job ---\n\n\ndata_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench2', 'data_no_std', str(method1))\n\ndata_dir_full = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench1', 'proposed', str(method1))\n\ndata_dir_healed = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench2', 'data_no_std', str(method))\n\ndata_dir_full_healed = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench1', 'proposed', str(method))\n\n\n# -- Retrieve data from methods ---\n\ncost_dict = {}\ncost_dict_healed = {}\ncost_dict_full = {}\ncost_dict_full_healed = {}\n\nfor file in os.listdir(data_dir):\n problem, d = get_problem_and_dimension(file)\n\n if [problem, d] == dataset:\n\n data = pd.read_csv(os.path.join(data_dir, file), sep=',', index_col=0)[['cost', 'output']]\n df = pd.DataFrame(data)\n\n for output in list(df['output']):\n cost_dict[output] = list(df.loc[df['output'] == output]['cost'])\n\nfor file in os.listdir(data_dir_healed):\n problem, d = get_problem_and_dimension(file)\n\n if [problem, d] == dataset:\n\n data = pd.read_csv(os.path.join(data_dir_healed, file), sep=',', index_col=0)[['cost', 'output']]\n df = pd.DataFrame(data)\n\n for output in list(df['output']):\n cost_dict_healed[output] = list(df.loc[df['output'] == output]['cost'])\n\n\nfor file in os.listdir(data_dir_full):\n problem, d = get_problem_and_dimension(file)\n\n if [problem, d] == dataset:\n\n data_full = pd.read_csv(os.path.join(data_dir_full, file), sep=',', index_col=0)[['cost', 'output']]\n df_full = pd.DataFrame(data_full)\n\n for output in list(df_full['output']):\n cost_dict_full[output] = list(df_full.loc[df_full['output'] == output]['cost'])\n\nfor file in os.listdir(data_dir_full_healed):\n problem, d = get_problem_and_dimension(file)\n\n if [problem, d] == dataset:\n\n data_full = pd.read_csv(os.path.join(data_dir_full_healed, file), sep=',', index_col=0)[['cost', 'output']]\n df_full = pd.DataFrame(data_full)\n\n for output in list(df_full['output']):\n cost_dict_full_healed[output] = list(df_full.loc[df_full['output'] == output]['cost'])\n\n\n# --- Box plot ---\n\nfig = plt.figure(1, figsize=(9, 6))\n\nax = fig.add_subplot(111)\n\nto_plot = []\nto_plot_full = []\nfor i in list(df_full['output']):\n # print('\\n\\n{}'.format(i))\n temp = np.array(cost_dict[i]) - np.array(cost_dict_healed[i])\n temp = temp[~np.isnan(temp)]\n to_plot.append(temp)\n to_plot_full.append(np.array(cost_dict_full[i]) - np.array(cost_dict_full_healed[i]))\n\nplot_type = input('Type \"h\" for 
histogram : \\n')\n\nif plot_type == 'h':\n\n plt.hist(to_plot, density=False, bins=10, edgecolor='black')\n plt.xlabel('NLL_default - NLL_healed', fontsize=14)\n plt.ylabel('frequency', fontsize=14)\n plt.title('Histogram of NLL differences', fontsize=14)\n plt.show()\n\nelse:\n\n bp = ax.boxplot(to_plot)\n bp1 = ax.boxplot(to_plot_full)\n\n # change color and linewidth of the medians\n for median in bp['medians']:\n median.set(color='g', linewidth=2)\n\n # change outline color, fill color and linewidth of the boxes\n for box in bp1['boxes']:\n # change outline color\n box.set(color='r', linewidth=2)\n\n # change color and linewidth of the whiskers\n for whisker in bp1['whiskers']:\n whisker.set(color='r', linewidth=2)\n\n # change color and linewidth of the caps\n for cap in bp1['caps']:\n cap.set(color='r', linewidth=2)\n\n for median in bp1['medians']:\n median.set(color='r', linewidth=2)\n\n plt.xlabel('output functions', fontsize=14)\n plt.ylabel('NLL_default - NLL_healed', fontsize=14)\n plt.title('Boxplot of difference in LOO estimated NLL of {}'.format(dataset[0] + '_' + str(dataset[1])), fontsize=14)\n plt.grid(True)\n ax.set_xticklabels(list(df_full['output']))\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.title",
"numpy.isnan",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] |
Gustoaxel/Clinic_Image_autoencoder
|
[
"cb360f80525b22e412bc5e6d6e03d7725cd0d357"
] |
[
"src/testMaskClassi.py"
] |
[
"import os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n\nimport yaml\nimport argparse\nfrom pathlib import Path\n\nimport numpy as np\nimport torch as T\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nimport torchvision.datasets as datasets\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nimport torchvision\n\nfrom data_loader import ImagePlace\nfrom utils import save_imgs\nfrom smoothing import smooth\n\nfrom namespace import Namespace\nfrom logger import Logger\n\nfrom models.cae_32x32x32_zero_pad_bin_classi import CAE\nimport matplotlib.pyplot as plt\nfrom functools import reduce\nfrom tqdm import tqdm\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\n\nlogger = Logger(__name__, colorize=True)\n\ndef ShowPcaTsne(data_encoder, labels ,exp_dir,running_accuracy , train_len):\n \"\"\" Visualization with PCA and Tsne\n Args:\n X: numpy - original imput matrix\n Y: numpy - label matrix \n data_encoder: tensor - latent sapce output, encoded data \n center_distance: numpy - center_distance matrix\n class_len: int - number of class \n Return:\n Non, just show results in 2d space \n \"\"\" \n \n # Define the color list for plot\n color = ['#1F77B4', '#FF7F0E', '#2CA02C', '#D62728', '#9467BD','#8C564B', '#E377C2', '#BCBD22', '#17BECF', '#40004B','#762A83',\\\n '#9970AB', '#C2A5CF', '#E7D4E8', '#F7F7F7','#D9F0D3', '#A6DBA0', '#5AAE61', '#1B7837', '#00441B','#8DD3C7', '#FFFFB3',\\\n '#BEBADA', '#FB8072', '#80B1D3','#FDB462', '#B3DE69', '#FCCDE5', '#D9D9D9', '#BC80BD','#CCEBC5', '#FFED6F']\n \n # Do pca for original data\n pca = PCA(n_components= 2)\n\n \n # Do pca for encoder data if cluster>2\n if data_encoder.shape[1] !=3: # layer code_size >2 (3= 2+1 data+labels) \n data_encoder_pca = data_encoder[:,:]\n X_encoder_pca = pca.fit(data_encoder_pca).transform(data_encoder_pca)\n X_encoder_tsne = TSNE(n_components=2).fit_transform(data_encoder_pca)\n Y_encoder_pca = labels.detach().cpu().numpy().astype(int)\n else:\n X_encoder_pca = data_encoder[:,:]\n X_encoder_tsne = X_encoder_pca \n Y_encoder_pca = labels.detach().cpu().numpy().astype(int)\n color_encoder = [color[i] for i in Y_encoder_pca ]\n \n # Do pca for center_distance\n #labels = np.unique(Y)\n #center_distance_pca = pca.fit(center_distance).transform(center_distance)\n #color_center_distance = [color[i] for i in labels ]\n \n # Plot\n title2 = \"Latent Space\"\n\n plt.figure()\n plt.title(title2)\n plt.scatter(X_encoder_pca[:, 0], X_encoder_pca[:, 1], c= color_encoder )\n plt.savefig(exp_dir / \"space/epoch_acc_{}.png\".format(running_accuracy / train_len ))\n plt.show()\n\ndef proj_l1ball(w0,eta,device='cpu'):\n# To help you understand, this function will perform as follow:\n# a1 = torch.cumsum(torch.sort(torch.abs(y),dim = 0,descending=True)[0],dim=0)\n# a2 = (a1 - eta)/(torch.arange(start=1,end=y.shape[0]+1))\n# a3 = torch.abs(y)- torch.max(torch.cat((a2,torch.tensor([0.0]))))\n# a4 = torch.max(a3,torch.zeros_like(y))\n# a5 = a4*torch.sign(y)\n# return a5\n \n w = T.as_tensor(w0,dtype=torch.get_default_dtype(),device=device)\n \n init_shape = w.size()\n \n if w.dim() >1:\n init_shape = w.size()\n w = w.reshape(-1)\n \n Res = torch.sign(w)*torch.max(torch.abs(w)- torch.max(torch.cat((\\\n (torch.cumsum(torch.sort(torch.abs(w),dim = 0,descending=True)[0],dim=0,dtype=torch.get_default_dtype())- eta) \\\n 
/torch.arange(start=1,end=w.numel()+1,device=device,dtype=torch.get_default_dtype()),\n torch.tensor([0.0],dtype=torch.get_default_dtype(),device=device))) ), torch.zeros_like(w) )\n \n Q = Res.reshape(init_shape).clone().detach()\n \n if not torch.is_tensor(w0):\n Q = Q.data.numpy()\n return Q\n\n\n\n\ndef train(cfg):\n assert cfg.get(\"device\") == \"cpu\" or (cfg.get(\"device\") == \"cuda\" and T.cuda.is_available())\n\n root_dir = Path(__file__).resolve().parents[1]\n loss_plot = []\n loss_plot.append([])\n loss_plot.append([])\n \n np.random.seed(6)\n torch.manual_seed(6)\n torch.cuda.manual_seed(6)\n \n\n \n logger.info(\"training: experiment %s\" % (cfg.get(\"exp_name\")))\n\n # make dir-tree\n exp_dir = root_dir / \"experiments\" / cfg.get(\"exp_name\")\n\n for d in [\"out\", \"checkpoint\", \"logs\"]:\n os.makedirs(exp_dir / d, exist_ok=True)\n\n #cfg.to_file(exp_dir / \"train_config.json\")\n\n # tb tb_writer\n tb_writer = SummaryWriter(exp_dir / \"logs\")\n logger.info(\"started tensorboard writer\")\n\n model = CAE()\n model.load_state_dict(T.load(cfg.get(\"checkpoint\")))\n model.eval()\n\n if cfg.get(\"device\") == \"cuda\":\n model.cuda()\n \n\n \n train_len = 9000\n test_len = 3000\n \n\n \n train_dl = torch.utils.data.DataLoader(ImagePlace(cfg.get(\"dataset_path\") ), batch_size=cfg.get(\"batch_size\"), shuffle=True, num_workers=cfg.get(\"num_workers\"), pin_memory=True)\n test_dl = torch.utils.data.DataLoader(ImagePlace(cfg.get(\"dataset_path_test\") ), batch_size=cfg.get(\"batch_size\"), shuffle=False, num_workers=cfg.get(\"num_workers\"), pin_memory=True)\n #test_dl = torch.utils.data.DataLoader(ImagePlace(cfg.get(\"dataset_path\")), batch_size=cfg.get(\"batch_size\"), shuffle=True, num_workers=cfg.get(\"num_workers\"))\n #logger.info(f\"loaded dataset from {cfg.dataset_path}\")\n\n\n ts = 0\n \n\n \n running_accuracy = 0 \n \n for batch_idx,batch in enumerate(tqdm(train_dl)):\n x = batch[0]\n \n labels = batch[1]\n\n if cfg.get(\"device\") == \"cuda\":\n x = x.detach().cuda()\n labels = labels.detach().cuda()\n\n\n\n x = x.to(memory_format=torch.channels_last)\n lab, y = model(x)\n with torch.no_grad():\n try : \n data_encoder = torch.cat((data_encoder, lab),0)\n list_label = torch.cat((list_label, labels),0)\n except NameError:\n data_encoder = lab\n list_label = labels\n \n \n \n \n \n running_accuracy += (lab.max(1)[1] == labels).sum().item() \n \n \n \n ts += 1\n # -- end batch every\n \n if batch_idx % cfg.get(\"save_every\") == 0:\n y = y.to(memory_format=torch.contiguous_format)\n img = x[0].detach().cpu().float().numpy()\n \n plt.imshow(np.transpose(img, (1, 2, 0)))\n plt.show()\n img = y[0].detach().cpu().float().numpy()\n \n plt.imshow(np.transpose(img, (1, 2, 0)))\n \n plt.show()\n print(\"Attributed label = \",lab.max(1)[1][0].item())\n # -- end save every\n # -- end batches\n\n running_accuracy_test = 0\n \n for batch_idx,batch in enumerate(tqdm(test_dl)):\n x = batch[0]\n labels = batch[1]\n\n if cfg.get(\"device\") == \"cuda\":\n x = x.detach().cuda()\n labels = labels.detach().cuda()\n\n\n\n x = x.to(memory_format=torch.channels_last)\n lab, y = model(x)\n with torch.no_grad():\n try : \n data_encoder_test = torch.cat((data_encoder_test, lab),0)\n list_label_test = torch.cat((list_label_test, labels),0)\n except NameError:\n data_encoder_test = lab\n list_label_test = labels\n \n \n \n \n \n running_accuracy_test += (lab.max(1)[1] == labels).sum().item() \n \n \n \n ts += 1\n # -- end batch every\n \n if batch_idx % cfg.get(\"save_every\") == 0:\n y = 
y.to(memory_format=torch.contiguous_format)\n img = x[0].detach().cpu().float().numpy()\n \n plt.imshow(np.transpose(img, (1, 2, 0)))\n plt.show()\n img = y[0].detach().cpu().float().numpy()\n \n plt.imshow(np.transpose(img, (1, 2, 0)))\n \n plt.show()\n print(\"Attributed label = \",lab.max(1)[1][0].item())\n # -- end save every\n # -- end batches\n print(\"train accuracy : \", running_accuracy / train_len)\n ShowPcaTsne(data_encoder.detach().cpu(), list_label.detach().cpu(),exp_dir,running_accuracy , train_len)\n del data_encoder\n del list_label\n \n print(\"test accuracy : \", running_accuracy_test / test_len)\n ShowPcaTsne(data_encoder_test.detach().cpu(), list_label_test.detach().cpu(),exp_dir,running_accuracy_test , test_len)\n del data_encoder_test\n del list_label_test\n\n \n \n \n\n tb_writer.close()\n #logger.info(zero_list)\n \n\nif __name__ == \"__main__\":\n #parser = argparse.ArgumentParser()\n #parser.add_argument(\"--config\", type=str, required=True)\n #args = parser.parse_args()\n\n #with open(args.config, \"rt\") as fp:\n # cfg = Namespace(**yaml.safe_load(fp))\n cfg = {}\n \n\n cfg[\"batch_size\"]= 16\n cfg[\"checkpoint\"]= \"C:\\\\Users\\\\Axel\\\\Desktop\\\\cae-master\\\\experiments\\\\trainClassi_55_2000\\\\checkpoint\\\\best_model_Mask.pth\"\n cfg[\"start_epoch\"]= 1\n cfg[\"exp_name\"] = \"testClassi_{}_{}\".format(100,2000)\n cfg[\"batch_every\"] = 1\n cfg[\"save_every\"]= 100\n cfg[\"epoch_every\"]= 1\n #cfg[\"dataset_path\"] = \"/Users/axelgustovic/Documents/Ecole/MAM5/PFE/cae-master/dataset/trainPlace\"\n cfg[\"dataset_path\"] = \"C:\\\\Users\\\\Axel\\\\Desktop\\\\cae-master\\\\dataset\\\\trainPlace\"\n cfg[\"dataset_path_test\"] = \"C:\\\\Users\\\\Axel\\\\Desktop\\\\cae-master\\\\dataset\\\\testPlace\"\n cfg[\"num_workers\"] = 2\n #cfg[\"device\"] = \"cpu\" \n cfg[\"device\"] = \"cuda\"\n \n #torch.backends.cudnn.benchmark = True \n torch.backends.cudnn.enabled = False\n\n train(cfg)\n"
] |
[
[
"torch.abs",
"torch.sign",
"torch.cat",
"sklearn.manifold.TSNE",
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available",
"torch.get_default_dtype",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"torch.zeros_like",
"torch.is_tensor",
"numpy.transpose",
"matplotlib.pyplot.show",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.scatter",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.manual_seed"
]
] |
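Editorial note: the training/evaluation script in this row accumulates encoder outputs across batches with torch.cat under torch.no_grad() and then hands them to a PCA/t-SNE visualization (its ShowPcaTsne helper, defined earlier in the file). A minimal runnable sketch of that accumulate-then-project pattern, with random tensors standing in for the real encoder activations and class labels:

# Sketch only -- not the repo's ShowPcaTsne. Random data stands in for
# encoder outputs (`lab`) and labels collected per batch in the loop above.
import torch
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

torch.manual_seed(6)
batches = [torch.randn(16, 55) for _ in range(4)]    # per-batch encoder outputs
labels = [torch.randint(0, 10, (16,)) for _ in range(4)]

with torch.no_grad():
    feats = torch.cat(batches, 0)    # (64, 55), mirrors data_encoder
    targets = torch.cat(labels, 0)   # (64,),    mirrors list_label

pca_2d = PCA(n_components=2).fit_transform(feats.numpy())
tsne_2d = TSNE(n_components=2, init="pca").fit_transform(feats.numpy())

for name, pts in [("PCA", pca_2d), ("t-SNE", tsne_2d)]:
    plt.figure()
    plt.title(name)
    plt.scatter(pts[:, 0], pts[:, 1], c=targets.numpy(), s=8)
plt.show()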
rashibudati/Text-Classification-using-AGGCN
|
[
"d64e9c9ffbd9d36f692bb003a910ce4c66d0050c"
] |
[
"prepare_vocab.py"
] |
[
"\"\"\"\nPrepare vocabulary and initial word vectors.\n\"\"\"\nimport json\nimport pickle\nimport argparse\nimport numpy as np\nfrom collections import Counter\n\nfrom utils import vocab, constant, helper\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Prepare vocab for relation extraction.')\n parser.add_argument('data_dir', help='TACRED directory.')\n parser.add_argument('vocab_dir', help='Output vocab directory.')\n parser.add_argument('--glove_dir', default='dataset/glove', help='GloVe directory.')\n parser.add_argument('--wv_file', default='glove.840B.300d.txt', help='GloVe vector file.')\n parser.add_argument('--wv_dim', type=int, default=300, help='GloVe vector dimension.')\n parser.add_argument('--min_freq', type=int, default=0, help='If > 0, use min_freq as the cutoff.')\n parser.add_argument('--lower', action='store_true', help='If specified, lowercase all words.')\n \n args = parser.parse_args()\n return args\n\ndef main():\n args = parse_args()\n \n # input files\n train_file = args.data_dir + '/train_new.json'\n test_file = args.data_dir + '/test_new.json'\n wv_file = args.glove_dir + '/' + args.wv_file\n wv_dim = args.wv_dim\n\n # output files\n helper.ensure_dir(args.vocab_dir)\n vocab_file = args.vocab_dir + '/vocab.pkl'\n emb_file = args.vocab_dir + '/embedding.npy'\n\n # load files\n print(\"loading files...\")\n train_tokens = load_tokens(train_file, args.data_dir)\n test_tokens = load_tokens(test_file, args.data_dir)\n if args.lower:\n train_tokens, test_tokens = [[t.lower() for t in tokens] for tokens in\\\n (train_tokens, test_tokens)]\n\n # load glove\n print(\"loading glove...\")\n glove_vocab = vocab.load_glove_vocab(wv_file, wv_dim)\n print(\"{} words loaded from glove.\".format(len(glove_vocab)))\n \n print(\"building vocab...\")\n v = build_vocab(train_tokens, glove_vocab, args.min_freq, args.data_dir)\n\n print(\"calculating oov...\")\n datasets = {'train': train_tokens, 'test': test_tokens}\n for dname, d in datasets.items():\n total, oov = count_oov(d, v)\n print(\"{} oov: {}/{} ({:.2f}%)\".format(dname, oov, total, oov*100.0/total))\n \n print(\"building embeddings...\")\n embedding = vocab.build_embedding(wv_file, v, wv_dim)\n print(\"embedding size: {} x {}\".format(*embedding.shape))\n\n print(\"dumping to files...\")\n with open(vocab_file, 'wb') as outfile:\n pickle.dump(v, outfile)\n np.save(emb_file, embedding)\n print(\"all done.\")\n\ndef load_tokens(filename, data_dir):\n with open(filename) as infile:\n data = json.load(infile)\n tokens = []\n for d in data:\n ts = d['token']\n if data_dir == \"dataset/tacred\":\n ss, se, os, oe = d['subj_start'], d['subj_end'], d['obj_start'], d['obj_end']\n # do not create vocab for entity words\n ts[ss:se+1] = ['<PAD>']*(se-ss+1)\n ts[os:oe+1] = ['<PAD>']*(oe-os+1)\n tokens += list(filter(lambda t: t!='<PAD>', ts))\n print(\"{} tokens from {} examples loaded from {}.\".format(len(tokens), len(data), filename))\n return tokens\n\ndef build_vocab(tokens, glove_vocab, min_freq, data_dir):\n \"\"\" build vocab from tokens and glove words. 
\"\"\"\n counter = Counter(t for t in tokens)\n # if min_freq > 0, use min_freq, otherwise keep all glove words\n if min_freq > 0:\n v = sorted([t for t in counter if counter.get(t) >= min_freq], key=counter.get, reverse=True)\n else:\n v = sorted([t for t in counter if t in glove_vocab], key=counter.get, reverse=True)\n # add special tokens and entity mask tokens\n if data_dir == \"dataset/tacred\":\n v = constant.VOCAB_PREFIX + entity_masks() + v\n else:\n v = constant.VOCAB_PREFIX + v\n print(\"vocab built with {}/{} words.\".format(len(v), len(counter)))\n return v\n\ndef count_oov(tokens, vocab):\n c = Counter(t for t in tokens)\n total = sum(c.values())\n matched = sum(c[t] for t in vocab)\n return total, total-matched\n\ndef entity_masks():\n \"\"\" Get all entity mask tokens as a list. \"\"\"\n masks = []\n subj_entities = list(constant.SUBJ_NER_TO_ID.keys())[2:]\n obj_entities = list(constant.OBJ_NER_TO_ID.keys())[2:]\n masks += [\"SUBJ-\" + e for e in subj_entities]\n masks += [\"OBJ-\" + e for e in obj_entities]\n return masks\n\nif __name__ == '__main__':\n main()\n\n\n"
] |
[
[
"numpy.save"
]
] |
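Editorial note: prepare_vocab.py's single tracked API, numpy.save, is used at the end of main() to persist the embedding matrix alongside the pickled vocab. A self-contained sketch of that save/load round trip; the file names and toy tokens below are illustrative, and the random matrix stands in for GloVe vectors:

# Sketch of the vocab/embedding serialization pattern used above.
import pickle
import numpy as np

vocab = ['<PAD>', '<UNK>', 'the', 'graph', 'convolution']
embedding = np.random.uniform(-1, 1, (len(vocab), 300))  # stand-in for GloVe rows

with open('vocab.pkl', 'wb') as f:
    pickle.dump(vocab, f)
np.save('embedding.npy', embedding)            # writes embedding.npy

with open('vocab.pkl', 'rb') as f:
    vocab_back = pickle.load(f)
emb_back = np.load('embedding.npy')
assert vocab_back == vocab and emb_back.shape == (len(vocab), 300)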
Menigedegna/Image-processing
|
[
"38b8b60c2ee7f6386136105c9541c5a905aab55a"
] |
[
"XTSegment_nucleus.py"
] |
[
"# ==============================================================================\r\n#\r\n# <CustomTools>\r\n# <Menu>\r\n# <Item name=\"XTSegment_nucleus\" icon=\"Python\" tooltip=\"XTSegment_nucleus\">\r\n# <Command>PythonXT::XTSegment_nucleus(%i)</Command>\r\n# </Item>\r\n# </Menu>\r\n# </CustomTools>\r\n# ==============================================================================\r\n\r\nimport ImarisLib\r\nfrom Tkinter import *\r\nimport os\r\nimport logging\r\nimport tkMessageBox\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tkFileDialog\r\nimport datetime\r\nfrom shutil import rmtree\r\n\r\nclass Checkbar(Frame):\r\n \"\"\"\r\n This is the Checkbar class, it sets up the pop up window\r\n \"\"\"\r\n\r\n def __init__(self, parent=None, picks=[], side=LEFT, anchor=W):\r\n Frame.__init__(self, parent)\r\n self.vars = []\r\n for pick in picks:\r\n var = IntVar()\r\n chk = Checkbutton(self, text=pick, variable=var)\r\n chk.pack(side=side, anchor=anchor, expand=YES)\r\n self.vars.append(var)\r\n\r\n def state(self):\r\n return map((lambda var: var.get()), self.vars)\r\n\r\n\r\nclass User_Set_Parameters():\r\n \"\"\"\r\n This is the User_Set_Paraneters class, it creates a pop up window,\r\n and registers user's input.\r\n \"\"\"\r\n\r\n def allstates(self):\r\n User_selection = list(self.lng.state())\r\n if sum(User_selection) > 0:\r\n self.root.destroy()\r\n self.SelectedOption = User_selection\r\n else:\r\n Message = \"Please select one of the options.\"\r\n Label(self.root, text=Message).grid(row=1)\r\n\r\n def start_pop_window(self, Options=[], Message=\"No message\"):\r\n self.root = Tk()\r\n Label(self.root, text=Message).grid(row=0)\r\n self.lng = Checkbar(self.root, Options)\r\n self.lng.grid(row=2)\r\n self.lng.config(relief=GROOVE, bd=2)\r\n Button(self.root, text='Quit', fg=\"red\", command=quit).grid(row=4)\r\n Button(self.root, text='Submit', fg=\"darkgreen\", command=self.allstates).grid(row=5)\r\n self.root.mainloop()\r\n\r\n def __init__(self, selection_options=[], message=\"None\"):\r\n self.start_pop_window(selection_options, message)\r\n\r\n\r\nclass Imaris_Plugin():\r\n \"\"\"\r\n This is the ImarisApp class, it connects to an imaris instance,\r\n registers static data : plugin name and parameters required by parameters,\r\n and keeps track of processed files\r\n \"\"\"\r\n vImaris = None\r\n file_name_list = None\r\n\r\n def error_in_csv_file(self):\r\n tkMessageBox.showinfo(title=\"Alert\",\r\n message=\"Please make sure that the \" + self.plugin_name + \"_Parameters.csv file contains the following parameters in this order:\"\r\n + \"\\n- Surface grain value for detailed nucleus segmentation : float\"\r\n + \"\\n- Surface grain value for rough nucleus segmentation : float\"\r\n + \"\\n- Surface grain value for nucleolus segmentation : float\"\r\n + \"\\n- Local surface diameter for nucleolus segmentation : float\"\r\n + \"\\n- Local surface diameter for nucleus segmentation : float\"\r\n + \"\\n- Surface grain value for chromocenter segmentation : float\"\r\n + \"\\n- Local surface diameter for chromocenter segmentation : float\"\r\n + \"\\n- Remove nucleus surface close to border : boolean\"\r\n + \"\\n- Select nucleus surface with highest volume : boolean\"\r\n + \"\\n- Minimum volume threshold for chromocenter segmentation: float\"\r\n + \"\\n- Minimum intensity threshold for chromocenter segmentation: 0 < float < 1\"\r\n + \"\\n- Minimum volume threshold for nucleus segmentation: float\"\r\n + \"\\n- Save image : boolean\"\r\n + \"\\n- Sigma value for 
gaussian filter channel smoothing\"\r\n + \"\\n- Sigma value for channel background substraction\")\r\n quit()\r\n\r\n def get_plugin_parameters(self):\r\n currentDirectory = os.getcwd()\r\n AllFilesInDirectory = os.listdir(currentDirectory)\r\n ParametersList = None\r\n parameter_file = self.plugin_name+\"_Parameters.csv\"\r\n if parameter_file in AllFilesInDirectory:\r\n ParameterData = pd.read_csv(parameter_file, sep=\";\", header='infer', decimal='.')\r\n if \"Value\" in ParameterData.columns:\r\n ParametersList = list(ParameterData[\"Value\"])\r\n return ParametersList\r\n else:\r\n tkMessageBox.showinfo(title=\"Error\", message=\"Please make sure the '\" + parameter_file + \"' file contains a column 'Value' containing the values necessary for this plugin.\")\r\n quit()\r\n else:\r\n tkMessageBox.showinfo(title=\"Error\", message=\"Please make sure there is a '\" + parameter_file + \"' in the folder containing this plugin.\")\r\n quit()\r\n\r\n def get_saved_parameters(self):\r\n parameter_list = self.get_plugin_parameters()\r\n parameter_list = [float(x) for x in parameter_list]\r\n if len(parameter_list) >= 15:\r\n self.nucleus_detail_surface_grain = parameter_list[0]\r\n self.nucleus_rough_surface_grain = parameter_list[1]\r\n self.nucleolus_surface_grain = parameter_list[2]\r\n self.nucleolus_surface_surface_diameter = parameter_list[3]\r\n self.nucleus_surface_surface_diameter = parameter_list[4]\r\n self.chromocenter_surface_grain = parameter_list[5]\r\n self.chromocenter_surface_diameter = parameter_list[6]\r\n self.remove_border_toggle = parameter_list[7]\r\n self.select_biggest_nucleus_toggle = parameter_list[8]\r\n self.chromocenter_minimum_volume = parameter_list[9]\r\n self.chromocenter_minimum_intensity = parameter_list[10]\r\n self.nucleus_minimum_volume = parameter_list[11]\r\n self.save_image_toggle = parameter_list[12]\r\n self.gaussian_filter_sigma = parameter_list[13]\r\n self.background_substraction_sigma = parameter_list[14]\r\n else:\r\n self.error_in_csv_file()\r\n\r\n def get_user_set_parameters(self):\r\n pop_up_instance = User_Set_Parameters(\r\n [\"Batch of images\", \"Just one image\"],\r\n \"Do you wish to run the script on a batch of images or just on opened image?\"\r\n )\r\n # get first boolean: is \"Batch of images\" selected?\r\n self.BatchProcessingToggle = pop_up_instance.SelectedOption[0]\r\n\r\n pop_up_instance = User_Set_Parameters(\r\n [\"Yes\", \"No\"],\r\n \"Would you like to run the segmentation module?\"\r\n )\r\n # get first boolean : is \"yes\" selected?\r\n self.SegmentationToggle = pop_up_instance.SelectedOption[0]\r\n\r\n pop_up_instance = User_Set_Parameters(\r\n [\"The nucleus\", \"The nucleolus\", \"The chromocenters\"],\r\n \"Which surface(s) are you analysing?\"\r\n )\r\n # get list of boolean\r\n self.SurfaceOption = pop_up_instance.SelectedOption\r\n\r\n def get_user_set_directory(self):\r\n root1 = Tk()\r\n Image_folder = tkFileDialog.askdirectory(parent=root1, initialdir=\"/\", title='Please select the directory containing the images to be processed. 
\\n The folder containing the resulting files will be saved in this directory.')\r\n root1.destroy()\r\n self.image_folder = Image_folder\r\n\r\n def get_file_directory(self):\r\n vFileName = self.vImaris.GetCurrentFileName()\r\n vFilePath = os.path.dirname(vFileName)\r\n self.image_folder = vFilePath\r\n return os.path.split(vFileName)[1]\r\n\r\n def create_directory(self):\r\n Result_pathway = os.path.join(self.image_folder, self.plugin_name + \"_Result\")\r\n counter = 0\r\n while os.path.exists(Result_pathway) and counter < 3:\r\n # tkMessageBox.showinfo(title=\"Alert\",\r\n # message=\"Please save the folder '\" + self.plugin_name + \"_Result' under another name first!\")\r\n # quit()\r\n print(\"Please save the folder '\" + self.plugin_name + \"_Result' under another name!\")\r\n print(\"If you press ENTER three times, without renaming your folder, IT WILL BE DELETED!\")\r\n raw_input(\"Press ENTER to continue.\")\r\n counter += 1\r\n if os.path.exists(Result_pathway):\r\n rmtree(Result_pathway)\r\n os.makedirs(Result_pathway)\r\n self.result_path = Result_pathway\r\n\r\n\r\n def __init__(self, aImarisId, plugin_name):\r\n try:\r\n vImarisLib = ImarisLib.ImarisLib()\r\n self.vImaris = vImarisLib.GetApplication(aImarisId)\r\n self.plugin_name = plugin_name\r\n if self.vImaris is not None:\r\n # Get parameters saved in .csv file with the same name as this plugin\r\n self.get_saved_parameters()\r\n # Set up pop up windows to ask user for few parameters\r\n self.get_user_set_parameters()\r\n if self.BatchProcessingToggle:\r\n self.get_user_set_directory()\r\n self.file_name_list = [i for i in os.listdir(self.image_folder) if i.endswith('.ims') or i.endswith('.ics')]\r\n else:\r\n vFileName = self.get_file_directory()\r\n self.file_name_list = [vFileName]\r\n self.create_directory()\r\n else:\r\n tkMessageBox.showinfo(title=\"Alert\", message=\"Can't connect with Imaris!\")\r\n quit()\r\n except:\r\n logging.exception(\"Oops: Error inside class Imaris_Plugin\")\r\n\r\n\r\nclass Images():\r\n\r\n _plugin_parameters = None\r\n _number_processed_file = 0\r\n\r\n def get_plugin_instance(self, plugin_instance):\r\n if self._plugin_parameters is None:\r\n self._plugin_parameters = plugin_instance\r\n\r\n def get_channel_number(self):\r\n self.Total_channel = self.image.GetSizeC()\r\n\r\n def get_selected_channel(self):\r\n self.get_channel_number()\r\n pop_up_instance = User_Set_Parameters(\r\n range(self.Total_channel),\r\n \"Select DAPI channel.\")\r\n # get first occurance of True in list\r\n dapi_channel = [i for i, x in enumerate(pop_up_instance.SelectedOption) if x == 1]\r\n self.dapi_channel = dapi_channel[0]\r\n\r\n def logtime(self, task_name=\"\", give_duration=True):\r\n # This is the logtime, logs start and end of tasks\r\n curtime = datetime.datetime.now()\r\n if self.gLasttime is not None and give_duration:\r\n diff = (curtime-self.gLasttime).total_seconds()\r\n else:\r\n diff = curtime.ctime()\r\n self.gLasttime = curtime\r\n print(str(diff) + ' sec. 
: ' + task_name)\r\n\r\n def open_file(self):\r\n full_path_file_name = os.path.join(self._plugin_parameters.image_folder, self.file_name)\r\n self._plugin_parameters.vImaris.FileOpen(full_path_file_name, \"\")\r\n\r\n\r\n def __init__(self, plugin_instance, file_name):\r\n self.gLasttime = None\r\n self.file_name = file_name\r\n try:\r\n self.get_plugin_instance(plugin_instance)\r\n self.open_file()\r\n self.image = self._plugin_parameters.vImaris.GetDataSet()\r\n if self.image is not None:\r\n task_name = \"Start processing file - \" + self.file_name\r\n self.logtime(task_name=task_name)\r\n self.volume = self._plugin_parameters.vImaris.GetSurpassSelection()\r\n self.factory = self._plugin_parameters.vImaris.GetFactory()\r\n self.scene = self._plugin_parameters.vImaris.GetSurpassScene()\r\n self.container = self.factory.CreateDataContainer()\r\n self.container.SetName('Segmented objects')\r\n if self._number_processed_file == 0:\r\n self.get_selected_channel()\r\n else:\r\n print(\"No image detected in file\" + self.file_name)\r\n except:\r\n logging.exception(\"Oops: Error inside class Images\")\r\n\r\n def remove_container(self):\r\n self.scene.RemoveChild(self.container)\r\n\r\n def exit_file(self):\r\n if self._plugin_parameters.save_image_toggle:\r\n full_path = os.path.join(self._plugin_parameters.result_path, self.file_name+\".ims\")\r\n self._plugin_parameters.vImaris.FileSave(full_path, \"\")\r\n task_name = \"Save file\"\r\n self.logtime(task_name=task_name)\r\n self.remove_container()\r\n task_name = \"End processing file - \" + self.file_name\r\n self.logtime(task_name=task_name, give_duration=False)\r\n self._number_processed_file += 1\r\n\r\n# ==============================================================================\r\n# FUNCTIONS REQUIRED BY SURFACE CLASS\r\n# ==============================================================================\r\n# Get numeric data for an item in scene\r\ndef GetStat(vSceneItem, FilterString):\r\n vAllStatistics = vSceneItem.GetStatistics()\r\n vNames = vAllStatistics.mNames\r\n vValues = vAllStatistics.mValues\r\n OutputParam = [float(vValues[a]) for a, x in enumerate(vNames) if x == FilterString]\r\n return OutputParam\r\n\r\n# Get max intensity from list of intensity values #\r\ndef getMax(ListIntensity):\r\n maxList = [max(i) for j in ListIntensity for i in j]\r\n maxValue = max(maxList)\r\n return maxValue\r\n\r\n# Select voxels with low DAPI intensity #\r\ndef getLowIntensity(detailed_nucleus_mask_values, smoothed_channel_mask, rough_nucleus_mask_values):\r\n maxValue = getMax(smoothed_channel_data)\r\n Result = []\r\n for z in range(len(smoothed_channel_data)):\r\n mask_y = []\r\n for y in range(len(smoothed_channel_data[0])):\r\n mask_surface = detailed_nucleus_mask_values[z][y]\r\n mask_surface2 = rough_nucleus_mask_values[z][y]\r\n mask = smoothed_channel_mask[z][y]\r\n # DAPI MASK: NORMALISE INTENSITY WITH MAXIMUM VALUE AND ROUND UP TO 1\r\n mask = [round(item / maxValue, 1) for item in mask]\r\n # = SELECT VOXELS WITH BELOW AVERAGE INTENSITY\r\n mask = [0 if item >= 0.5 else item for item in mask]\r\n # = SELECT VOXELS OUTSIDE OF DETAILED SURFACE\r\n mask_surface = [maxValue if item == 0 else 0 for item in mask_surface]\r\n # REMOVE VOXELS OUTSIDE OF ROUGH SURFACE\r\n mask = [i * j * k for i, j, k in zip(mask, mask_surface, mask_surface2)]\r\n mask_y.append(mask)\r\n Result.append(mask_y)\r\n return Result\r\n\r\n\r\nclass Surface(Images):\r\n \"\"\"\r\n This is Surface class. 
It inherits Images class and segments the nucleus,\r\n the nucleolus and the chromocenters\r\n \"\"\"\r\n\r\n def create_surface_in_scene(self):\r\n # ADD ITEM TO SCENE\r\n self.surface.SetName(self.object_type)\r\n self.container.AddChild(self.surface, -1)\r\n self.scene.AddChild(self.container, -1)\r\n # SAVE SNAPSHOT AND DESELECT ITEM FROM SCENE\r\n full_path = os.path.join(self._plugin_parameters.result_path,\r\n \"Snapshot\" + self.object_type +\"_\"+self.file_name+\".tif\")\r\n self._plugin_parameters.vImaris.SaveSnapShot(full_path)\r\n #deselect item from scene\r\n self.surface.SetVisible(0)\r\n\r\n def select_surface(self):\r\n vTimeIndex = 0\r\n vol = GetStat(self.surface, \"Volume\")\r\n if self.object_type == \"nucleolus\":\r\n area = GetStat(self.surface, \"Area\")\r\n volumeToAreaRatio = [x/y for x, y in zip(vol, area)]\r\n SelectedID = max(xrange(len(volumeToAreaRatio)), key=volumeToAreaRatio.__getitem__)\r\n if self.object_type == \"nucleus\":\r\n SelectedID = max(xrange(len(vol)), key=vol.__getitem__)\r\n vertices = self.surface.GetVertices(SelectedID)\r\n vNormals = self.surface.GetNormals(SelectedID)\r\n faces = self.surface.GetTriangles(SelectedID)\r\n self.surface = self.factory.CreateSurfaces()\r\n self.surface.AddSurface(vertices, faces, vNormals, vTimeIndex)\r\n\r\n def set_string_filter(self):\r\n SfS = '\"Volume\" above ' + str(self._plugin_parameters.nucleus_minimum_volume) + ' um^3'\r\n if self._plugin_parameters.remove_border_toggle:\r\n SfS = SfS + ' \"Distance to Image Border XYZ\" above 0.0516 um'\r\n return SfS\r\n\r\n def segment_surface(self, surface_grain, channel_index, string_filter, local_diameter, vATM):\r\n vATA = 1\r\n if vATM > 0:\r\n vATA = 0\r\n vROI = None\r\n surface = self._plugin_parameters.vImaris.GetImageProcessing().DetectSurfaces(\r\n self.image,\r\n vROI,\r\n channel_index,\r\n surface_grain,\r\n local_diameter,\r\n vATA,\r\n vATM,\r\n string_filter)\r\n return surface\r\n\r\n def Get_Mask_data(surface):\r\n vImageSizeX = self.image.GetSizeX()\r\n vImageSizeY = self.image.GetSizeY()\r\n vImageSizeZ = self.image.GetSizeZ()\r\n vExtentMinX = self.image.GetExtendMinX()\r\n vExtentMinY = self.image.GetExtendMinY()\r\n vExtentMinZ = self.image.GetExtendMinZ()\r\n vExtentMaxX = self.image.GetExtendMaxX()\r\n vExtentMaxY = self.image.GetExtendMaxY()\r\n vExtentMaxZ = self.image.GetExtendMaxZ()\r\n mask_min = [vExtentMinX, vExtentMinY, vExtentMinZ]\r\n mask_max = [vExtentMaxX, vExtentMaxY, vExtentMaxZ]\r\n mask_size = [vImageSizeX, vImageSizeY, vImageSizeZ]\r\n mask_time =\t0\r\n mask =\tsurface.GetMask(mask_min[0], mask_min[1], mask_min[2], mask_max[0], mask_max[1], mask_max[2],mask_size[0],mask_size[1], mask_size[2], mask_time)\r\n mask_values =\tmask.GetDataVolumeFloats(0,0)\r\n return mask_values\r\n\r\n def AddChannel(self, list_voxel_intensity, channel_name):\r\n time_index = 0\r\n channel_index = self.Total_channel\r\n DAPI_channel_color = self.image.GetChannelColorRGBA(self.dapi_channel)\r\n self.Total_channel += 1\r\n self.image.SetSizeC(self.Total_channel)\r\n self.image.SetDataVolumeFloats(list_voxel_intensity, channel_index, time_index)\r\n self.image.SetChannelName(channel_index, channel_name)\r\n self.image.SetChannelColorRGBA (channel_index, DAPI_channel_color)\r\n\r\n def new_segment_decorator(segment_func):\r\n def edit_scene(self):\r\n segment_func(self)\r\n if self.surface is not None:\r\n if self.object_type in [\"nucleus\", \"nucleolus\"]:\r\n self.select_surface()\r\n self.create_surface_in_scene()\r\n else:\r\n 
print(self.object_type.upper() +\" IS NOT DETECTED!\")\r\n return edit_scene\r\n\r\n @new_segment_decorator\r\n def detect_nucleolus(self):\r\n surface = None\r\n vATM = 0\r\n string_filter=self.set_string_filter()\r\n # Do a detailed nucleus segmentation: set a low sigma to gaussian filter\r\n detailed_nucleus_segmentation = self.segment_surface(\r\n self._plugin_parameters.nucleus_detail_surface_grain,\r\n self.smooth_dapi_channel,\r\n string_filter,\r\n self.nucleus_surface_surface_diameter,\r\n vATM)\r\n if detailed_nucleus_segmentation.GetNumberOfSurfaces() > 0:\r\n # Do rough nucleus segmentation: set a high sigma to gaussian filter\r\n rough_nucleus_segmentation = self.segment_surface(\r\n self._plugin_parameters.nucleus_rough_surface_grain,\r\n self.smooth_dapi_channel,\r\n string_filter,\r\n self._plugin_parameters.nucleus_surface_surface_diameter,\r\n vATM)\r\n # Get DAPI channel mask\r\n smoothed_channel_data = self.image.GetDataVolumeFloats(self.smooth_dapi_channel,0)\r\n # Get mask for detailed / rough nucleus segmentation\r\n Detailed_nucleus_mask = Get_Mask_data(detailed_nucleus_segmentation)\r\n Rough_nucleus_mask = Get_Mask_data(rough_nucleus_segmentation)\r\n # From the above masks, infere nucleolus mask\r\n Nucleolus_mask = getLowIntensity(\r\n Detailed_nucleus_mask,\r\n smoothed_channel_data,\r\n Rough_nucleus_mask)\r\n # Create new channel with nucleolus mask\r\n self.AddChannel(Nucleolus_mask, \"Low DAPI intensity\")\r\n # Segment new channel\r\n string_filter = \"\"\r\n surface = self.segment_surface(\r\n self._plugin_parameters.nucleolus_surface_grain,\r\n self.Total_channel-1,\r\n string_filter,\r\n self._plugin_parameters.nucleolus_surface_surface_diameter,\r\n vATM)\r\n self.surface = surface\r\n\r\n @new_segment_decorator\r\n def detect_chromocenters(self):\r\n numberSceneInstance = self.scene.GetNumberOfChildren()\r\n VolumeNotFound = True\r\n i = 0\r\n while i <= numberSceneInstance and VolumeNotFound:\r\n selection = self.scene.GetChild(i)\r\n aVolume = self.factory.ToVolume(selection)\r\n if aVolume is not None:\r\n VolumeNotFound = False\r\n i += 1\r\n IntensityMax = GetStat(aVolume, \"Data Intensity Max\")[self.smooth_dapi_channel]\r\n string_filter = '\"Volume\" above '+str(self._plugin_parameters.chromocenter_minimum_volume)+' um^3'\r\n vATM = IntensityMax * self._plugin_parameters.chromocenter_minimum_intensity\r\n surface_grain = self._plugin_parameters.chromocenter_surface_grain\r\n local_diameter = self._plugin_parameters.chromocenter_surface_diameter\r\n surface = self.segment_surface(\r\n surface_grain,\r\n self.smooth_dapi_channel,\r\n string_filter,\r\n local_diameter,\r\n vATM)\r\n self.surface = surface if surface.GetNumberOfSurfaces() > 0 else None\r\n\r\n @new_segment_decorator\r\n def detect_nucleus(self):\r\n vATM = 0\r\n string_filter = self.set_string_filter()\r\n surface = self.segment_surface(\r\n self._plugin_parameters.nucleus_rough_surface_grain,\r\n self.dapi_channel,\r\n string_filter,\r\n self._plugin_parameters.nucleus_surface_surface_diameter,\r\n vATM)\r\n self.surface = surface if surface.GetNumberOfSurfaces() > 0 else None\r\n\r\n def apply_gaussian_filter(self):\r\n vImage_data = self.image.GetDataVolumeFloats(\r\n self.dapi_channel,0)\r\n self.AddChannel(vImage_data, \"Smooth DAPI channel\")\r\n self.smooth_dapi_channel = self.Total_channel-1\r\n self._plugin_parameters.vImaris.GetImageProcessing().GaussFilterChannel(\r\n self.image,\r\n self.smooth_dapi_channel,\r\n self._plugin_parameters.gaussian_filter_sigma)\r\n 
self._plugin_parameters.vImaris.GetImageProcessing().SubtractBackgroundChannel(\r\n self.image,\r\n self.smooth_dapi_channel,\r\n self._plugin_parameters.background_substraction_sigma)\r\n\r\n # TODO: GET FEATURES OF SURFACES\r\n # TODO: CHECK NUCLEOLUS AND CC DETECTION\r\n # TODO: ERROR: END FILE PROCESSING DISPLAYED BEFORE END OF CLASS SURFACE\r\n\r\n def Segment_nucleus(self):\r\n try:\r\n # Segment the nucleus\r\n self.object_type = \"nucleus\"\r\n self.detect_nucleus()\r\n self.nucleus = self.surface\r\n self.logtime(task_name=\"Detect nucleus surface\")\r\n if self._plugin_parameters.SurfaceOption[1] or self._plugin_parameters.SurfaceOption[2]:\r\n # Apply gaussian filter to DAPI channel and create new channel\r\n self.apply_gaussian_filter()\r\n self.logtime(task_name=\"Smooth DAPI channel\")\r\n if self._plugin_parameters.SurfaceOption[1]:\r\n # Segment nucleous\r\n self.object_type = \"nucleolus\"\r\n self.detect_nucleolus()\r\n self.logtime(task_name=\"Detect nucleus surface\")\r\n if self._plugin_parameters.SurfaceOption[2]:\r\n # Segment chromocenters\r\n self.object_type = \"chromocenters\"\r\n self.detect_chromocenters()\r\n self.logtime(task_name=\"Detect chromocenter surface\")\r\n except:\r\n logging.exception(\"Oops: Error inside class Surface\")\r\n\r\ndef XTSegment_nucleus(aImarisId):\r\n # Connect to Imaris\r\n plugin_name = \"XTSegment_nucleus\"\r\n logging.basicConfig(level=logging.DEBUG, filename=\"log[\"+plugin_name+\"].log\")\r\n try:\r\n plugin_instance = Imaris_Plugin(aImarisId, plugin_name)\r\n for file_name in plugin_instance.file_name_list:\r\n # image_instance = Images(plugin_instance, file_name)\r\n # print(image_instance._number_processed_file)\r\n image_instance=Surface(plugin_instance=plugin_instance, file_name=plugin_instance.file_name_list[0])\r\n image_instance.Segment_nucleus()\r\n print(\"plugin working\")\r\n raw_input(\"Press Enter to terminate.\")\r\n except:\r\n logging.exception(\"Oops: Error in main XT function\")\r\n"
] |
[
[
"pandas.read_csv"
]
] |
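Editorial note: the only tracked API in this Imaris plugin is pandas.read_csv, used in get_plugin_parameters() to parse a semicolon-separated parameter file and pull out its "Value" column. A standalone sketch of that read; the two parameter rows written here are made-up stand-ins for the real XTSegment_nucleus_Parameters.csv:

# Sketch of the parameter-file parsing in get_plugin_parameters() above.
import pandas as pd

with open('XTSegment_nucleus_Parameters.csv', 'w') as f:
    f.write('Parameter;Value\n')
    f.write('Surface grain (detailed nucleus);0.5\n')
    f.write('Surface grain (rough nucleus);1.2\n')

df = pd.read_csv('XTSegment_nucleus_Parameters.csv',
                 sep=';', header='infer', decimal='.')
if 'Value' in df.columns:
    params = [float(x) for x in df['Value']]
    print(params)   # [0.5, 1.2]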
elybrand/neurodsp
|
[
"96355f4c75e1eedef2a77a8bfafc718f80b8dae3"
] |
[
"neurodsp/plts/time_series.py"
] |
[
"\"\"\"Plots for time series.\"\"\"\n\nfrom itertools import repeat, cycle\n\nimport numpy as np\nimport numpy.ma as ma\nimport matplotlib.pyplot as plt\n\nfrom neurodsp.plts.style import style_plot\nfrom neurodsp.plts.utils import check_ax, savefig\n\n###################################################################################################\n###################################################################################################\n\n@savefig\n@style_plot\ndef plot_time_series(times, sigs, labels=None, colors=None, ax=None, **kwargs):\n \"\"\"Plot a time series.\n\n Parameters\n ----------\n times : 1d array or list of 1d array\n Time definition(s) for the time series to be plotted.\n sigs : 1d array or list of 1d array\n Time series to plot.\n labels : list of str, optional\n Labels for each time series.\n colors : str or list of str\n Colors to use to plot lines.\n ax : matplotlib.Axes, optional\n Figure axes upon which to plot.\n **kwargs\n Keyword arguments for customizing the plot.\n\n Examples\n --------\n Create a time series plot:\n\n >>> from neurodsp.sim import sim_combined\n >>> from neurodsp.utils import create_times\n >>> sig = sim_combined(n_seconds=10, fs=500,\n ... components={'sim_powerlaw': {'exponent': -1.5, 'f_range': (2, None)},\n ... 'sim_oscillation' : {'freq': 10}})\n >>> times = create_times(n_seconds=10, fs=500)\n >>> plot_time_series(times, sig)\n \"\"\"\n\n ax = check_ax(ax, (15, 3))\n\n times = repeat(times) if isinstance(times, np.ndarray) else times\n sigs = [sigs] if isinstance(sigs, np.ndarray) else sigs\n\n if labels is not None:\n labels = [labels] if not isinstance(labels, list) else labels\n else:\n labels = repeat(labels)\n\n # If not provided, default colors for up to two signals to be black & red\n if not colors and len(sigs) <= 2:\n colors = ['k', 'r']\n colors = repeat(colors) if not isinstance(colors, list) else cycle(colors)\n\n for time, sig, color, label in zip(times, sigs, colors, labels):\n ax.plot(time, sig, color=color, label=label)\n\n ax.set_xlabel('Time (s)')\n ax.set_ylabel('Voltage (uV)')\n\n\n@savefig\n@style_plot\ndef plot_instantaneous_measure(times, sigs, measure='phase', ax=None, **kwargs):\n \"\"\"Plot an instantaneous measure, of phase, amplitude or frequency.\n\n Parameters\n ----------\n times : 1d array or list of 1d array\n Time definition(s) for the time series to be plotted.\n sigs : 1d array or list of 1d array\n Time series to plot.\n measure : {'phase', 'amplitude', 'frequency'}\n Which kind of measure is being plotted.\n ax : matplotlib.Axes, optional\n Figure axes upon which to plot.\n **kwargs\n Keyword arguments to pass into `plot_time_series`, and/or for customizing the plot.\n\n Examples\n --------\n Create an instantaneous phase plot:\n\n >>> from neurodsp.sim import sim_combined\n >>> from neurodsp.utils import create_times\n >>> from neurodsp.timefrequency import phase_by_time\n >>> sig = sim_combined(n_seconds=2, fs=500,\n ... 
components={'sim_powerlaw': {}, 'sim_oscillation' : {'freq': 10}})\n >>> pha = phase_by_time(sig, fs=500, f_range=(8, 12))\n >>> times = create_times(n_seconds=2, fs=500)\n >>> plot_instantaneous_measure(times, pha, measure='phase')\n \"\"\"\n\n if measure not in ['phase', 'amplitude', 'frequency']:\n raise ValueError('Measure not understood.')\n\n if measure == 'phase':\n plot_time_series(times, sigs, ax=ax, ylabel='Phase (rad)', **kwargs)\n plt.yticks([-np.pi, 0, np.pi], ['-$\\pi$', 0, '$\\pi$'])\n elif measure == 'amplitude':\n plot_time_series(times, sigs, ax=ax, ylabel='Amplitude', **kwargs)\n elif measure == 'frequency':\n plot_time_series(times, sigs, ax=ax, ylabel='Instantaneous\\nFrequency (Hz)', **kwargs)\n\n\n@savefig\n@style_plot\ndef plot_bursts(times, sig, bursting, ax=None, **kwargs):\n \"\"\"Plot a time series, with labeled bursts.\n\n Parameters\n ----------\n times : 1d array\n Time definition for the time series to be plotted.\n sig : 1d array\n Time series to plot.\n bursting : 1d array\n A boolean array which indicates identified bursts.\n ax : matplotlib.Axes, optional\n Figure axes upon which to plot.\n **kwargs\n Keyword arguments to pass into `plot_time_series`, and/or for customizing the plot.\n\n Examples\n --------\n Create a plot of burst activity:\n\n >>> from neurodsp.sim import sim_combined\n >>> from neurodsp.utils import create_times\n >>> from neurodsp.burst import detect_bursts_dual_threshold\n >>> sig = sim_combined(n_seconds=10, fs=500,\n ... components={'sim_synaptic_current': {},\n ... 'sim_bursty_oscillation' : {'freq': 10}},\n ... component_variances=(0.1, 0.9))\n >>> is_burst = detect_bursts_dual_threshold(sig, fs=500, dual_thresh=(1, 2), f_range=(8, 12))\n >>> times = create_times(n_seconds=10, fs=500)\n >>> plot_bursts(times, sig, is_burst, labels=['Raw Data', 'Detected Bursts'])\n \"\"\"\n\n ax = check_ax(ax, (15, 3))\n\n bursts = ma.array(sig, mask=np.invert(bursting))\n plot_time_series(times, [sig, bursts], ax=ax, **kwargs)\n"
] |
[
[
"matplotlib.pyplot.yticks",
"numpy.invert"
]
] |
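Editorial note: both tracked APIs here come from small but non-obvious tricks in the plotting code: plot_bursts() blanks out non-burst samples with ma.array(sig, mask=np.invert(bursting)) so that plotting the masked copy over the raw trace highlights only the bursts, and plot_instantaneous_measure() relabels the y-axis in multiples of pi via plt.yticks. A standalone sketch of the masking trick, with a synthetic signal and a fake detector in place of neurodsp's:

# Sketch of the burst-highlighting mask used in plot_bursts() above.
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt

fs = 500
times = np.arange(0, 2, 1 / fs)
sig = np.sin(2 * np.pi * 10 * times) * (times > 1)   # "burst" in second half
bursting = np.abs(sig) > 0                           # stand-in detector output

bursts = ma.array(sig, mask=np.invert(bursting))     # masked where not bursting
plt.plot(times, sig, 'k', label='signal')
plt.plot(times, bursts, 'r', label='bursts')
plt.yticks([-1, 0, 1])
plt.legend()
plt.show()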
Ewpratten/frc_971_mirror
|
[
"3a8a0c4359f284d29547962c2b4c43d290d8065c",
"3a8a0c4359f284d29547962c2b4c43d290d8065c"
] |
[
"frc971/control_loops/python/haptic_wheel.py",
"frc971/analysis/plot_test.py"
] |
[
"#!/usr/bin/python\n\nfrom frc971.control_loops.python import control_loop\nfrom frc971.control_loops.python import controls\nimport numpy\nimport sys\nimport copy\nimport scipy.interpolate\nfrom matplotlib import pylab\n\nimport gflags\nimport glog\n\nFLAGS = gflags.FLAGS\n\ngflags.DEFINE_bool('plot', False, 'If true, plot the loop response.')\ngflags.DEFINE_string('data', None, 'If defined, plot the provided CAN data')\ngflags.DEFINE_bool(\n 'rerun_kf', False,\n 'If true, rerun the KF. The torque in the data file will be interpreted as the commanded current.'\n)\n\n\nclass SystemParams(object):\n\n def __init__(self, J, G, kP, kD, kCompensationTimeconstant, q_pos, q_vel,\n q_torque, current_limit):\n self.J = J\n self.G = G\n self.q_pos = q_pos\n self.q_vel = q_vel\n self.q_torque = q_torque\n self.kP = kP\n self.kD = kD\n self.kCompensationTimeconstant = kCompensationTimeconstant\n self.r_pos = 0.001\n self.current_limit = current_limit\n\n #[15.0, 0.25],\n #[10.0, 0.2],\n #[5.0, 0.13],\n #[3.0, 0.10],\n #[2.0, 0.08],\n #[1.0, 0.06],\n #[0.5, 0.05],\n #[0.25, 0.025],\n\n\nkWheel = SystemParams(\n J=0.0008,\n G=(1.25 + 0.02) / 0.35,\n q_pos=0.001,\n q_vel=0.20,\n q_torque=0.005,\n kP=7.0,\n kD=0.0,\n kCompensationTimeconstant=0.95,\n current_limit=4.5)\nkTrigger = SystemParams(\n J=0.00025,\n G=(0.925 * 2.0 + 0.02) / 0.35,\n q_pos=0.001,\n q_vel=0.1,\n q_torque=0.005,\n kP=120.0,\n kD=1.8,\n kCompensationTimeconstant=0.95,\n current_limit=3.0)\n\n\nclass HapticInput(control_loop.ControlLoop):\n\n def __init__(self, params=None, name='HapticInput'):\n # The defaults are for the steering wheel.\n super(HapticInput, self).__init__(name)\n motor = self.motor = control_loop.MN3510()\n\n # Moment of inertia of the wheel in kg m^2\n self.J = params.J\n\n # Control loop time step\n self.dt = 0.001\n\n # Gear ratio from the motor to the input.\n self.G = params.G\n\n self.A_continuous = numpy.matrix(numpy.zeros((2, 2)))\n self.A_continuous[1, 1] = 0\n self.A_continuous[0, 1] = 1\n\n self.B_continuous = numpy.matrix(numpy.zeros((2, 1)))\n self.B_continuous[1, 0] = motor.Kt * self.G / self.J\n\n # State feedback matrices\n # [position, angular velocity]\n self.C = numpy.matrix([[1.0, 0.0]])\n self.D = numpy.matrix([[0.0]])\n\n self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,\n self.B_continuous, self.dt)\n\n self.U_max = numpy.matrix([[2.5]])\n self.U_min = numpy.matrix([[-2.5]])\n\n self.L = numpy.matrix([[0.0], [0.0]])\n self.K = numpy.matrix([[0.0, 0.0]])\n\n self.InitializeState()\n\n\nclass IntegralHapticInput(HapticInput):\n\n def __init__(self, params=None, name=\"IntegralHapticInput\"):\n super(IntegralHapticInput, self).__init__(name=name, params=params)\n\n self.A_continuous_unaugmented = self.A_continuous\n self.B_continuous_unaugmented = self.B_continuous\n\n self.A_continuous = numpy.matrix(numpy.zeros((3, 3)))\n self.A_continuous[0:2, 0:2] = self.A_continuous_unaugmented\n self.A_continuous[1, 2] = (1 / self.J)\n\n self.B_continuous = numpy.matrix(numpy.zeros((3, 1)))\n self.B_continuous[0:2, 0] = self.B_continuous_unaugmented\n\n self.C_unaugmented = self.C\n self.C = numpy.matrix(numpy.zeros((1, 3)))\n self.C[0:1, 0:2] = self.C_unaugmented\n\n self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,\n self.B_continuous, self.dt)\n\n self.Q = numpy.matrix([[(params.q_pos**2.0), 0.0, 0.0],\n [0.0, (params.q_vel**2.0), 0.0],\n [0.0, 0.0, (params.q_torque**2.0)]])\n\n self.R = numpy.matrix([[(params.r_pos**2.0)]])\n\n self.KalmanGain, self.Q_steady = controls.kalman(\n 
A=self.A, B=self.B, C=self.C, Q=self.Q, R=self.R)\n self.L = self.A * self.KalmanGain\n\n self.K_unaugmented = self.K\n self.K = numpy.matrix(numpy.zeros((1, 3)))\n self.K[0, 0:2] = self.K_unaugmented\n self.K[0, 2] = 1.0 / (self.motor.Kt / (self.motor.resistance))\n\n self.InitializeState()\n\n\ndef ReadCan(filename):\n \"\"\"Reads the candump in filename and returns the 4 fields.\"\"\"\n trigger = []\n trigger_velocity = []\n trigger_torque = []\n trigger_current = []\n wheel = []\n wheel_velocity = []\n wheel_torque = []\n wheel_current = []\n\n trigger_request_time = [0.0]\n trigger_request_current = [0.0]\n wheel_request_time = [0.0]\n wheel_request_current = [0.0]\n\n with open(filename, 'r') as fd:\n for line in fd:\n data = line.split()\n can_id = int(data[1], 16)\n if can_id == 0:\n data = [int(d, 16) for d in data[3:]]\n trigger.append(((data[0] + (data[1] << 8)) - 32768) / 32768.0)\n trigger_velocity.append(\n ((data[2] + (data[3] << 8)) - 32768) / 32768.0)\n trigger_torque.append(\n ((data[4] + (data[5] << 8)) - 32768) / 32768.0)\n trigger_current.append(\n ((data[6] + ((data[7] & 0x3f) << 8)) - 8192) / 8192.0)\n elif can_id == 1:\n data = [int(d, 16) for d in data[3:]]\n wheel.append(((data[0] + (data[1] << 8)) - 32768) / 32768.0)\n wheel_velocity.append(\n ((data[2] + (data[3] << 8)) - 32768) / 32768.0)\n wheel_torque.append(\n ((data[4] + (data[5] << 8)) - 32768) / 32768.0)\n wheel_current.append(\n ((data[6] + ((data[7] & 0x3f) << 8)) - 8192) / 8192.0)\n elif can_id == 2:\n data = [int(d, 16) for d in data[3:]]\n trigger_request_current.append(\n ((data[4] + (data[5] << 8)) - 32768) / 32768.0)\n trigger_request_time.append(len(trigger) * 0.001)\n elif can_id == 3:\n data = [int(d, 16) for d in data[3:]]\n wheel_request_current.append(\n ((data[4] + (data[5] << 8)) - 32768) / 32768.0)\n wheel_request_time.append(len(wheel) * 0.001)\n\n trigger_data_time = numpy.arange(0, len(trigger)) * 0.001\n wheel_data_time = numpy.arange(0, len(wheel)) * 0.001\n\n # Extend out the data in the interpolation table.\n trigger_request_time.append(trigger_data_time[-1])\n trigger_request_current.append(trigger_request_current[-1])\n wheel_request_time.append(wheel_data_time[-1])\n wheel_request_current.append(wheel_request_current[-1])\n\n return (trigger_data_time, wheel_data_time, trigger, wheel,\n trigger_velocity, wheel_velocity, trigger_torque, wheel_torque,\n trigger_current, wheel_current, trigger_request_time,\n trigger_request_current, wheel_request_time, wheel_request_current)\n\n\ndef rerun_and_plot_kf(data_time,\n data_radians,\n data_current,\n data_request_current,\n params,\n run_correct=True):\n kf_velocity = []\n dt_velocity = []\n kf_position = []\n adjusted_position = []\n last_angle = None\n haptic_observer = IntegralHapticInput(params=params)\n\n # Parameter sweep J.\n num_kf = 1\n min_J = max_J = params.J\n\n # J = 0.0022\n #num_kf = 15\n #min_J = min_J / 2.0\n #max_J = max_J * 2.0\n initial_velocity = (data_radians[1] - data_radians[0]) * 1000.0\n\n def DupParamsWithJ(params, J):\n p = copy.copy(params)\n p.J = J\n return p\n\n haptic_observers = [\n IntegralHapticInput(params=DupParamsWithJ(params, j))\n for j in numpy.logspace(\n numpy.log10(min_J), numpy.log10(max_J), num=num_kf)\n ]\n # Initialize all the KF's.\n haptic_observer.X_hat[1, 0] = initial_velocity\n haptic_observer.X_hat[0, 0] = data_radians[0]\n for observer in haptic_observers:\n observer.X_hat[1, 0] = initial_velocity\n observer.X_hat[0, 0] = data_radians[0]\n\n last_request_current = 
data_request_current[0]\n kf_torques = [[] for i in xrange(num_kf)]\n for angle, current, request_current in zip(data_radians, data_current,\n data_request_current):\n # Predict and correct all the parameter swept observers.\n for i, observer in enumerate(haptic_observers):\n observer.Y = numpy.matrix([[angle]])\n if run_correct:\n observer.CorrectObserver(numpy.matrix([[current]]))\n kf_torques[i].append(-observer.X_hat[2, 0])\n observer.PredictObserver(numpy.matrix([[current]]))\n observer.PredictObserver(numpy.matrix([[current]]))\n\n # Predict and correct the main observer.\n haptic_observer.Y = numpy.matrix([[angle]])\n if run_correct:\n haptic_observer.CorrectObserver(numpy.matrix([[current]]))\n kf_position.append(haptic_observer.X_hat[0, 0])\n adjusted_position.append(kf_position[-1] -\n last_request_current / params.kP)\n last_request_current = last_request_current * params.kCompensationTimeconstant + request_current * (\n 1.0 - params.kCompensationTimeconstant)\n kf_velocity.append(haptic_observer.X_hat[1, 0])\n if last_angle is None:\n last_angle = angle\n dt_velocity.append((angle - last_angle) / 0.001)\n\n haptic_observer.PredictObserver(numpy.matrix([[current]]))\n last_angle = angle\n\n # Plot the wheel observers.\n fig, ax1 = pylab.subplots()\n ax1.plot(data_time, data_radians, '.', label='wheel')\n ax1.plot(data_time, dt_velocity, '.', label='dt_velocity')\n ax1.plot(data_time, kf_velocity, '.', label='kf_velocity')\n ax1.plot(data_time, kf_position, '.', label='kf_position')\n ax1.plot(data_time, adjusted_position, '.', label='adjusted_position')\n\n ax2 = ax1.twinx()\n ax2.plot(data_time, data_current, label='data_current')\n ax2.plot(data_time, data_request_current, label='request_current')\n\n for i, kf_torque in enumerate(kf_torques):\n ax2.plot(\n data_time,\n kf_torque,\n label='-kf_torque[%f]' % haptic_observers[i].J)\n fig.tight_layout()\n ax1.legend(loc=3)\n ax2.legend(loc=4)\n\n\ndef plot_input(data_time,\n data_radians,\n data_velocity,\n data_torque,\n data_current,\n params,\n run_correct=True):\n dt_velocity = []\n last_angle = None\n initial_velocity = (data_radians[1] - data_radians[0]) * 1000.0\n\n for angle in data_radians:\n if last_angle is None:\n last_angle = angle\n dt_velocity.append((angle - last_angle) / 0.001)\n\n last_angle = angle\n\n # Plot the wheel observers.\n fig, ax1 = pylab.subplots()\n ax1.plot(data_time, data_radians, '.', label='angle')\n ax1.plot(data_time, data_velocity, '-', label='velocity')\n ax1.plot(data_time, dt_velocity, '.', label='dt_velocity')\n\n ax2 = ax1.twinx()\n ax2.plot(data_time, data_torque, label='data_torque')\n ax2.plot(data_time, data_current, label='data_current')\n fig.tight_layout()\n ax1.legend(loc=3)\n ax2.legend(loc=4)\n\n\ndef main(argv):\n if FLAGS.plot:\n if FLAGS.data is None:\n haptic_wheel = HapticInput()\n haptic_wheel_controller = IntegralHapticInput()\n observer_haptic_wheel = IntegralHapticInput()\n observer_haptic_wheel.X_hat[2, 0] = 0.01\n\n R = numpy.matrix([[0.0], [0.0], [0.0]])\n\n control_loop.TestSingleIntegralAxisSquareWave(\n R, 1.0, haptic_wheel, haptic_wheel_controller,\n observer_haptic_wheel)\n else:\n # Read the CAN trace in.\n trigger_data_time, wheel_data_time, trigger, wheel, trigger_velocity, \\\n wheel_velocity, trigger_torque, wheel_torque, trigger_current, \\\n wheel_current, trigger_request_time, trigger_request_current, \\\n wheel_request_time, wheel_request_current = ReadCan(FLAGS.data)\n\n wheel_radians = [w * numpy.pi * (338.16 / 360.0) for w in wheel]\n wheel_velocity 
= [w * 50.0 for w in wheel_velocity]\n wheel_torque = [w / 2.0 for w in wheel_torque]\n wheel_current = [w * 10.0 for w in wheel_current]\n wheel_request_current = [w * 2.0 for w in wheel_request_current]\n resampled_wheel_request_current = scipy.interpolate.interp1d(\n wheel_request_time, wheel_request_current,\n kind=\"zero\")(wheel_data_time)\n\n trigger_radians = [t * numpy.pi * (45.0 / 360.0) for t in trigger]\n trigger_velocity = [t * 50.0 for t in trigger_velocity]\n trigger_torque = [t / 2.0 for t in trigger_torque]\n trigger_current = [t * 10.0 for t in trigger_current]\n trigger_request_current = [t * 2.0 for t in trigger_request_current]\n resampled_trigger_request_current = scipy.interpolate.interp1d(\n trigger_request_time, trigger_request_current,\n kind=\"zero\")(trigger_data_time)\n\n if FLAGS.rerun_kf:\n rerun_and_plot_kf(\n trigger_data_time,\n trigger_radians,\n trigger_current,\n resampled_trigger_request_current,\n kTrigger,\n run_correct=True)\n rerun_and_plot_kf(\n wheel_data_time,\n wheel_radians,\n wheel_current,\n resampled_wheel_request_current,\n kWheel,\n run_correct=True)\n else:\n plot_input(trigger_data_time, trigger_radians, trigger_velocity,\n trigger_torque, trigger_current, kTrigger)\n plot_input(wheel_data_time, wheel_radians, wheel_velocity,\n wheel_torque, wheel_current, kWheel)\n pylab.show()\n\n return\n\n if len(argv) != 9:\n glog.fatal('Expected .h file name and .cc file name')\n else:\n namespaces = ['frc971', 'control_loops', 'drivetrain']\n for name, params, filenames in [('HapticWheel', kWheel, argv[1:5]),\n ('HapticTrigger', kTrigger, argv[5:9])]:\n haptic_input = HapticInput(params=params, name=name)\n loop_writer = control_loop.ControlLoopWriter(\n name, [haptic_input],\n namespaces=namespaces,\n scalar_type='float')\n loop_writer.Write(filenames[0], filenames[1])\n\n integral_haptic_input = IntegralHapticInput(\n params=params, name='Integral' + name)\n integral_loop_writer = control_loop.ControlLoopWriter(\n 'Integral' + name, [integral_haptic_input],\n namespaces=namespaces,\n scalar_type='float')\n\n integral_loop_writer.AddConstant(\n control_loop.Constant(\"k\" + name + \"Dt\", \"%f\",\n integral_haptic_input.dt))\n integral_loop_writer.AddConstant(\n control_loop.Constant(\"k\" + name + \"FreeCurrent\", \"%f\",\n integral_haptic_input.motor.free_current))\n integral_loop_writer.AddConstant(\n control_loop.Constant(\"k\" + name + \"StallTorque\", \"%f\",\n integral_haptic_input.motor.stall_torque))\n integral_loop_writer.AddConstant(\n control_loop.Constant(\"k\" + name + \"J\", \"%f\",\n integral_haptic_input.J))\n integral_loop_writer.AddConstant(\n control_loop.Constant(\"k\" + name + \"R\", \"%f\",\n integral_haptic_input.motor.resistance))\n integral_loop_writer.AddConstant(\n control_loop.Constant(\"k\" + name + \"T\", \"%f\",\n integral_haptic_input.motor.Kt))\n integral_loop_writer.AddConstant(\n control_loop.Constant(\"k\" + name + \"V\", \"%f\",\n integral_haptic_input.motor.Kv))\n integral_loop_writer.AddConstant(\n control_loop.Constant(\"k\" + name + \"P\", \"%f\", params.kP))\n integral_loop_writer.AddConstant(\n control_loop.Constant(\"k\" + name + \"D\", \"%f\", params.kD))\n integral_loop_writer.AddConstant(\n control_loop.Constant(\"k\" + name + \"G\", \"%f\", params.G))\n integral_loop_writer.AddConstant(\n control_loop.Constant(\"k\" + name + \"CurrentLimit\", \"%f\",\n params.current_limit))\n\n integral_loop_writer.Write(filenames[2], filenames[3])\n\n\nif __name__ == '__main__':\n argv = FLAGS(sys.argv)\n 
sys.exit(main(argv))\n",
"#!/usr/bin/python3\nimport unittest\n\nimport matplotlib\n# Use a non-interactive backend so that the test can actually run...\nmatplotlib.use('Agg')\n\nimport frc971.analysis.plot\n\n\nclass PlotterTest(unittest.TestCase):\n def test_plotter(self):\n \"\"\"Basic test that makes sure that we can run the test without crashing.\"\"\"\n self.assertEqual(0,\n frc971.analysis.plot.main([\n \"binary\", \"--logfile\",\n \"external/sample_logfile/file/log.fbs\",\n \"--config\", \"gyro.pb\"\n ]))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.matrix",
"matplotlib.pylab.show",
"numpy.log10",
"matplotlib.pylab.subplots",
"numpy.zeros"
],
[
"matplotlib.use"
]
] |
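Editorial note: haptic_wheel.py sweeps the moment of inertia J for its bank of Kalman-filter observers by spacing candidates logarithmically -- numpy.logspace over the log10 endpoints, one copied parameter set per candidate (see rerun_and_plot_kf and its commented-out min_J/max_J block). A minimal sketch of that sweep; the namedtuple is a stand-in for the repo's SystemParams class, and the numbers are illustrative:

# Sketch of the logarithmic J parameter sweep in rerun_and_plot_kf() above.
from collections import namedtuple
import numpy as np

Params = namedtuple('Params', ['J', 'G'])
base = Params(J=0.0008, G=3.63)

num_kf, min_j, max_j = 5, base.J / 2.0, base.J * 2.0
swept = [base._replace(J=j)
         for j in np.logspace(np.log10(min_j), np.log10(max_j), num=num_kf)]
for p in swept:
    print('J = %.6f' % p.J)   # geometric spacing from min_j to max_j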
joe733/client
|
[
"5e6758129a5557fd478d1cf2f46b4b7279ef6d04"
] |
[
"tests/test_data_types.py"
] |
[
"import wandb\nfrom wandb import data_types\nimport numpy as np\nimport pytest\nimport PIL\nimport os\nimport six\nimport sys\nimport glob\nimport platform\nfrom click.testing import CliRunner\nfrom . import utils\nfrom .utils import dummy_data\nimport matplotlib\nimport rdkit.Chem\nfrom wandb import Api\nimport time\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt # noqa: E402\n\ndata = np.random.randint(255, size=(1000))\n\n\n@pytest.fixture\ndef api(runner):\n return Api()\n\n\ndef test_wb_value(live_mock_server, test_settings):\n run = wandb.init(settings=test_settings)\n local_art = wandb.Artifact(\"N\", \"T\")\n public_art = run.use_artifact(\"N:latest\")\n\n wbvalue = data_types.WBValue()\n with pytest.raises(NotImplementedError):\n wbvalue.to_json(local_art)\n\n with pytest.raises(NotImplementedError):\n data_types.WBValue.from_json({}, public_art)\n\n assert data_types.WBValue.with_suffix(\"item\") == \"item.json\"\n\n table = data_types.WBValue.init_from_json(\n {\n \"_type\": \"table\",\n \"data\": [[]],\n \"columns\": [],\n \"column_types\": wandb.data_types._dtypes.TypedDictType({}).to_json(),\n },\n public_art,\n )\n assert isinstance(table, data_types.WBValue) and isinstance(\n table, wandb.data_types.Table\n )\n\n type_mapping = data_types.WBValue.type_mapping()\n assert all(\n [issubclass(type_mapping[key], data_types.WBValue) for key in type_mapping]\n )\n\n assert wbvalue == wbvalue\n assert wbvalue != data_types.WBValue()\n\n\n@pytest.mark.skipif(sys.version_info >= (3, 10), reason=\"no pandas py3.10 wheel\")\ndef test_log_dataframe(live_mock_server, test_settings):\n import pandas as pd\n\n run = wandb.init(settings=test_settings)\n cv_results = pd.DataFrame(data={\"test_col\": [1, 2, 3], \"test_col2\": [4, 5, 6]})\n run.log({\"results_df\": cv_results})\n run.finish()\n ctx = live_mock_server.get_ctx()\n assert len(ctx[\"artifacts\"]) == 1\n\n\ndef test_raw_data():\n wbhist = wandb.Histogram(data)\n assert len(wbhist.histogram) == 64\n\n\ndef test_np_histogram():\n wbhist = wandb.Histogram(np_histogram=np.histogram(data))\n assert len(wbhist.histogram) == 10\n\n\ndef test_manual_histogram():\n wbhist = wandb.Histogram(np_histogram=([1, 2, 4], [3, 10, 20, 0]))\n assert len(wbhist.histogram) == 3\n\n\ndef test_invalid_histogram():\n with pytest.raises(ValueError):\n wandb.Histogram(np_histogram=([1, 2, 3], [1]))\n\n\nimage = np.zeros((28, 28))\n\n\ndef test_captions():\n wbone = wandb.Image(image, caption=\"Cool\")\n wbtwo = wandb.Image(image, caption=\"Nice\")\n assert wandb.Image.all_captions([wbone, wbtwo]) == [\"Cool\", \"Nice\"]\n\n\ndef test_bind_image(mocked_run):\n wb_image = wandb.Image(image)\n wb_image.bind_to_run(mocked_run, \"stuff\", 10)\n assert wb_image.is_bound()\n\n\nfull_box = {\n \"position\": {\"middle\": (0.5, 0.5), \"width\": 0.1, \"height\": 0.2},\n \"class_id\": 2,\n \"box_caption\": \"This is a big car\",\n \"scores\": {\"acc\": 0.3},\n}\n\n\n# Helper function return a new dictionary with the key removed\ndef dissoc(d, key):\n new_d = d.copy()\n new_d.pop(key)\n return new_d\n\n\noptional_keys = [\"box_caption\", \"scores\"]\nboxes_with_removed_optional_args = [dissoc(full_box, k) for k in optional_keys]\n\n\ndef test_image_accepts_other_images(mocked_run):\n image_a = wandb.Image(np.random.random((300, 300, 3)))\n image_b = wandb.Image(image_a)\n assert image_a == image_b\n\n\ndef test_image_accepts_bounding_boxes(mocked_run):\n img = wandb.Image(image, boxes={\"predictions\": {\"box_data\": [full_box]}})\n 
img.bind_to_run(mocked_run, \"images\", 0)\n img_json = img.to_json(mocked_run)\n path = img_json[\"boxes\"][\"predictions\"][\"path\"]\n assert os.path.exists(os.path.join(mocked_run.dir, path))\n\n\ndef test_image_accepts_bounding_boxes_optional_args(mocked_run):\n img = data_types.Image(\n image, boxes={\"predictions\": {\"box_data\": boxes_with_removed_optional_args}}\n )\n img.bind_to_run(mocked_run, \"images\", 0)\n img_json = img.to_json(mocked_run)\n path = img_json[\"boxes\"][\"predictions\"][\"path\"]\n assert os.path.exists(os.path.join(mocked_run.dir, path))\n\n\nstandard_mask = {\n \"mask_data\": np.array([[1, 2, 2, 2], [2, 3, 3, 4], [4, 4, 4, 4], [4, 4, 4, 2]]),\n \"class_labels\": {1: \"car\", 2: \"pedestrian\", 3: \"tractor\", 4: \"cthululu\"},\n}\n\n\ndef test_image_accepts_masks(mocked_run):\n img = wandb.Image(image, masks={\"overlay\": standard_mask})\n img.bind_to_run(mocked_run, \"images\", 0)\n img_json = img.to_json(mocked_run)\n path = img_json[\"masks\"][\"overlay\"][\"path\"]\n assert os.path.exists(os.path.join(mocked_run.dir, path))\n\n\ndef test_image_accepts_masks_without_class_labels(mocked_run):\n img = wandb.Image(image, masks={\"overlay\": dissoc(standard_mask, \"class_labels\")})\n img.bind_to_run(mocked_run, \"images\", 0)\n img_json = img.to_json(mocked_run)\n path = img_json[\"masks\"][\"overlay\"][\"path\"]\n assert os.path.exists(os.path.join(mocked_run.dir, path))\n\n\ndef test_cant_serialize_to_other_run(mocked_run, test_settings):\n \"\"\"This isn't implemented yet. Should work eventually.\"\"\"\n other_run = wandb.wandb_sdk.wandb_run.Run(settings=test_settings)\n other_run._set_backend(mocked_run._backend)\n wb_image = wandb.Image(image)\n\n wb_image.bind_to_run(mocked_run, \"stuff\", 10)\n\n with pytest.raises(AssertionError):\n wb_image.to_json(other_run)\n\n\ndef test_image_seq_to_json(mocked_run):\n wb_image = wandb.Image(image)\n wb_image.bind_to_run(mocked_run, \"test\", 0, 0)\n meta = wandb.Image.seq_to_json([wb_image], mocked_run, \"test\", 0)\n assert os.path.exists(\n os.path.join(mocked_run.dir, \"media\", \"images\", \"test_0_0.png\")\n )\n\n meta_expected = {\n \"_type\": \"images/separated\",\n \"count\": 1,\n \"height\": 28,\n \"width\": 28,\n }\n assert utils.subdict(meta, meta_expected) == meta_expected\n\n\ndef test_max_images(caplog, mocked_run):\n large_image = np.random.randint(255, size=(10, 10))\n large_list = [wandb.Image(large_image)] * 200\n large_list[0].bind_to_run(mocked_run, \"test2\", 0, 0)\n meta = wandb.Image.seq_to_json(\n wandb.wandb_sdk.data_types._prune_max_seq(large_list), mocked_run, \"test2\", 0\n )\n expected = {\n \"_type\": \"images/separated\",\n \"count\": data_types.Image.MAX_ITEMS,\n \"height\": 10,\n \"width\": 10,\n }\n path = os.path.join(mocked_run.dir, \"media/images/test2_0_0.png\")\n assert utils.subdict(meta, expected) == expected\n assert os.path.exists(os.path.join(mocked_run.dir, \"media/images/test2_0_0.png\"))\n\n\ndef test_audio_sample_rates():\n audio1 = np.random.uniform(-1, 1, 44100)\n audio2 = np.random.uniform(-1, 1, 88200)\n wbaudio1 = wandb.Audio(audio1, sample_rate=44100)\n wbaudio2 = wandb.Audio(audio2, sample_rate=88200)\n assert wandb.Audio.sample_rates([wbaudio1, wbaudio2]) == [44100, 88200]\n # test with missing sample rate\n with pytest.raises(ValueError):\n wandb.Audio(audio1)\n\n\ndef test_audio_durations():\n audio1 = np.random.uniform(-1, 1, 44100)\n audio2 = np.random.uniform(-1, 1, 88200)\n wbaudio1 = wandb.Audio(audio1, sample_rate=44100)\n wbaudio2 = 
wandb.Audio(audio2, sample_rate=44100)\n assert wandb.Audio.durations([wbaudio1, wbaudio2]) == [1.0, 2.0]\n\n\ndef test_audio_captions():\n audio = np.random.uniform(-1, 1, 44100)\n sample_rate = 44100\n caption1 = \"This is what a dog sounds like\"\n caption2 = \"This is what a chicken sounds like\"\n # test with all captions\n wbaudio1 = wandb.Audio(audio, sample_rate=sample_rate, caption=caption1)\n wbaudio2 = wandb.Audio(audio, sample_rate=sample_rate, caption=caption2)\n assert wandb.Audio.captions([wbaudio1, wbaudio2]) == [caption1, caption2]\n # test with no captions\n wbaudio3 = wandb.Audio(audio, sample_rate=sample_rate)\n wbaudio4 = wandb.Audio(audio, sample_rate=sample_rate)\n assert wandb.Audio.captions([wbaudio3, wbaudio4]) is False\n # test with some captions\n wbaudio5 = wandb.Audio(audio, sample_rate=sample_rate)\n wbaudio6 = wandb.Audio(audio, sample_rate=sample_rate, caption=caption2)\n assert wandb.Audio.captions([wbaudio5, wbaudio6]) == [\"\", caption2]\n\n\ndef test_audio_to_json(mocked_run):\n audio = np.zeros(44100)\n audioObj = wandb.Audio(audio, sample_rate=44100)\n audioObj.bind_to_run(mocked_run, \"test\", 0)\n meta = wandb.Audio.seq_to_json([audioObj], mocked_run, \"test\", 0)\n assert os.path.exists(os.path.join(mocked_run.dir, meta[\"audio\"][0][\"path\"]))\n\n meta_expected = {\n \"_type\": \"audio\",\n \"count\": 1,\n \"sampleRates\": [44100],\n \"durations\": [1.0],\n }\n assert utils.subdict(meta, meta_expected) == meta_expected\n\n audio_expected = {\n \"_type\": \"audio-file\",\n \"caption\": None,\n \"size\": 88244,\n }\n assert utils.subdict(meta[\"audio\"][0], audio_expected) == audio_expected\n\n\ndef test_audio_refs():\n audioObj = wandb.Audio(\n \"https://wandb-artifacts-refs-public-test.s3-us-west-2.amazonaws.com/StarWars3.wav\"\n )\n art = wandb.Artifact(\"audio_ref_test\", \"dataset\")\n art.add(audioObj, \"audio_ref\")\n\n audio_expected = {\n \"_type\": \"audio-file\",\n \"caption\": None,\n }\n assert utils.subdict(audioObj.to_json(art), audio_expected) == audio_expected\n\n\ndef test_guess_mode():\n image = np.random.randint(255, size=(28, 28, 3))\n wbimg = wandb.Image(image)\n assert wbimg.image.mode == \"RGB\"\n\n\ndef test_pil():\n pil = PIL.Image.new(\"L\", (28, 28))\n img = wandb.Image(pil)\n assert list(img.image.getdata()) == list(pil.getdata())\n\n\ndef test_matplotlib_image():\n plt.plot([1, 2, 2, 4])\n img = wandb.Image(plt)\n assert img.image.width == 640\n\n\ndef test_matplotlib_image_with_multiple_axes():\n \"\"\"Ensures that wandb.Image constructor can accept a pyplot or figure\n reference in which the figure has multiple axes. Importantly, there is\n no requirement that any of the axes have plotted data.\n \"\"\"\n for fig in utils.matplotlib_multiple_axes_figures():\n wandb.Image(fig) # this should not error.\n\n for fig in utils.matplotlib_multiple_axes_figures():\n wandb.Image(plt) # this should not error.\n\n\n@pytest.mark.skipif(\n sys.version_info >= (3, 9), reason=\"plotly doesn't support py3.9 yet\"\n)\ndef test_matplotlib_plotly_with_multiple_axes():\n \"\"\"Ensures that wandb.Plotly constructor can accept a plotly figure\n reference in which the figure has multiple axes. 
Importantly, there is\n no requirement that any of the axes have plotted data.\n \"\"\"\n for fig in utils.matplotlib_multiple_axes_figures():\n wandb.Plotly(fig) # this should not error.\n\n for fig in utils.matplotlib_multiple_axes_figures():\n wandb.Plotly(plt) # this should not error.\n\n\ndef test_plotly_from_matplotlib_with_image():\n \"\"\"Ensures that wandb.Plotly constructor properly errors when\n a pyplot with an image is passed\n \"\"\"\n # try the figure version\n fig = utils.matplotlib_with_image()\n with pytest.raises(ValueError):\n wandb.Plotly(fig)\n plt.close()\n\n # try the plt version\n fig = utils.matplotlib_with_image()\n with pytest.raises(ValueError):\n wandb.Plotly(plt)\n plt.close()\n\n\ndef test_image_from_matplotlib_with_image():\n \"\"\"Ensures that wandb.Image constructor supports a pyplot with an image\"\"\"\n # try the figure version\n fig = utils.matplotlib_with_image()\n wandb.Image(fig) # this should not error.\n plt.close()\n\n # try the plt version\n fig = utils.matplotlib_with_image()\n wandb.Image(plt) # this should not error.\n plt.close()\n\n\n@pytest.mark.skipif(\n sys.version_info >= (3, 9), reason=\"plotly doesn't support py3.9 yet\"\n)\ndef test_make_plot_media_from_matplotlib_without_image():\n \"\"\"Ensures that wandb.Plotly.make_plot_media() returns a Plotly object when\n there is no image\n \"\"\"\n fig = utils.matplotlib_without_image()\n assert type(wandb.Plotly.make_plot_media(fig)) == wandb.Plotly\n plt.close()\n\n fig = utils.matplotlib_without_image()\n assert type(wandb.Plotly.make_plot_media(plt)) == wandb.Plotly\n plt.close()\n\n\ndef test_make_plot_media_from_matplotlib_with_image():\n \"\"\"Ensures that wandb.Plotly.make_plot_media() returns an Image object when\n there is an image in the matplotlib figure\n \"\"\"\n fig = utils.matplotlib_with_image()\n assert type(wandb.Plotly.make_plot_media(fig)) == wandb.Image\n plt.close()\n\n fig = utils.matplotlib_with_image()\n assert type(wandb.Plotly.make_plot_media(plt)) == wandb.Image\n plt.close()\n\n\ndef test_create_bokeh_plot(mocked_run):\n \"\"\"Ensures that wandb.Bokeh constructor accepts a bokeh plot\"\"\"\n bp = dummy_data.bokeh_plot()\n bp = wandb.data_types.Bokeh(bp)\n bp.bind_to_run(mocked_run, \"bokeh\", 0)\n\n\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"No moviepy.editor in py2\")\ndef test_video_numpy_gif(mocked_run):\n video = np.random.randint(255, size=(10, 3, 28, 28))\n vid = wandb.Video(video, format=\"gif\")\n vid.bind_to_run(mocked_run, \"videos\", 0)\n assert vid.to_json(mocked_run)[\"path\"].endswith(\".gif\")\n\n\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"No moviepy.editor in py2\")\ndef test_video_numpy_mp4(mocked_run):\n video = np.random.randint(255, size=(10, 3, 28, 28))\n vid = wandb.Video(video, format=\"mp4\")\n vid.bind_to_run(mocked_run, \"videos\", 0)\n assert vid.to_json(mocked_run)[\"path\"].endswith(\".mp4\")\n\n\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"No moviepy.editor in py2\")\ndef test_video_numpy_multi(mocked_run):\n video = np.random.random(size=(2, 10, 3, 28, 28))\n vid = wandb.Video(video)\n vid.bind_to_run(mocked_run, \"videos\", 0)\n assert vid.to_json(mocked_run)[\"path\"].endswith(\".gif\")\n\n\n@pytest.mark.skipif(sys.version_info < (3, 6), reason=\"No moviepy.editor in py2\")\ndef test_video_numpy_invalid():\n video = np.random.random(size=(3, 28, 28))\n with pytest.raises(ValueError):\n wandb.Video(video)\n\n\ndef test_video_path(mocked_run):\n with open(\"video.mp4\", \"w\") as f:\n 
f.write(\"00000\")\n vid = wandb.Video(\"video.mp4\")\n vid.bind_to_run(mocked_run, \"videos\", 0)\n assert vid.to_json(mocked_run)[\"path\"].endswith(\".mp4\")\n\n\ndef test_video_path_invalid(runner):\n with runner.isolated_filesystem():\n with open(\"video.avi\", \"w\") as f:\n f.write(\"00000\")\n with pytest.raises(ValueError):\n wandb.Video(\"video.avi\")\n\n\ndef test_molecule(runner, mocked_run):\n with runner.isolated_filesystem():\n with open(\"test.pdb\", \"w\") as f:\n f.write(\"00000\")\n mol = wandb.Molecule(\"test.pdb\")\n mol.bind_to_run(mocked_run, \"rad\", \"summary\")\n wandb.Molecule.seq_to_json([mol], mocked_run, \"rad\", \"summary\")\n\n assert os.path.exists(mol._path)\n\n\ndef test_molecule_file(runner, mocked_run):\n with runner.isolated_filesystem():\n with open(\"test.pdb\", \"w\") as f:\n f.write(\"00000\")\n mol = wandb.Molecule(open(\"test.pdb\", \"r\"))\n mol.bind_to_run(mocked_run, \"rad\", \"summary\")\n wandb.Molecule.seq_to_json([mol], mocked_run, \"rad\", \"summary\")\n\n assert os.path.exists(mol._path)\n\n\ndef test_molecule_from_smiles(runner, mocked_run):\n \"\"\"Ensures that wandb.Molecule.from_smiles supports valid SMILES molecule string representations\"\"\"\n with runner.isolated_filesystem():\n mol = wandb.Molecule.from_smiles(\"CC(=O)Nc1ccc(O)cc1\")\n mol.bind_to_run(mocked_run, \"rad\", \"summary\")\n wandb.Molecule.seq_to_json([mol], mocked_run, \"rad\", \"summary\")\n\n assert os.path.exists(mol._path)\n\n\ndef test_molecule_from_invalid_smiles(runner, mocked_run):\n \"\"\"Ensures that wandb.Molecule.from_smiles errs if passed an invalid SMILES string\"\"\"\n with pytest.raises(ValueError):\n wandb.Molecule.from_smiles(\"TEST\")\n\n\ndef test_molecule_from_rdkit_mol_object(runner, mocked_run):\n \"\"\"Ensures that wandb.Molecule.from_rdkit supports rdkit.Chem.rdchem.Mol objects\"\"\"\n with runner.isolated_filesystem():\n mol = wandb.Molecule.from_rdkit(rdkit.Chem.MolFromSmiles(\"CC(=O)Nc1ccc(O)cc1\"))\n mol.bind_to_run(mocked_run, \"rad\", \"summary\")\n wandb.Molecule.seq_to_json([mol], mocked_run, \"rad\", \"summary\")\n\n assert os.path.exists(mol._path)\n\n\ndef test_molecule_from_rdkit_mol_file(runner, mocked_run):\n \"\"\"Ensures that wandb.Molecule.from_rdkit supports .mol files\"\"\"\n with runner.isolated_filesystem():\n substance = rdkit.Chem.MolFromSmiles(\"CC(=O)Nc1ccc(O)cc1\")\n mol_file_name = \"test.mol\"\n rdkit.Chem.rdmolfiles.MolToMolFile(substance, mol_file_name)\n mol = wandb.Molecule.from_rdkit(mol_file_name)\n mol.bind_to_run(mocked_run, \"rad\", \"summary\")\n wandb.Molecule.seq_to_json([mol], mocked_run, \"rad\", \"summary\")\n\n assert os.path.exists(mol._path)\n\n\ndef test_molecule_from_rdkit_invalid_input(runner, mocked_run):\n \"\"\"Ensures that wandb.Molecule.from_rdkit errs on invalid input\"\"\"\n mol_file_name = \"test\"\n with pytest.raises(ValueError):\n wandb.Molecule.from_rdkit(mol_file_name)\n\n\ndef test_html_str(mocked_run):\n html = wandb.Html(\"<html><body><h1>Hello</h1></body></html>\")\n html.bind_to_run(mocked_run, \"rad\", \"summary\")\n wandb.Html.seq_to_json([html], mocked_run, \"rad\", \"summary\")\n assert os.path.exists(html._path)\n\n\ndef test_html_styles():\n with CliRunner().isolated_filesystem():\n pre = (\n '<base target=\"_blank\"><link rel=\"stylesheet\" type=\"text/css\" '\n 'href=\"https://app.wandb.ai/normalize.css\" />'\n )\n html = wandb.Html(\"<html><body><h1>Hello</h1></body></html>\")\n assert (\n html.html\n == \"<html><head>\" + pre + 
\"</head><body><h1>Hello</h1></body></html>\"\n )\n html = wandb.Html(\"<html><head></head><body><h1>Hello</h1></body></html>\")\n assert (\n html.html\n == \"<html><head>\" + pre + \"</head><body><h1>Hello</h1></body></html>\"\n )\n html = wandb.Html(\"<h1>Hello</h1>\")\n assert html.html == pre + \"<h1>Hello</h1>\"\n html = wandb.Html(\"<h1>Hello</h1>\", inject=False)\n assert html.html == \"<h1>Hello</h1>\"\n\n\ndef test_html_file(mocked_run):\n with open(\"test.html\", \"w\") as f:\n f.write(\"<html><body><h1>Hello</h1></body></html>\")\n html = wandb.Html(open(\"test.html\"))\n html.bind_to_run(mocked_run, \"rad\", \"summary\")\n wandb.Html.seq_to_json([html, html], mocked_run, \"rad\", \"summary\")\n\n assert os.path.exists(html._path)\n\n\ndef test_html_file_path(mocked_run):\n with open(\"test.html\", \"w\") as f:\n f.write(\"<html><body><h1>Hello</h1></body></html>\")\n html = wandb.Html(\"test.html\")\n html.bind_to_run(mocked_run, \"rad\", \"summary\")\n wandb.Html.seq_to_json([html, html], mocked_run, \"rad\", \"summary\")\n\n assert os.path.exists(html._path)\n\n\ndef test_table_default():\n table = wandb.Table()\n table.add_data(\"Some awesome text\", \"Positive\", \"Negative\")\n assert table._to_table_json() == {\n \"data\": [[\"Some awesome text\", \"Positive\", \"Negative\"]],\n \"columns\": [\"Input\", \"Output\", \"Expected\"],\n }\n\n\ndef test_table_eq_debug():\n # Invalid Type\n a = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])\n b = {}\n with pytest.raises(AssertionError):\n a._eq_debug(b, True)\n assert a != b\n\n # Mismatch Rows\n a = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])\n b = wandb.Table(data=[[1, 2, 3]])\n with pytest.raises(AssertionError):\n a._eq_debug(b, True)\n assert a != b\n\n # Mismatch Columns\n a = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])\n b = wandb.Table(data=[[1, 2, 3], [4, 5, 6]], columns=[\"a\", \"b\", \"c\"])\n with pytest.raises(AssertionError):\n a._eq_debug(b, True)\n assert a != b\n\n # Mismatch Types\n a = wandb.Table(data=[[1, 2, 3]])\n b = wandb.Table(data=[[\"1\", \"2\", \"3\"]])\n with pytest.raises(AssertionError):\n a._eq_debug(b, True)\n assert a != b\n\n # Mismatch Data\n a = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])\n b = wandb.Table(data=[[1, 2, 3], [4, 5, 100]])\n with pytest.raises(AssertionError):\n a._eq_debug(b, True)\n assert a != b\n\n a = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])\n b = wandb.Table(data=[[1, 2, 3], [4, 5, 6]])\n a._eq_debug(b, True)\n assert a == b\n\n\n@pytest.mark.skipif(sys.version_info >= (3, 10), reason=\"no pandas py3.10 wheel\")\ndef test_table_custom():\n import pandas as pd\n\n table = wandb.Table([\"Foo\", \"Bar\"])\n table.add_data(\"So\", \"Cool\")\n table.add_row(\"&\", \"Rad\")\n assert table._to_table_json() == {\n \"data\": [[\"So\", \"Cool\"], [\"&\", \"Rad\"]],\n \"columns\": [\"Foo\", \"Bar\"],\n }\n df = pd.DataFrame(columns=[\"Foo\", \"Bar\"], data=[[\"So\", \"Cool\"], [\"&\", \"Rad\"]])\n table_df = wandb.Table(dataframe=df)\n assert table._to_table_json() == table_df._to_table_json()\n\n\npoint_cloud_1 = np.array([[0, 0, 0, 1], [0, 0, 1, 13], [0, 1, 0, 2], [0, 1, 0, 4]])\n\npoint_cloud_2 = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 0]])\n\npoint_cloud_3 = np.array(\n [\n [0, 0, 0, 100, 100, 100],\n [0, 0, 1, 100, 100, 100],\n [0, 1, 0, 100, 100, 100],\n [0, 1, 0, 100, 100, 100],\n ]\n)\n\n\ndef test_object3d_numpy(mocked_run):\n obj1 = wandb.Object3D(point_cloud_1)\n obj2 = wandb.Object3D(point_cloud_2)\n obj3 = wandb.Object3D(point_cloud_3)\n obj1.bind_to_run(mocked_run, 
\"object3d\", 0)\n obj2.bind_to_run(mocked_run, \"object3d\", 1)\n obj3.bind_to_run(mocked_run, \"object3d\", 2)\n assert obj1.to_json(mocked_run)[\"_type\"] == \"object3D-file\"\n assert obj2.to_json(mocked_run)[\"_type\"] == \"object3D-file\"\n assert obj3.to_json(mocked_run)[\"_type\"] == \"object3D-file\"\n\n\ndef test_object3d_dict(mocked_run):\n obj = wandb.Object3D({\"type\": \"lidar/beta\",})\n obj.bind_to_run(mocked_run, \"object3D\", 0)\n assert obj.to_json(mocked_run)[\"_type\"] == \"object3D-file\"\n\n\ndef test_object3d_dict_invalid(mocked_run):\n with pytest.raises(ValueError):\n obj = wandb.Object3D({\"type\": \"INVALID\",})\n\n\ndef test_object3d_dict_invalid_string(mocked_run):\n with pytest.raises(ValueError):\n obj = wandb.Object3D(\"INVALID\")\n\n\ndef test_object3d_obj(mocked_run):\n obj = wandb.Object3D(utils.fixture_open(\"cube.obj\"))\n obj.bind_to_run(mocked_run, \"object3D\", 0)\n assert obj.to_json(mocked_run)[\"_type\"] == \"object3D-file\"\n\n\ndef test_object3d_gltf(mocked_run):\n obj = wandb.Object3D(utils.fixture_open(\"Box.gltf\"))\n obj.bind_to_run(mocked_run, \"object3D\", 0)\n assert obj.to_json(mocked_run)[\"_type\"] == \"object3D-file\"\n\n\ndef test_object3d_io(mocked_run):\n f = utils.fixture_open(\"Box.gltf\")\n body = f.read()\n\n ioObj = six.StringIO(six.u(body))\n obj = wandb.Object3D(ioObj, file_type=\"obj\")\n obj.bind_to_run(mocked_run, \"object3D\", 0)\n assert obj.to_json(mocked_run)[\"_type\"] == \"object3D-file\"\n\n\ndef test_object3d_unsupported_numpy():\n with pytest.raises(ValueError):\n wandb.Object3D(np.array([1]))\n\n with pytest.raises(ValueError):\n wandb.Object3D(np.array([[1, 2], [3, 4], [1, 2]]))\n\n with pytest.raises(ValueError):\n wandb.Object3D(np.array([1, 3, 4, 5, 6, 7, 8, 8, 3]))\n\n with pytest.raises(ValueError):\n wandb.Object3D(np.array([[1, 3, 4, 5, 6, 7, 8, 8, 3]]))\n\n f = utils.fixture_open(\"Box.gltf\")\n body = f.read()\n ioObj = six.StringIO(six.u(body))\n\n with pytest.raises(ValueError):\n wandb.Object3D(ioObj)\n\n\ndef test_object3d_seq_to_json(mocked_run):\n objs = [\n wandb.Object3D(utils.fixture_open(\"Box.gltf\")),\n wandb.Object3D(utils.fixture_open(\"cube.obj\")),\n wandb.Object3D(point_cloud_1),\n ]\n for o in objs:\n o.bind_to_run(mocked_run, \"pc\", 1)\n\n obj = wandb.Object3D.seq_to_json(objs, mocked_run, \"pc\", 1)\n\n box = obj[\"filenames\"][0]\n cube = obj[\"filenames\"][1]\n pts = obj[\"filenames\"][2]\n\n assert os.path.exists(os.path.join(mocked_run.dir, \"media\", \"object3D\", box))\n assert os.path.exists(os.path.join(mocked_run.dir, \"media\", \"object3D\", cube))\n assert os.path.exists(os.path.join(mocked_run.dir, \"media\", \"object3D\", pts))\n\n assert obj[\"_type\"] == \"object3D\"\n assert obj[\"filenames\"] == [\n box,\n cube,\n pts,\n ]\n\n\ndef test_table_init():\n table = wandb.Table(data=[[\"Some awesome text\", \"Positive\", \"Negative\"]])\n assert table._to_table_json() == {\n \"data\": [[\"Some awesome text\", \"Positive\", \"Negative\"]],\n \"columns\": [\"Input\", \"Output\", \"Expected\"],\n }\n\n\ntable_data = [\n [\"a\", 1, True],\n [\"b\", 2, False],\n [\"c\", 3, True],\n]\n\n\ndef test_table_from_list():\n table = wandb.Table(data=table_data)\n assert table.data == table_data\n\n with pytest.raises(AssertionError):\n # raises when user accidentally overrides columns\n table = wandb.Table(table_data)\n\n with pytest.raises(AssertionError):\n # raises when user uses list in \"dataframe\"\n table = wandb.Table(dataframe=table_data)\n\n # legacy\n table = 
wandb.Table(rows=table_data)\n assert table.data == table_data\n\n\ndef test_table_iterator():\n table = wandb.Table(data=table_data)\n for ndx, row in table.iterrows():\n assert row == table_data[ndx]\n\n table = wandb.Table(data=[])\n assert len([(ndx, row) for ndx, row in table.iterrows()]) == 0\n\n\ndef test_table_from_numpy():\n np_data = np.array(table_data)\n table = wandb.Table(data=np_data)\n assert table.data == np_data.tolist()\n\n with pytest.raises(AssertionError):\n # raises when user accidentally overrides columns\n table = wandb.Table(np_data)\n\n with pytest.raises(AssertionError):\n # raises when user uses list in \"dataframe\"\n table = wandb.Table(dataframe=np_data)\n\n\n@pytest.mark.skipif(sys.version_info >= (3, 10), reason=\"no pandas py3.10 wheel\")\ndef test_table_from_pandas():\n import pandas as pd\n\n pd_data = pd.DataFrame(table_data)\n table = wandb.Table(data=pd_data)\n assert table.data == table_data\n\n with pytest.raises(AssertionError):\n # raises when user accidentally overrides columns\n table = wandb.Table(pd_data)\n\n # legacy\n table = wandb.Table(dataframe=pd_data)\n assert table.data == table_data\n\n\ndef test_graph():\n graph = wandb.Graph()\n node_a = data_types.Node(\"a\", \"Node A\", size=(4,))\n node_b = data_types.Node(\"b\", \"Node B\", size=(16,))\n graph.add_node(node_a)\n graph.add_node(node_b)\n graph.add_edge(node_a, node_b)\n assert graph._to_graph_json() == {\n \"edges\": [[\"a\", \"b\"]],\n \"format\": \"keras\",\n \"nodes\": [\n {\"id\": \"a\", \"name\": \"Node A\", \"size\": (4,)},\n {\"id\": \"b\", \"name\": \"Node B\", \"size\": (16,)},\n ],\n }\n\n\ndef test_numpy_arrays_to_list():\n conv = data_types._numpy_arrays_to_lists\n assert conv(np.array(1)) == [1]\n assert conv(np.array((1, 2,))) == [1, 2]\n assert conv([np.array((1, 2,))]) == [[1, 2]]\n assert conv(np.array(({\"a\": [np.array((1, 2,))]}, 3,))) == [{\"a\": [[1, 2]]}, 3]\n\n\ndef test_partitioned_table_from_json(runner, mock_server, api):\n # This is mocked to return some data\n art = api.artifact(\"entity/project/dummy:v0\", type=\"dataset\")\n ptable = art.get(\"dataset\")\n data = [[0, 0, 1]]\n for ndx, row in ptable.iterrows():\n assert row == data[ndx]\n\n\ndef test_partitioned_table():\n partition_table = wandb.data_types.PartitionedTable(parts_path=\"parts\")\n assert len([(ndx, row) for ndx, row in partition_table.iterrows()]) == 0\n assert partition_table == wandb.data_types.PartitionedTable(parts_path=\"parts\")\n assert partition_table != wandb.data_types.PartitionedTable(parts_path=\"parts2\")\n\n\ndef test_table_column_style():\n # Test Base Cases\n table1 = wandb.Table(columns=[], data=[])\n table1.add_column(\"number\", [1, 2, 3])\n table1.add_data(4)\n with pytest.raises(AssertionError):\n table1.add_column(\"strings\", [\"a\"])\n table1.add_column(\"strings\", [\"a\", \"b\", \"c\", \"d\"])\n table1.set_pk(\"strings\")\n table1.add_data(5, \"e\")\n table1.add_column(\"np_numbers\", np.array([101, 102, 103, 104, 105]))\n\n assert table1.data == [\n [1, \"a\", 101],\n [2, \"b\", 102],\n [3, \"c\", 103],\n [4, \"d\", 104],\n [5, \"e\", 105],\n ]\n\n assert table1.get_column(\"number\") == [1, 2, 3, 4, 5]\n assert table1.get_column(\"strings\") == [\"a\", \"b\", \"c\", \"d\", \"e\"]\n assert table1.get_column(\"np_numbers\") == [101, 102, 103, 104, 105]\n\n assert np.all(\n table1.get_column(\"number\", convert_to=\"numpy\") == np.array([1, 2, 3, 4, 5])\n )\n assert np.all(\n table1.get_column(\"strings\", convert_to=\"numpy\")\n == np.array([\"a\", \"b\", 
\"c\", \"d\", \"e\"])\n )\n assert np.all(\n table1.get_column(\"np_numbers\", convert_to=\"numpy\")\n == np.array([101, 102, 103, 104, 105])\n )\n\n ndxs = table1.get_index()\n assert ndxs == [0, 1, 2, 3, 4]\n assert all(ndx._table == table1 for ndx in ndxs)\n\n # Test More Images and ndarrays\n rand_1 = np.random.randint(255, size=(32, 32))\n rand_2 = np.random.randint(255, size=(32, 32))\n rand_3 = np.random.randint(255, size=(32, 32))\n img_1 = wandb.Image(rand_1)\n img_2 = wandb.Image(rand_2)\n img_3 = wandb.Image(rand_3)\n\n table2 = wandb.Table(columns=[], data=[])\n table2.add_column(\"np_data\", [rand_1, rand_2])\n table2.add_column(\"image\", [img_1, img_2])\n table2.add_data(rand_3, img_3)\n\n assert table2.data == [[rand_1, img_1], [rand_2, img_2], [rand_3, img_3]]\n assert np.all(\n table2.get_column(\"np_data\", convert_to=\"numpy\")\n == np.array([rand_1, rand_2, rand_3])\n )\n assert table2.get_column(\"image\") == [img_1, img_2, img_3]\n assert np.all(\n table2.get_column(\"image\", convert_to=\"numpy\")\n == np.array([rand_1, rand_2, rand_3])\n )\n\n table3 = wandb.Table(columns=[], data=[])\n table3.add_column(\"table1_fk\", table1.get_column(\"strings\"))\n assert table3.get_column(\"table1_fk\")[0]._table == table1\n\n\ndef test_ndarrays_in_tables():\n rows = 10\n d = 128\n c = 3\n nda_table = wandb.Table(\n columns=[\"ndarray\"], data=np.random.randint(255, size=(rows, 1, d, d, c))\n )\n nda_table.add_data(np.random.randint(255, size=(d, d, c)))\n nda_table.add_data(np.random.randint(255, size=(d, d, c)).tolist())\n with pytest.raises(TypeError):\n nda_table.add_data(np.random.randint(255, size=(d + 1, d, c)))\n with pytest.raises(TypeError):\n nda_table.add_data(np.random.randint(255, size=(d + 1, d, c)).tolist())\n\n assert any(\n [\n isinstance(t, wandb.data_types._dtypes.NDArrayType)\n for t in nda_table._column_types.params[\"type_map\"][\"ndarray\"].params[\n \"allowed_types\"\n ]\n ]\n )\n\n nda_table = wandb.Table(columns=[], data=[])\n nda_table.add_column(\n \"odd_col\",\n [[[i], [i]] for i in range(rows)] + [np.random.randint(255, size=(2, 1))],\n )\n\n assert isinstance(\n nda_table._column_types.params[\"type_map\"][\"odd_col\"],\n wandb.data_types._dtypes.ListType,\n )\n\n nda_table.cast(\"odd_col\", wandb.data_types._dtypes.NDArrayType(shape=(2, 1)))\n nda_table.add_data(np.random.randint(255, size=(2, 1)))\n nda_table.add_data(np.random.randint(255, size=(2, 1)).tolist())\n with pytest.raises(TypeError):\n nda_table.add_data(np.random.randint(255, size=(2, 2)))\n with pytest.raises(TypeError):\n nda_table.add_data(np.random.randint(255, size=(2, 2)).tolist())\n\n assert isinstance(\n nda_table._column_types.params[\"type_map\"][\"odd_col\"],\n wandb.data_types._dtypes.NDArrayType,\n )\n\n\ndef test_table_logging(mocked_run, live_mock_server, test_settings, api):\n run = wandb.init(settings=test_settings)\n run.log(\n {\n \"logged_table\": wandb.Table(\n columns=[\"a\"], data=[[wandb.Image(np.ones(shape=(32, 32)))]],\n )\n }\n )\n run.finish()\n assert True\n\n\ndef test_reference_table_logging(mocked_run, live_mock_server, test_settings, api):\n live_mock_server.set_ctx({\"max_cli_version\": \"0.10.33\"})\n run = wandb.init(settings=test_settings)\n t = wandb.Table(columns=[\"a\"], data=[[wandb.Image(np.ones(shape=(32, 32)))]],)\n run.log({\"logged_table\": t})\n run.log({\"logged_table\": t})\n run.finish()\n assert True\n\n 
live_mock_server.set_ctx({\"max_cli_version\": \"0.11.0\"})\n run = wandb.init(settings=test_settings)\n t = wandb.Table(columns=[\"a\"], data=[[wandb.Image(np.ones(shape=(32, 32)))]],)\n run.log({\"logged_table\": t})\n run.log({\"logged_table\": t})\n run.finish()\n assert True\n\n\ndef test_reference_table_artifacts(mocked_run, live_mock_server, test_settings, api):\n live_mock_server.set_ctx({\"max_cli_version\": \"0.11.0\"})\n run = wandb.init(settings=test_settings)\n t = wandb.Table(columns=[\"a\"], data=[[wandb.Image(np.ones(shape=(32, 32)))]],)\n\n art = wandb.Artifact(\"A\", \"dataset\")\n art.add(t, \"table\")\n run.log_artifact(art)\n art = wandb.Artifact(\"A\", \"dataset\")\n art.add(t, \"table\")\n run.log_artifact(art)\n\n run.finish()\n assert True\n\n\n# TODO: In another location: need to manually test the internal/backend\n# artifact sender with an artifact that has a reference to be resolved - i\n# think this will get the most coverage\ndef test_table_reference(runner, live_mock_server, test_settings):\n with runner.isolated_filesystem():\n run = wandb.init(settings=test_settings)\n artifact = run.use_artifact(\"dummy:v0\")\n table = artifact.get(\"parts/1\")\n run.log({\"table\": table})\n run.finish()\n assert True\n\n\ndef test_partitioned_table_logging(mocked_run, live_mock_server, test_settings, api):\n run = wandb.init(settings=test_settings)\n run.log({\"logged_table\": wandb.data_types.PartitionedTable(\"parts\")})\n run.finish()\n assert True\n\n\ndef test_joined_table_logging(mocked_run, live_mock_server, test_settings, api):\n run = wandb.init(settings=test_settings)\n art = wandb.Artifact(\"A\", \"dataset\")\n t1 = wandb.Table(\n columns=[\"id\", \"a\"], data=[[1, wandb.Image(np.ones(shape=(32, 32)))]],\n )\n t2 = wandb.Table(\n columns=[\"id\", \"a\"], data=[[1, wandb.Image(np.ones(shape=(32, 32)))]],\n )\n art.add(t1, \"t1\")\n art.add(t2, \"t2\")\n jt = wandb.JoinedTable(t1, t2, \"id\")\n art.add(jt, \"jt\")\n run.log_artifact(art)\n run.log({\"logged_table\": jt})\n run.finish()\n assert True\n\n\ndef test_fail_to_make_file(mocked_run):\n wb_image = wandb.Image(image)\n try:\n wb_image.bind_to_run(mocked_run, \"my key: an identifier\", 0)\n if platform.system() == \"Windows\":\n assert False\n except ValueError as e:\n assert \" is invalid. Please remove invalid filename characters\" in str(e)\n"
] |
[
[
"numpy.random.random",
"numpy.histogram",
"matplotlib.use",
"pandas.DataFrame",
"numpy.ones",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
tom-doerr/lingvo
|
[
"2441edc7fee78903502ebd528ab4dc309db0001d"
] |
[
"lingvo/core/base_model.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport six\nfrom six.moves import range\nimport tensorflow as tf\n\nfrom lingvo.core import base_input_generator\nfrom lingvo.core import base_layer\nfrom lingvo.core import build_data\nfrom lingvo.core import cluster_factory\nfrom lingvo.core import early_stop\nfrom lingvo.core import hyperparams\nfrom lingvo.core import learner\nfrom lingvo.core import optimizer\nfrom lingvo.core import py_utils\nfrom lingvo.core import schedule\nfrom lingvo.core import summary_utils\nfrom lingvo.core import task_scheduler\n\n\ndef CreateTaskGlobalStep(task_name):\n \"\"\"Create if needed and return the global_step.\"\"\"\n with tf.name_scope(None), tf.variable_scope(py_utils.global_variable_scope):\n graph_collections = [tf.GraphKeys.GLOBAL_VARIABLES, 'TASK_GLOBAL_STEP']\n _, v = py_utils.CreateVariable(\n name=task_name + '_global_step',\n params=py_utils.WeightParams([], py_utils.WeightInit.Constant(0),\n tf.int64),\n trainable=False,\n collections=graph_collections)\n summary_utils.scalar(v.name, v)\n return v\n\n\nclass StatsCounter(object):\n \"\"\"A single counter in TF.\"\"\"\n\n def __init__(self, name):\n self._name = name\n _, self._var = py_utils.CreateVariable(\n name=name,\n params=py_utils.WeightParams([], py_utils.WeightInit.Constant(0),\n tf.int64),\n trainable=False)\n self._value = self._var.value() + 0 # Makes a copy.\n\n def Value(self):\n \"\"\"Returns the current counter value.\"\"\"\n return self._value\n\n def IncBy(self, params, delta):\n \"\"\"Increment the counter by delta and return the new value.\"\"\"\n # NOTE: We must ensure _value is computed (_var + 0) before\n # updating _var with delta.\n delta = tf.to_int64(delta)\n with tf.control_dependencies([self._value]):\n summary_utils.scalar(self._name, self._value)\n return tf.identity(tf.assign_add(self._var, delta))\n\n\n_LEGACY_LEARNER_PARAMS = [\n 'bprop_variable_filter',\n 'clip_gradient_norm_to_value',\n 'clip_gradient_single_norm_to_value',\n 'colocate_gradients_with_ops',\n 'gate_gradients',\n 'grad_aggregation_method',\n 'grad_norm_to_clip_to_zero',\n 'grad_norm_tracker',\n 'l1_regularizer_weight',\n 'l2_regularizer_weight',\n 'learning_rate',\n 'lr_schedule',\n 'optimizer',\n]\n\n\nclass BaseTask(base_layer.BaseLayer):\n \"\"\"A single encoder/decoder task.\n\n One task usually consists of one InputGenerator, one train_op,\n a list of eval_metrics, etc.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super(BaseTask, cls).Params()\n p.Define('input', None, 'Input generator Params.')\n p.Define('encoder', None, 'Encoder Params.')\n p.Define('online_encoder', None, 'Online Encoder Params.')\n p.Define('decoder', None, 'Decoder Params.')\n p.Define('train', 
hyperparams.Params(),\n 'Params to control how this task should be trained.')\n\n tp = p.train\n tp.Define(\n 'start_up_delay_steps', 200, 'i-th replica starts training after '\n 'i*(i+1)/2*start_up_delay_steps steps')\n tp.Define('max_steps', 4 * 10**6, 'Maximum number of training steps.')\n tp.Define('tpu_steps_per_loop', 100, 'The number of training steps per '\n 'training loop for TPUs.')\n tp.Define(\n 'vn_start_step', 200000000,\n 'Step starting from which variational noise is added to '\n 'params values during training.')\n tp.Define('vn_std', 0.0, 'Std of the variational noise.')\n tp.Define('early_stop', early_stop.EarlyStop.Params(),\n 'Early stopping based on dev-set performance.')\n tp.Define(\n 'ema_decay', 0.0,\n 'If > 0, enable ExponentialMovingAverage during training '\n 'with the given decay. '\n 'Must be < 1. Disabled if <= 0.')\n tp.Define(\n 'init_from_checkpoint_rules', {},\n 'If not None, a dictionary with keys corresponding to a checkpoint '\n 'path and values corresponding to variable loading rules is expected. '\n 'Each key is expected to be a path to a checkpoint from which to '\n 'initialize part of the model. Variables are only loaded from this '\n 'path during initialization and will override values provided by '\n 'initialization. '\n 'The corresponding values (loading_rules) are expected to be a tuple '\n 'consisting of two lists: loading rules, and ignore rules, respectively. '\n 'The first list (loading rules) contains the list of variables '\n 'which should be initialized from the checkpoint: each element in the '\n 'list is a pair of strings. The first element is a regex and the '\n 'second is a python format string. If a variable in the model matches '\n 'a regex, we rename using the format string to determine the '\n 'corresponding var in the checkpoint. Note that it is an error if a '\n 'model variable matches multiple loading rules, for the same '\n 'checkpoint or across checkpoints. '\n 'The second list (ignore rules) is a list of regexes which specify '\n 'variables in the model which should not be initialized using the '\n 'loading rules. Thus, if a variable in the model to be trained matches '\n 'one of the rules in the loading rules, as well as one of the regular '\n 'expressions in the ignore rules, the variable will not be initialized '\n 'from the checkpoint, but will instead be initialized from the '\n 'variable initializer defined in the graph. '\n 'Examples: '\n '{\"checkpoint_path\": ([(\"(.*)\", \"%s\")], [])} will initialize all the '\n 'model parameters from the checkpoint_path.')\n tp.Define(\n 'pruning_hparams_dict', None, 'Pruning related hyperparameters. A dict '\n 'with hyperparameter: value pairs. See tf.contrib.model_pruning.')\n tp.Define('save_interval_seconds', 60 * 10,\n 'Generates a checkpoint roughly once every this many seconds.')\n tp.Define('summary_interval_steps', 100,\n 'Generates a summary roughly once every this many steps.')\n # The following params must mirror those in Learner.Params().\n # TODO(rpang): migrate existing params to use learner and\n # delete legacy params.\n # LINT.IfChange\n tp.Define(\n 'learner', None, 'One or a list of optimization programs. '\n 'If None, uses a Learner created from the legacy params '\n 'defined below: learning_rate, lr_schedule, optimizer, etc.')\n tp.Define(\n 'l2_regularizer_weight', None,\n 'If not None, L2 regularization to apply to the weights. 
'\n 'Otherwise, disable L2 regularization.')\n tp.Define(\n 'l1_regularizer_weight', None,\n 'If not None, L1 regularization to apply to the weights. '\n 'Otherwise, disable L1 regularization.')\n tp.Define('learning_rate', 0.0, 'learning rate to use.')\n tp.Define(\n 'clip_gradient_norm_to_value', 0.0,\n 'Clip gradient by global norm to this value. This is similar to '\n 'the behaviour of tf.clip_by_global_norm; if you are looking for '\n 'tf.clip_by_norm refer to clip_gradient_single_norm_to_value. Note '\n 'these are mutually exclusive.')\n tp.Define(\n 'clip_gradient_single_norm_to_value', 0.0,\n 'Clip gradient by single tensor norm to this value. This is '\n 'similar to the behaviour of tf.clip_by_norm. Note this is mutually '\n 'exclusive to using clip_gradient_norm_to_value.')\n tp.Define('grad_norm_to_clip_to_zero', 0.0,\n 'Clip gradient to 0 if its norm exceeds this value.')\n tp.Define('grad_norm_tracker', None, 'Params for GradNormTracker.')\n tp.Define('optimizer', optimizer.Adam.Params(), 'Params for the optimizer.')\n tp.Define('lr_schedule', schedule.ContinuousLearningRateSchedule.Params(),\n 'Learning rate decay schedule.')\n tp.Define(\n 'bprop_variable_filter', None,\n 'If set, only backprop variables whose names partially match '\n 'this regexp (re.search).')\n tp.Define(\n 'grad_aggregation_method', tf.AggregationMethod.EXPERIMENTAL_TREE,\n 'Specifies the method used to combine gradient terms. Accepted '\n 'values are constants defined in the class AggregationMethod.')\n tp.Define(\n 'gate_gradients', False,\n 'If True, add a tuple around the gradients returned for an '\n 'operation. This avoids some race conditions.')\n tp.Define('colocate_gradients_with_ops', True,\n 'If True, try colocating gradients with the corresponding op.')\n # LINT.ThenChange(learner.py)\n p.Define('eval', hyperparams.Params(),\n 'Params to control how this task should be evaled.')\n ep = p.eval\n ep.Define(\n 'samples_per_summary', 1000,\n 'If > 0, generates one summary after this many samples, at most. '\n 'If == 0 or the dataset has fewer examples, evaluate the whole set.')\n ep.Define(\n 'decoder_samples_per_summary', 0,\n 'If > 0, each decoder summary will contain at most this many samples. '\n 'If == 0, defaults to `samples_per_summary` for '\n 'backwards compatibility.')\n ep.Define(\n 'load_checkpoint_from', None,\n 'If not None, specifies a location for the checkpoint that '\n 'should be used for eval. One example format is a '\n 'checkpoint directory of a training run.')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, BaseTask)\n # Ensure global_step exists before calling super.\n py_utils.GetOrCreateGlobalStepVar()\n super(BaseTask, self).__init__(params)\n\n p = self.params\n\n if p.input:\n # TODO(zhifengc): Consider a simpler way to ensure the input\n # generator stops after one epoch.\n if p.is_eval and p.eval:\n seq_inp = issubclass(p.input.cls,\n base_input_generator.BaseInputGeneratorFromFiles)\n if p.input.num_samples == 0:\n # Dataset size is unknown. 
Computes eval summary based on num_samples.\n assert p.eval.samples_per_summary > 0\n elif (p.eval.samples_per_summary == 0) or (p.input.num_samples <\n p.eval.samples_per_summary):\n # If we know the dataset size and we want to evaluate the full\n # set, we need to coordinate the input generator to flush out\n # all samples so the evaler and decoder compute metrics on the\n # whole set for each summary step.\n if seq_inp:\n p.input.flush_every_n = p.input.num_samples\n p.eval.samples_per_summary = p.input.num_samples\n if seq_inp and p.input.num_batcher_threads > 1:\n tf.logging.warning('input.num_batcher_threads > 1 inside eval mode. '\n 'The input generator may not iterate over exactly '\n 'one epoch per run')\n\n input_params = self.cluster.PlaceInput(p.input)\n with py_utils.outside_all_rewrites():\n self.CreateChild('input', input_params)\n\n self._encoder = None\n self._online_encoder = None\n self._decoder = None\n\n self._total_examples = None\n self._total_nans_and_infs = None\n self._loss = None\n self._num_predictions = None\n self._train_op = None\n self._eval_metrics = {}\n self._per_example = {}\n self._trainer_verbose_tensors = {}\n\n # Create the gradient mask.\n self._per_input_gradient_mask = None\n task_global_step_list = tf.get_collection('TASK_GLOBAL_STEP',\n '^%s_global_step' % p.name)\n if len(task_global_step_list) > 1:\n raise ValueError('Found multiple task_global_step for task %s' % p.name)\n self._global_step_var = (\n task_global_step_list[0] if len(task_global_step_list) == 1 else\n py_utils.GetOrCreateGlobalStepVar())\n self._global_step = tf.identity(\n self._global_step_var, name='global_step_tensor')\n tp = p.train\n # p.train can be None if this task is the teacher/student task in a\n # DistillationTask.\n if tp and self.cluster.job in ('worker', 'trainer', 'trainer_client',\n 'controller'):\n self._SetLearnerFromLegacyParams(tp)\n if tp.learner is not None:\n if isinstance(tp.learner, (list, tuple)):\n self.CreateChildren('learners', tp.learner)\n else:\n self.CreateChildren('learners', [tp.learner])\n self._UpdateVnConfig()\n\n def _SetLearnerFromLegacyParams(self, tp):\n \"\"\"Sets tp.learner based on legacy params.\"\"\"\n if tp.learner is not None:\n return\n op = learner.Learner.Params()\n tp.learner = op\n op.name = 'loss'\n for k, v in tp.IterParams():\n if k not in _LEGACY_LEARNER_PARAMS:\n tf.logging.info('Ignoring legacy param %s=%s for optimization program',\n k, v)\n continue\n setattr(op, k, v)\n setattr(tp, k, None)\n for line in op.ToText().split('\\n'):\n tf.logging.info('Learner params: %s', line)\n\n def ComputePredictions(self, theta, input_batch):\n \"\"\"Computes predictions for `input_batch`.\n\n The output can be in the form of probabilistic distributions, e.g., softmax\n logits for discrete outputs, mixture of logistics for continuous values, or\n regression values.\n\n For training/evaluation, the output will be used for computing loss and\n gradient updates, including comparing predicted distributions between\n teacher and student for distillation. 
During inference the output can be\n used to compute final outputs, perhaps with sampling.\n\n Args:\n theta: A `.NestedMap` object containing variable values of this task.\n input_batch: A `.NestedMap` object containing input tensors to this tower.\n\n Returns:\n Predictions, either a single Tensor, a `.NestedMap`, or a namedtuple.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n def ComputeLoss(self, theta, input_batch, predictions):\n \"\"\"Computes loss and other metrics for the given predictions.\n\n Args:\n theta: A `.NestedMap` object containing variable values of this task.\n input_batch: A `.NestedMap` object containing input tensors to this tower.\n predictions: The output of `ComputePredictions`.\n\n Returns:\n Two dicts:\n A dict containing str keys and (metric, weight) pairs as values, where\n one of the keys is expected to be 'loss'.\n A dict containing arbitrary tensors describing something about each\n training example, where the first dimension of each tensor is the batch\n index.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n def FilterPerExampleTensors(self, per_example):\n \"\"\"Return the per-example tensors ProcessFPropResults needs.\n\n By default we don't send any per-example tensors to ProcessFPropResults\n because some may be expensive to compute. Implement this method to let\n some of them pass through.\n\n Args:\n per_example: A dict of tensors returned as per-example tensors from FProp.\n\n Returns:\n A dict containing a subset of the key/value pairs in per_example.\n \"\"\"\n return {}\n\n def ProcessFPropResults(self, sess, global_step, metrics, per_example):\n \"\"\"Called once for each train loop.\n\n BaseModel.ProcessFPropResults is also called on each loop, so you\n can put your implementation wherever it is most convenient for you.\n\n Args:\n sess: a session.\n global_step: approximate number of model training steps.\n metrics: the metrics dict returned by FPropTower.\n per_example: the per_example dict returned by FPropTower.\n \"\"\"\n pass\n\n def FPropTower(self, theta, input_batch):\n \"\"\"Forward propagation through one tower of the model.\n\n Args:\n theta: A `.NestedMap` object containing variable values of this task\n copied to this tower's devices.\n input_batch: A `.NestedMap` object containing input tensors to this tower.\n\n Returns:\n Two dicts:\n A dict containing str keys and (metric, weight) pairs as values, where\n one of the keys is expected to be 'loss'.\n A dict containing arbitrary tensors describing something about each\n training example, where the first dimension of each tensor is the batch\n index.\n \"\"\"\n predicted = self.ComputePredictions(theta, input_batch)\n return self.ComputeLoss(theta, input_batch, predicted)\n\n def FProp(self, theta, input_batch):\n \"\"\"Forward propagation.\n\n This default `FProp` implementation here supports batch splitting in\n synchronous and asynchronous training when sub-classes implement\n `FPropTower`.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n input_batch: The input batch. A `NestedMap` of tensors. 
Or, if input batch\n splitting is used, a list of `NestedMap`, one for each split.\n\n Returns:\n Two dicts:\n A dict containing str keys and (metric, weight) pairs as values, where\n one of the keys is expected to be 'loss'.\n A dict containing arbitrary tensors describing something about each\n training example, where the first dimension of each tensor is the batch\n index.\n \"\"\"\n p = self.params\n with tf.name_scope('fprop'), tf.name_scope(p.name):\n # Always reset step seed at the start of a new global_step.\n py_utils.ResetStepSeed()\n if py_utils.use_tpu():\n metrics, per_example = self._FPropTpu(theta, input_batch)\n else:\n metrics, per_example = self._FPropSplitInputBatch(theta, input_batch)\n self._FPropResult(metrics, per_example)\n return metrics, per_example\n\n def _FPropTpu(self, theta, input_batch):\n p = self.params\n with tf.name_scope('fprop'), tf.name_scope(p.name):\n with tf.name_scope('tower_0_0'):\n metrics, per_example = self.FPropTower(theta, input_batch)\n metrics = py_utils.WeightedAvgOfMetrics([metrics])\n return metrics, per_example\n\n def _FPropSplitInputBatch(self, theta, input_batch):\n \"\"\"Splits the input batch on the input device.\"\"\"\n cluster = self.cluster\n num_splits = cluster.num_splits_per_client\n\n if not isinstance(input_batch, list):\n input_batch = [input_batch]\n\n assert len(input_batch) == num_splits, (len(input_batch), num_splits)\n\n # dev_list_per_replica[i][j] is the i-th worker's j-th device.\n dev_list_per_replica = cluster.available_devices.tolist()\n\n # Asserts invariant of the total number of splits w.r.t.\n # splits per worker.\n splits_per_replica = cluster.num_splits_per_replica\n assert num_splits == splits_per_replica * len(dev_list_per_replica), (\n num_splits, splits_per_replica, len(dev_list_per_replica))\n\n all_metrics = []\n all_per_example_tensors = []\n for w_id, w_devs in enumerate(dev_list_per_replica):\n # Make local copy of the vars, shard on devices for this worker.\n theta_local = py_utils.CreateLocalTheta(\n theta, w_devs, label='worker %d' % w_id)\n\n for s_id in range(splits_per_replica):\n # s_id-th split for the w_id-th worker.\n split_id = splits_per_replica * w_id + s_id\n with py_utils.ModelSplit(split_id):\n with tf.device(cluster.WorkerDeviceInModelSplit(0)):\n with tf.name_scope('tower_%d_%d' % (w_id, s_id)):\n batch = self.input_generator.PreprocessInputBatch(\n input_batch[split_id])\n metrics, per_example = self.FPropTower(theta_local, batch)\n all_metrics.append(metrics)\n all_per_example_tensors.append(per_example)\n\n return py_utils.WeightedAvgOfMetrics(\n all_metrics), py_utils.ConcatPerExampleTensors(all_per_example_tensors)\n\n def _FPropResult(self, metrics, per_example):\n # Adds stats about the input batch.\n metrics['num_samples_in_batch'] = (tf.convert_to_tensor(\n self.input_generator.GlobalBatchSize()), tf.constant(1.0))\n # Generates summaries.\n for name, (value, weight) in six.iteritems(metrics):\n self.AddEvalMetric(name, value, weight)\n per_example = self.FilterPerExampleTensors(per_example)\n for name, value in six.iteritems(per_example):\n self.AddPerExampleTensor(name, value)\n # Loss.\n self._loss, self._num_predictions = metrics['loss']\n self._loss = py_utils.CheckNumerics(self._loss)\n self._metrics = metrics\n summary_utils.scalar('num_predictions', self._num_predictions)\n\n def GetInputBatch(self):\n \"\"\"Returns input batch from input_generator.\"\"\"\n if py_utils.use_tpu():\n return self.input_generator.CreateTpuFeeds()\n else:\n return 
self.input_generator.SplitInputBatch(\n self.cluster.num_splits_per_client)\n\n def FPropDefaultTheta(self, input_batch=None):\n \"\"\"Calls `FProp` with this layer's parameters.\"\"\"\n if input_batch is None:\n input_batch = self.GetInputBatch()\n return self.FProp(self.theta, input_batch)\n\n def AdjustGradients(self, vars_gradients):\n \"\"\"Allow for custom gradient manipulation prior to clipping.\"\"\"\n tf.logging.info('BaseTask.AdjustGradients')\n return vars_gradients\n\n def BProp(self):\n self._BPropForVariables(self.vars)\n\n def _BPropForVariables(self, vmap):\n \"\"\"Constructs the backward graph.\"\"\"\n bprop_variable_filters = self.input_generator.GetBpropVariableFilters()\n # Only compute the mask if the variable filters are not empty.\n if bprop_variable_filters != [''] * len(bprop_variable_filters):\n self._ComputeGradientMask(bprop_variable_filters)\n train_ops = {} # mapping from op name to op.\n train_ops['total_samples'] = self.IncrementTotalSamples()\n gradient_mask = None\n if self._per_input_gradient_mask:\n # TODO(neerajgaur): Change this to use source_selected from input_batch.\n onehot = self.input_generator.GetInputSourceOneHot()\n gradient_mask = {\n k: tf.tensordot(v, onehot, 1)\n for k, v in six.iteritems(self._per_input_gradient_mask)\n }\n all_losses = []\n for optimization in self.learners:\n loss_name = optimization.params.name\n metric = self._metrics.get(loss_name, None)\n if metric is None:\n raise ValueError('Loss %s not found in metrics %s' %\n (loss_name, self._metrics.keys()))\n loss = metric[0]\n all_losses.append(loss)\n train_ops['train/%s' % loss_name], stats = optimization.Apply(\n loss,\n vmap,\n gradient_mask=gradient_mask,\n gradient_adjuster=self.AdjustGradients)\n train_ops['stats/%s' % loss_name] = self.IncrementTotalNans(\n tf.to_int32(stats.has_nan_or_inf))\n for key, (value, weight) in six.iteritems(stats.eval_metrics):\n self.AddEvalMetric(key + '/' + loss_name, value, weight)\n\n relevant_bn_updates, _ = py_utils.FindRelevantBatchNormUpdates(\n all_losses, tf.get_collection(py_utils.BATCH_NORM_UPDATES))\n train_ops['bn_updates'] = relevant_bn_updates\n\n # Get the op to update the weight masks and thresholds\n train_ops['mask_updates'] = self._GetMaskUpdateOp()\n\n # Post training step update.\n train_ops['post_step'] = self.PostTrainingStepUpdate(self.global_step)\n\n with tf.control_dependencies(tf.nest.flatten(train_ops)):\n true_global_step = py_utils.GetOrCreateGlobalStepVar()\n with tf.colocate_with(true_global_step):\n increment_global_steps = tf.assign_add(true_global_step, 1)\n if self._global_step_var != true_global_step:\n with tf.colocate_with(self._global_step_var):\n increment_global_steps = tf.group(\n increment_global_steps, tf.assign_add(self._global_step_var, 1))\n train_ops['global_step'] = increment_global_steps\n\n # If we are using Tpu Embeddings, generate the monolithic send\n # gradient op.\n tpu_embedding_activations = tf.get_collection(\n py_utils.TPU_EMBEDDING_ACTIVATIONS)\n if tpu_embedding_activations:\n tpu_embedding_activations_dict = tpu_embedding_activations[0]\n tpu_embedding = tf.get_collection(py_utils.TPU_EMBEDDING)[0]\n tpu_embedding_send_gradient_op = py_utils.ComputeTpuEmbeddingGradients(\n self.loss, tpu_embedding_activations_dict, tpu_embedding)\n train_ops['tpu_embedding'] = tpu_embedding_send_gradient_op\n\n for op_name, op in six.iteritems(train_ops):\n assert op is not None, op_name\n\n # TODO(rpang): try to structure _train_op as:\n # tf.cond(skip_step, <only update skip stats>, <all 
updates>)\n # so that we skip all other updates when a step is skipped.\n self._train_op = tf.group(*tf.nest.flatten(train_ops), name='bprop')\n\n def _ComputeGradientMask(self, bprop_variable_filters):\n \"\"\"Compute gradient mask for each variable and bprop_variable_filters.\n\n Note that per_input_gradient_mask[var][i] will be 1 if var matches\n bprop_variable_filter[i], 0 otherwise.\n\n Args:\n bprop_variable_filters: A list of regex bprop_variable_filters for each\n file pattern.\n \"\"\"\n self._per_input_gradient_mask = py_utils.NestedMap()\n all_vars = set(self.vars.Flatten())\n for var in all_vars:\n self._per_input_gradient_mask[var.name] = (\n tf.zeros(len(bprop_variable_filters), dtype=tf.float32))\n for i in range(len(bprop_variable_filters)):\n if re.search(bprop_variable_filters[i], var.name):\n self._per_input_gradient_mask[var.name] += (\n tf.one_hot(i, len(bprop_variable_filters), dtype=tf.float32))\n\n def ApplyExponentialMovingAverage(self, ema):\n \"\"\"Wraps `self.train_op` with an op updating exponential moving average.\"\"\"\n # We need to apply EMA to trainable and moving average variables of this\n # Task, not just bprop vars, so that we create a shadow\n # '/ExponentialMovingAverage' variable for every trainable and moving\n # average variable.\n all_vars = set(tf.trainable_variables()) | set(\n tf.moving_average_variables())\n all_vars &= set(self.vars.Flatten())\n for var in all_vars:\n tf.logging.debug('ApplyExponentialMovingAverage: %s', var.name)\n with tf.control_dependencies(\n [self._train_op]), tf.name_scope('moving_average'):\n self._train_op = ema.apply(all_vars)\n\n def Decode(self, input_batch):\n \"\"\"Constructs the inference graph for eval decoding.\n\n Args:\n input_batch: The input batch. A `NestedMap` of tensors. Or, if input batch\n splitting is used, a list of `NestedMap`, one for each split.\n\n Returns:\n a dict of Tensors as decoder output.\n \"\"\"\n return {}\n\n def Inference(self):\n \"\"\"Constructs the inference graph.\n\n Each subgraph represents a public API for a part of the graph which can\n be operated independently. By convention, the subgraph named 'default'\n should perform end to end inference via the input generator.\n\n Note that having distinct subgraphs (e.g. 'encoder', 'decoder') is\n not just a space optimization: when driving the graph externally in an\n online fashion, evaluation often needs to be broken into pieces. In this\n case, the graph will be constructed with only those pieces.\n\n Returns:\n An `inference_graph_pb2.InferenceGraph` message.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n def CreateDecoderMetrics(self):\n \"\"\"Creates a dict of decoder metrics for `PostProcessDecodeOut` to update.\n\n Returns a dict mapping from string keys to `.BaseMetric` objects.\n \"\"\"\n pass\n\n def PostProcessDecodeOut(self, decode_out_dict, decode_metrics_dict):\n \"\"\"Post-processes decoder out and updates contents of `decode_metrics_dict`.\n\n Args:\n decode_out_dict: A dictionary of Tensors fetched.\n decode_metrics_dict: A dict mapping from string key to `.BaseMetric`\n object as created by `CreateDecoderMetrics`.\n\n Returns:\n output_key_value_pairs - a list of (key, value) pairs that can be saved\n (i.e. of type str, bytes, or unicode).\n \"\"\"\n pass\n\n @property\n def loss(self):\n assert self._loss is not None, ('No loss is defined. Call FProp first.')\n return self._loss\n\n @property\n def train_op(self):\n assert self._train_op is not None, (\n 'No train op is defined. 
Call BProp first.')\n return self._train_op\n\n @property\n def global_step(self):\n assert self._global_step is not None, ('No global_step is defined.')\n return self._global_step\n\n @property\n def input_generator(self):\n return self.input\n\n @property\n def eval_metrics(self):\n \"\"\"Returns the evaluation metrics.\n\n Returns:\n A map from metric name (a python string) to a tuple (value, weight).\n Both value and weight are scalar Tensors.\n \"\"\"\n return self._eval_metrics\n\n @property\n def per_example_tensors(self):\n \"\"\"Returns per-example outputs.\n\n Returns:\n A map from tensor name (a python string) to a tensor, where the\n first dimension is the batch index of the training example corresponding\n to this output.\n \"\"\"\n return self._per_example\n\n def AddEvalMetric(self, name, value, weight):\n \"\"\"Adds a metric to the eval metrics.\n\n Args:\n name: A python string. The name of the metric.\n value: A scalar Tensor.\n weight: A scalar Tensor.\n\n Raises:\n ValueError: if `name` is already defined.\n\n \"\"\"\n if name in self._eval_metrics:\n raise ValueError('Metric %s has already been defined.' % name)\n self._eval_metrics[name] = (value, weight)\n\n def AddPerExampleTensor(self, name, value):\n if name in self._per_example:\n raise ValueError('Metric %s has already been defined.' % name)\n self._per_example[name] = value\n\n @property\n def total_examples(self):\n \"\"\"Returns the total number of training examples processed so far.\"\"\"\n return self._total_examples.Value()\n\n @property\n def trainer_verbose_tensors(self):\n \"\"\"Return the dict of verbose tensors to eval in the training loop.\"\"\"\n return self._trainer_verbose_tensors\n\n def AddTrainerVerboseTensor(self, name, target):\n \"\"\"Add a (set of) tensors to be evaluated in the training loop.\n\n Args:\n name: A python string. The name of the target(s).\n target: A Tensor or a list or dict of Tensors.\n\n Raises:\n ValueError: if `name` is already defined.\n\n \"\"\"\n if name in self._trainer_verbose_tensors:\n raise ValueError('Verbose target %s has already been defined.' 
% name)\n self._trainer_verbose_tensors[name] = target\n\n def IncrementTotalSamples(self, value=None):\n \"\"\"Updates the total number of training examples with the batch size.\"\"\"\n p = self.params\n if self._total_examples is None:\n with tf.variable_scope(p.name):\n self._total_examples = StatsCounter('total_samples')\n if value is None:\n assert self.input_generator is not None, ('No input generator defined')\n value = self.input_generator.GlobalBatchSize()\n return self._total_examples.IncBy(p, value)\n\n def IncrementTotalNans(self, value):\n \"\"\"Updates the total number of NaN/Inf gradients by `value`.\"\"\"\n if self._total_nans_and_infs is None:\n with tf.variable_scope(\n py_utils.global_variable_scope, reuse=tf.AUTO_REUSE):\n self._total_nans_and_infs = StatsCounter('total_nan_gradients')\n return self._total_nans_and_infs.IncBy(self.params, value)\n\n def _UpdateVnConfig(self):\n \"\"\"Update vn config from the various vn flags.\"\"\"\n p = self.params\n tp = p.train\n if tp:\n vn_enabled = ((tp.vn_std > 0) and p.vn and\n (p.vn.global_vn or p.vn.per_step_vn))\n if p.is_eval or (not vn_enabled):\n p.vn = py_utils.VariationalNoiseParams(None, False, False)\n else:\n # vn.scale is dependent on global_step.\n p.vn.scale = tf.cast(self.global_step > tp.vn_start_step,\n py_utils.FPropDtype(p)) * tp.vn_std\n\n def _GetMaskUpdateOp(self):\n \"\"\"Returns op to update masks and threshold variables for model pruning.\"\"\"\n p = self.params\n tp = p.train\n mask_update_op = tf.no_op()\n if tp.pruning_hparams_dict:\n assert isinstance(tp.pruning_hparams_dict, dict)\n pruning_hparams = tf.contrib.model_pruning.get_pruning_hparams(\n ).override_from_dict(tp.pruning_hparams_dict)\n pruning_obj = tf.contrib.model_pruning.Pruning(\n pruning_hparams, global_step=self.global_step)\n pruning_obj.add_pruning_summaries()\n mask_update_op = pruning_obj.conditional_mask_update_op()\n return mask_update_op\n\n\nclass DistillationTask(BaseTask):\n \"\"\"A task to distill knowledge from a teacher task to a student task.\n\n The training parameters (e.g., learning rate) are determined only by\n `DistillationTask.params.train`. Teacher and student tasks' training and eval\n parameters must be set to None.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super(DistillationTask, cls).Params()\n p.Define('teacher', None, 'The teacher task params.')\n p.Define('student', None, 'The student task params.')\n p.Define(\n 'distillation_loss_weight',\n # Only uses distillation loss by default.\n schedule.ConstantOne.Params(),\n 'A schedule of distillation loss weight. '\n 'The weight determines the fraction of total loss contributed by '\n 'distillation loss, while the remaining loss will be computed against '\n 'the ground truth. '\n 'A weight of 0 means to only use ground-truth and ignore teacher '\n 'predictions, while a weight 1 means to only use teacher '\n 'predictions and ignore ground truth. '\n 'The weight is specified as a schedule to allow it to change '\n 'during training.')\n p.Define(\n 'teacher_target_type', 'truth', 'The target type for the teacher. '\n 'Choices are: '\n ' \"truth\": using the ground-truth target labels '\n ' \"beam\": using the 1-best hypothesis from the beam search.')\n p.Define(\n 'beam_search_temperature', 1.0, 'The temperature to scale the '\n 'log-prob of each beam search hypothesis. 
This is used in '\n 'training only')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, DistillationTask)\n super(DistillationTask, self).__init__(params)\n\n p = self.params\n # While student does not need its own input generator for training, it\n # needs an input generator for inference graphs.\n p.student.input = p.input\n # Teacher also might need an input generator, eg. for waveform_processor.\n p.teacher.input = p.input\n with tf.variable_scope(p.name):\n for child in ('teacher', 'student'):\n child_p = getattr(p, child)\n assert issubclass(child_p.cls, BaseTask)\n assert child_p.train is None\n assert child_p.eval is None\n # In theory it's ok for teacher to be a DistillationTask. In practice\n # it probably won't happen.\n assert not issubclass(child_p.cls, DistillationTask)\n child_p.name = child\n self.CreateChild(child, child_p)\n self.CreateChild('distillation_loss_weight', p.distillation_loss_weight)\n\n def ComputePredictions(self, theta, input_batch):\n p = self.params\n with tf.name_scope(p.name):\n if p.teacher_target_type == 'truth':\n teacher_predictions = self.teacher.ComputePredictions(\n theta.teacher, input_batch)\n student_predictions = self.student.ComputePredictions(\n theta.student, input_batch)\n return py_utils.NestedMap(\n teacher=teacher_predictions, student=student_predictions)\n elif p.teacher_target_type == 'beam':\n (teacher_predictions, teacher_input_batch,\n teacher_beam_prob) = self.teacher.ComputeBeamPredictions(\n theta.teacher, input_batch, p.beam_search_temperature)\n # We use 'teacher_input_batch' instead of 'input_batch' for 'student'\n # because the training of student network uses target transcripts for\n # the \"teacher forcing\" mode and here the target transcripts should come\n # from the teacher's beam search.\n student_predictions = self.student.ComputePredictions(\n theta.student, teacher_input_batch)\n return py_utils.NestedMap(\n teacher=teacher_predictions,\n student=student_predictions,\n teacher_beam_prob=teacher_beam_prob)\n else:\n raise ValueError('teacher target type not defined properly: %s' %\n self.p.teacher_target_type)\n\n def ComputeLoss(self, theta, input_batch, predictions):\n per_example = {}\n with tf.name_scope('groundtruth_loss'):\n groundtruth_loss, groundtruth_per_example = self.student.ComputeLoss(\n theta.student, input_batch, predictions.student)\n groundtruth_loss['groundtruth_loss'] = groundtruth_loss['loss']\n per_example.update(groundtruth_per_example)\n\n with tf.name_scope('distillation_loss'):\n distillation_loss, distill_per_example = self.ComputeDistillationLoss(\n theta, input_batch, predictions)\n distillation_loss['distillation_loss'] = distillation_loss['loss']\n per_example.update(distill_per_example)\n\n distillation_loss_weight = self.distillation_loss_weight.FProp(\n theta.distillation_loss_weight, self.global_step)\n metrics = py_utils.CombineMetrics([\n (groundtruth_loss, 1 - distillation_loss_weight),\n (distillation_loss, distillation_loss_weight),\n ])\n return metrics, per_example\n\n def ComputeDistillationLoss(self, theta, input_batch, predictions):\n raise NotImplementedError('Abstract method')\n\n def BProp(self):\n # Only bprop on student variables.\n self._BPropForVariables(self.student.vars)\n\n def Decode(self, input_batch):\n return self.student.Decode(input_batch)\n\n def Inference(self):\n return self.student.Inference()\n\n def CreateDecoderMetrics(self):\n return self.student.CreateDecoderMetrics()\n\n def 
PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict):\n return self.student.PostProcessDecodeOut(dec_out_dict, dec_metrics_dict)\n\n\nclass BaseModel(base_layer.BaseLayer):\n \"\"\"The abstract model class. All models are sub-class of this class.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super(BaseModel, cls).Params()\n p.Define(\n 'model', None, 'Which python function generates the param. It includes '\n 'the file name and lineno where the function is defined.')\n p.Define(\n 'cluster', cluster_factory.Cluster.Params(),\n 'The training cluster. Individual layer may config differently'\n ' based on training cluster it is running under.')\n p.Define('input', None, 'Input generator Params.')\n p.Define('build_data', build_data.BuildData(), 'Build data of this binary.')\n p.Define('train', hyperparams.Params(),\n 'Params to control how this model should be trained.')\n tp = p.train\n tp.Define(\n 'start_up_delay_steps', 200, 'i-th replica starts training after '\n 'i*(i+1)/2*start_up_delay_steps steps')\n tp.Define('max_steps', 4 * 10**6, 'Training max of 4M steps.')\n tp.Define('tpu_steps_per_loop', 100, 'The number of training steps per '\n 'training loop for TPUs.')\n tp.Define(\n 'ema_decay', 0.0,\n 'If > 0, enable ExponentialMovingAverage during training '\n 'with the give decay. '\n 'Must be < 1. Disabled if <= 0.')\n tp.Define('init_from_checkpoint_rules', {},\n 'See BaseTask documentation for details.')\n tp.Define('early_stop', None,\n 'Early stopping based on dev-set performance.')\n tp.Define('save_interval_seconds', 60 * 10,\n 'Generates a checkpoint roughly once every this many seconds.')\n tp.Define('summary_interval_steps', 100,\n 'Generates a checkpoint roughly once every this many steps.')\n\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n \"\"\"Initializes this Model.\"\"\"\n assert issubclass(params.cls, BaseModel)\n self._global_step_var = py_utils.GetOrCreateGlobalStepVar()\n self._global_step = tf.identity(\n self._global_step_var, name='global_step_tensor')\n super(BaseModel, self).__init__(params)\n # tasks are not yet instantiated.\n self._total_examples_sum = None\n\n self._ema = None\n tp = self.params.train\n tf.logging.info('Training parameters for %s: %s', params.cls, tp)\n if tp.ema_decay > 0:\n assert tp.ema_decay < 1.0\n self._ema = tf.train.ExponentialMovingAverage(\n decay=tp.ema_decay, num_updates=self.global_step)\n\n @property\n def global_step(self):\n assert self._global_step is not None, ('No global_step is defined.')\n return self._global_step\n\n @property\n def ema(self):\n return self._ema\n\n def ConstructFPropBPropGraph(self):\n raise NotImplementedError('Abstract method')\n\n def ConstructFPropGraph(self):\n raise NotImplementedError('Abstract method')\n\n @property\n def tasks(self):\n \"\"\"Returns a list of all tasks.\"\"\"\n raise NotImplementedError('Abstract method')\n\n def GetTask(self, task_name):\n \"\"\"Return the task associated with 'task_name'.\n\n Args:\n task_name: string, the name of the model task to be returned.\n\n Returns:\n An instance of `BaseTask`.\n \"\"\"\n raise NotImplementedError('Abstract method')\n\n @property\n def total_examples(self):\n \"\"\"Returns the total number of training examples processed so far.\"\"\"\n if self._total_examples_sum is None:\n self._total_examples_sum = tf.reduce_sum(\n [task.total_examples for task in self.tasks])\n return self._total_examples_sum\n\n def ProcessFPropResults(self, sess, global_step, metrics, per_example):\n \"\"\"Called once for each 
train loop.\n\n BaseTask.ProcessFPropResults is also called on each loop, so you\n can put your implementation wherever it is most convenient for you.\n\n Be sure to implement BaseTask.FilterPerExampleTensors if you plan to use any\n per-example tensors in this method.\n\n Args:\n sess: a session.\n global_step: approximate number of model training steps.\n metrics: the metrics dict returned by FPropTower.\n per_example: the per_example dict returned by FPropTower.\n \"\"\"\n pass\n\n\nclass SingleTaskModel(BaseModel):\n \"\"\"Model that consists of a single task.\"\"\"\n\n @classmethod\n def Params(cls, task_params=None):\n p = super(SingleTaskModel, cls).Params()\n p.Define('task', None, 'Task Params.')\n\n if task_params is not None:\n # Copy over model parameters from the task parameters.\n p.task = task_params\n base_layer.BaseLayer.CopyBaseParams(p.task, p)\n tp = p.train\n tp.start_up_delay_steps = p.task.train.start_up_delay_steps\n tp.max_steps = p.task.train.max_steps\n tp.tpu_steps_per_loop = p.task.train.tpu_steps_per_loop\n tp.ema_decay = p.task.train.ema_decay\n # init_from_checkpoint_rules does not need to be copied.\n tp.early_stop = p.task.train.early_stop\n tp.save_interval_seconds = p.task.train.save_interval_seconds\n tp.summary_interval_steps = p.task.train.summary_interval_steps\n\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, SingleTaskModel)\n assert params.task\n p = params.Copy() # Make a copy to avoid modifying the input.\n p.name = p.name or p.task.name\n p.task.name = p.task.name or p.name\n if p.input:\n assert not p.task.input\n p.task.input = p.input\n else:\n assert p.task.input\n p.input = p.task.input\n\n super(SingleTaskModel, self).__init__(p)\n\n p = self.params\n with py_utils.GlobalStepContext(self.global_step):\n self.CreateChild('_task', p.task)\n\n @property\n def tasks(self):\n return [self._task]\n\n def GetTask(self, task_name=None):\n assert not task_name, 'Must not specify >task_name< for single-task model.'\n return self._task\n\n def SampleTask(self, global_step):\n return self._task\n\n def ConstructFPropBPropGraph(self):\n self._task.FPropDefaultTheta()\n self._task.BProp()\n if self.ema:\n tf.logging.info('ApplyExponentialMovingAverage on %s', self._task)\n self._task.ApplyExponentialMovingAverage(self.ema)\n\n def ConstructFPropGraph(self):\n self._task.FPropDefaultTheta()\n\n\nclass MultiTaskModel(BaseModel):\n \"\"\"Model that consists of multiple tasks.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super(MultiTaskModel, cls).Params()\n p.Define('task_params', hyperparams.Params(),\n 'Params object mapping task name to task Params.')\n p.Define(\n 'task_probs', hyperparams.Params(),\n 'Params object mapping task name to the relative likelihood the '\n 'task will be sampled during training.')\n p.Define('task_schedule', None, 'Task schedule.')\n p.Define(\n 'task_global_step', False,\n 'Whether or not to use task-specific global steps, which causes each '\n 'task to use its own global_step instead of the true global_step.')\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n assert issubclass(params.cls, MultiTaskModel)\n super(MultiTaskModel, self).__init__(params)\n p = self.params\n assert len(p.task_params) > 1\n\n # Pass input params to tasks.\n assert isinstance(p.input, hyperparams.Params)\n assert set(dir(p.input)) == set(dir(p.task_params))\n for k, v in p.task_params.IterParams():\n assert isinstance(v, hyperparams.Params)\n assert not v.input\n 
v.input = p.input.Get(k)\n\n # For compatibility with older API (with p.task_probs)\n if p.task_schedule is None:\n p.task_schedule = task_scheduler.ConstantScheduler.Params()\n p.task_schedule.task_probs = sorted(list(p.task_probs.IterParams()))\n\n # CreateChild copies over global configs in p to individual task params,\n # which then gets propagated down to all sub-layers during\n # BaseTask._PropagateDownGlobalConfigs(), or through sub-sequent CreateChild\n # or CreateChildren calls.\n with py_utils.GlobalStepContext(self.global_step):\n with tf.name_scope(p.name):\n sorted_task_params = sorted(\n (task_name, task_params)\n for task_name, task_params in p.task_params.IterParams())\n for task_name, task_params in sorted_task_params:\n if p.task_global_step:\n assert task_name == task_params.name\n CreateTaskGlobalStep(task_name)\n # Make sure each task is under its own variable scope.\n with tf.variable_scope(task_name):\n self.CreateChild(task_name, task_params)\n self.CreateChild('task_schedule', p.task_schedule)\n\n @property\n def task_names(self):\n sorted_task_names = sorted(\n task_name for task_name, _ in self.params.task_params.IterParams())\n return sorted_task_names\n\n @property\n def tasks(self):\n return [self.children[name] for name in self.task_names]\n\n def GetTask(self, task_name):\n assert task_name, 'Must specify >task_name< for multi-task model.'\n return self.children[task_name]\n\n def SampleTask(self, global_step):\n \"\"\"Sample a task according self.task_schedule.\n\n `self.task_schedule.cur_probs` will also be updated.\n\n Args:\n global_step: int. Current time step.\n \"\"\"\n sampled_task = self.task_schedule.Sample(global_step)\n tf.logging.info('Sampled task: %s', sampled_task)\n return self.children[sampled_task]\n\n def ConstructFPropBPropGraph(self):\n for task_name in self.task_names:\n with tf.name_scope(task_name):\n task = self.GetTask(task_name)\n task.FPropDefaultTheta()\n task.BProp()\n if self.ema:\n task.ApplyExponentialMovingAverage(self.ema)\n\n def ConstructFPropGraph(self):\n for task_name in self.task_names:\n with tf.name_scope(task_name):\n task = self.GetTask(task_name)\n task.FPropDefaultTheta()\n"
] |
[
[
"tensorflow.logging.warning",
"tensorflow.control_dependencies",
"tensorflow.colocate_with",
"tensorflow.logging.debug",
"tensorflow.reduce_sum",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.moving_average_variables",
"tensorflow.nest.flatten",
"tensorflow.to_int32",
"tensorflow.to_int64",
"tensorflow.assign_add",
"tensorflow.get_collection",
"tensorflow.contrib.model_pruning.get_pruning_hparams",
"tensorflow.name_scope",
"tensorflow.trainable_variables",
"tensorflow.tensordot",
"tensorflow.identity",
"tensorflow.logging.info",
"tensorflow.no_op",
"tensorflow.constant",
"tensorflow.contrib.model_pruning.Pruning",
"tensorflow.variable_scope"
]
] |
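A minimal, self-contained sketch of the tf.train.ExponentialMovingAverage pattern that BaseModel above ties to the global step. Illustrative only: the variable names are hypothetical and TF1-style graph execution through tf.compat.v1 is assumed.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

global_step = tf.train.get_or_create_global_step()
# Same construction as in BaseModel.__init__: a decay rate plus a num_updates tensor.
ema = tf.train.ExponentialMovingAverage(decay=0.999, num_updates=global_step)

w = tf.get_variable("w", shape=[2], initializer=tf.zeros_initializer())
update_w = tf.assign_add(w, [1.0, 1.0])

# ema.apply creates the shadow variables and returns the op that refreshes them;
# the control dependency makes the refresh run after the weight update.
with tf.control_dependencies([update_w]):
    train_op = ema.apply([w])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(ema.average(w)))  # smoothed shadow copy of w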
fitnesswanderer/Disaster_Alerts
|
[
"5eb8fb9204ad4e47167c2455fe99ae7d1576fb54"
] |
[
"app/run.py"
] |
[
"import json\nimport plotly\nimport pandas as pd\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nfrom plotly.graph_objs import Bar\nfrom sklearn.externals import joblib\nfrom sqlalchemy import create_engine\n\n\napp = Flask(__name__)\n\ndef tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n# load data\nengine = create_engine('sqlite:///../data/DisasterResponse.db')\ndf = pd.read_sql_table('DisasterResponse', engine)\n\n# load model\nmodel = joblib.load(\"../models/classifier.pkl\")\nprint('loaded the pickle now')\n\n# index webpage displays cool visuals and receives user input text for model\n@app.route('/')\n@app.route('/index')\ndef index():\n \n # extract data needed for visuals\n # TODO: Below is an example - modify to extract data for your own visuals\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n \n vis = df.drop(columns=['id', 'message', 'original', 'genre']);\n \n count = vis.sum().sort_values(ascending=False)\n \n categories = count.index\n #Number of messages for medical help\n medicalhelp_counts = df.groupby(['medical_help']).count()['message']\n category_medicalhelp_names = ['Medical_help' if i==1 else 'Medical_help' for i in list(medicalhelp_counts.index)] \n \n \n # create visuals\n # TODO: Below is an example - modify to create your own visuals\n graphs = [\n {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n },\n {\n 'data': [\n Bar(\n x=categories,\n y=count\n )\n ],\n\n 'layout': {\n 'title': 'Count of Messages for Each Category',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Message Categories\"\n }, \n 'template': 'plotly_dark'\n }\n },\n {\n 'data': [\n Bar(\n x=category_medicalhelp_names,\n y=medicalhelp_counts\n )\n ],\n\n 'layout': {\n 'title': ' Medical_help Messages <br> out of all Messages',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"\"\n }\n }\n }\n ]\n \n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# web page that handles user query and displays model results\n@app.route('/go')\ndef go():\n # save user input in query\n query = request.args.get('query', '') \n\n # use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the go.html Please see that file. \n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug=True)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"pandas.read_sql_table",
"sklearn.externals.joblib.load"
]
] |
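The run.py entry above loads its data with pandas.read_sql_table against a SQLAlchemy engine; below is a minimal round-trip sketch of that call, using an in-memory SQLite database and toy rows instead of the repo's DisasterResponse.db. Two hedged observations on the source itself: sklearn.externals.joblib was removed from recent scikit-learn releases (importing joblib directly is the current spelling), and in the medical-help visual both branches of the conditional list comprehension yield 'Medical_help', so the two bars end up identically labeled; presumably one branch was meant to differ.

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite:///:memory:")

# Write a tiny stand-in table, then read the whole table back by name.
pd.DataFrame({"message": ["need water"], "genre": ["direct"]}).to_sql(
    "DisasterResponse", engine, index=False)
df = pd.read_sql_table("DisasterResponse", engine)
print(df.head())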
m13uz/FLAML
|
[
"99de9204b3d92703f1afc0773e6d4441b5c14348"
] |
[
"test/nlp/test_autohf_custom_metric.py"
] |
[
"import sys\nimport pytest\n\n\ndef custom_metric(\n X_test,\n y_test,\n estimator,\n labels,\n X_train,\n y_train,\n weight_test=None,\n weight_train=None,\n config=None,\n groups_test=None,\n groups_train=None,\n):\n from datasets import Dataset\n from flaml.model import TransformersEstimator\n\n if estimator._trainer is None:\n estimator._init_model_for_predict(X_test)\n trainer = estimator._trainer\n estimator._trainer = None\n else:\n trainer = estimator._trainer\n if y_test is not None:\n X_test, _ = estimator._preprocess(X_test)\n eval_dataset = Dataset.from_pandas(TransformersEstimator._join(X_test, y_test))\n else:\n X_test, _ = estimator._preprocess(X_test)\n eval_dataset = Dataset.from_pandas(X_test)\n\n estimator_metric_backup = estimator._metric\n estimator._metric = \"rmse\"\n metrics = trainer.evaluate(eval_dataset)\n estimator._metric = estimator_metric_backup\n\n return metrics.pop(\"eval_automl_metric\"), metrics\n\n\n@pytest.mark.skipif(sys.platform == \"darwin\", reason=\"do not run on mac os\")\ndef test_custom_metric():\n from flaml import AutoML\n import pandas as pd\n\n train_data = {\n \"sentence1\": [\n 'Amrozi accused his brother , whom he called \" the witness \" , of deliberately distorting his evidence .',\n \"Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion .\",\n \"They had published an advertisement on the Internet on June 10 , offering the cargo for sale , he added .\",\n \"Around 0335 GMT , Tab shares were up 19 cents , or 4.4 % , at A $ 4.56 , having earlier set a record high of A $ 4.57 .\",\n ],\n \"sentence2\": [\n 'Referring to him as only \" the witness \" , Amrozi accused his brother of deliberately distorting his evidence .',\n \"Yucaipa bought Dominick 's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998 .\",\n \"On June 10 , the ship 's owners had published an advertisement on the Internet , offering the explosives for sale .\",\n \"Tab shares jumped 20 cents , or 4.6 % , to set a record closing high at A $ 4.57 .\",\n ],\n \"label\": [1, 0, 1, 0],\n \"idx\": [0, 1, 2, 3],\n }\n train_dataset = pd.DataFrame(train_data)\n\n dev_data = {\n \"sentence1\": [\n \"The stock rose $ 2.11 , or about 11 percent , to close Friday at $ 21.51 on the New York Stock Exchange .\",\n \"Revenue in the first quarter of the year dropped 15 percent from the same period a year earlier .\",\n \"The Nasdaq had a weekly gain of 17.27 , or 1.2 percent , closing at 1,520.15 on Friday .\",\n \"The DVD-CCA then appealed to the state Supreme Court .\",\n ],\n \"sentence2\": [\n \"PG & E Corp. shares jumped $ 1.63 or 8 percent to $ 21.03 on the New York Stock Exchange on Friday .\",\n \"With the scandal hanging over Stewart 's company , revenue the first quarter of the year dropped 15 percent from the same period a year earlier .\",\n \"The tech-laced Nasdaq Composite .IXIC rallied 30.46 points , or 2.04 percent , to 1,520.15 .\",\n \"The DVD CCA appealed that decision to the U.S. 
Supreme Court .\",\n ],\n \"label\": [1, 1, 0, 1],\n \"idx\": [4, 5, 6, 7],\n }\n dev_dataset = pd.DataFrame(dev_data)\n\n custom_sent_keys = [\"sentence1\", \"sentence2\"]\n label_key = \"label\"\n\n X_train = train_dataset[custom_sent_keys]\n y_train = train_dataset[label_key]\n\n X_val = dev_dataset[custom_sent_keys]\n y_val = dev_dataset[label_key]\n\n automl = AutoML()\n\n # testing when max_iter=1 and do retrain only without hpo\n\n automl_settings = {\n \"gpu_per_trial\": 0,\n \"max_iter\": 1,\n \"time_budget\": 5,\n \"task\": \"seq-classification\",\n \"metric\": custom_metric,\n \"log_file_name\": \"seqclass.log\",\n }\n\n automl_settings[\"custom_hpo_args\"] = {\n \"model_path\": \"google/electra-small-discriminator\",\n \"output_dir\": \"data/output/\",\n \"ckpt_per_epoch\": 5,\n \"fp16\": False,\n }\n\n automl.fit(\n X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings\n )\n\n # testing calling custom metric in TransformersEstimator._compute_metrics_by_dataset_name\n\n automl_settings[\"max_iter\"] = 3\n automl.fit(\n X_train=X_train, y_train=y_train, X_val=X_val, y_val=y_val, **automl_settings\n )\n\n del automl\n\n\nif __name__ == \"__main__\":\n test_custom_metric()\n"
] |
[
[
"pandas.DataFrame"
]
] |
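The FLAML test above builds its train and dev sets with pandas.DataFrame from dicts of equal-length lists; a minimal sketch of that construction follows, with toy sentences in place of the MRPC-style pairs, splitting out the feature frame and label series the way the test feeds AutoML.fit.

import pandas as pd

data = {
    "sentence1": ["a b", "c d"],
    "sentence2": ["a c", "c e"],
    "label": [1, 0],
    "idx": [0, 1],
}
df = pd.DataFrame(data)  # each dict key becomes one column

X = df[["sentence1", "sentence2"]]  # passed as X_train / X_val
y = df["label"]                     # passed as y_train / y_val
print(X.shape, y.tolist())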
BoevaLab/scCanSig
|
[
"bbf6de268d0a2eb2ea499253c9e71fecc06f34ea"
] |
[
"cansig/filesys.py"
] |
[
"\"\"\"This module controls the directory structure used to save the results.\"\"\"\nimport abc\nimport datetime\nimport json\nimport pathlib\nfrom typing import Any, Callable, TypeVar\n\nimport pandas as pd # pytype: disable=import-error\nimport petname # pytype: disable=import-error\nimport pydantic # pytype: disable=import-error\n\nimport cansig.types as types\n\n\nclass StructuredDir(abc.ABC):\n def __init__(self, path: types.Pathlike, create: bool = False) -> None:\n self.path = pathlib.Path(path)\n if create:\n self.create()\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({self.path})\"\n\n def create(self) -> None:\n self.path.mkdir(parents=True, exist_ok=False)\n\n @property\n @abc.abstractmethod\n def valid(self) -> bool:\n pass\n\n\n_Settings = TypeVar(\"_Settings\", bound=pydantic.BaseModel)\n\n\ndef read_settings(factory: Callable[[Any], _Settings], path: types.Pathlike) -> _Settings:\n with open(path) as f:\n raw = json.load(f)\n # Annotating a callable which takes `**kwargs` is very tricky,\n # so we simply ignore the typecheck\n return factory(**raw) # pytype: disable=wrong-arg-count\n\n\ndef save_settings(settings: pydantic.BaseModel, path: types.Pathlike) -> None:\n with open(path, \"w\") as f:\n f.write(settings.json())\n\n\ndef save_latent_representations(representations: pd.DataFrame, path: types.Pathlike) -> None:\n representations.to_csv(path, index=True, header=False)\n\n\ndef read_latent_representations(path: types.Pathlike) -> pd.DataFrame:\n return pd.read_csv(path, index_col=0, header=None)\n\n\ndef save_cluster_labels(labels, index, path: types.Pathlike) -> None:\n return pd.DataFrame(labels, index=index).to_csv(path, index=True, header=False)\n\n\ndef read_cluster_labels(path: types.Pathlike) -> pd.DataFrame:\n return pd.read_csv(path, index_col=0, header=False)\n\n\nclass IntegrationDir(StructuredDir):\n MODEL: str = \"integration-settings.json\"\n REPRESENTATIONS: str = \"latent-representations.csv\"\n\n def valid(self) -> bool:\n return self.path.is_dir() and self.latent_representations.is_file() and self.integration_settings.is_file()\n\n @property\n def latent_representations(self) -> pathlib.Path:\n return self.path / self.REPRESENTATIONS\n\n @property\n def integration_settings(self) -> pathlib.Path:\n return self.path / self.MODEL\n\n\nclass PostprocessingDir(StructuredDir):\n LABELS: str = \"cluster-labels.csv\"\n CLUSTER_SETTINGS: str = \"cluster-settings.json\"\n INTEGRATION_SETTINGS: str = IntegrationDir.MODEL\n GSEA_SETTINGS: str = \"gsea-settings.json\"\n\n def valid(self) -> bool:\n return (\n self.path.is_dir()\n and self.cluster_labels.is_file()\n and self.cluster_settings.is_file()\n and self.integration_settings.is_file()\n )\n\n @property\n def cluster_labels(self) -> pathlib.Path:\n return self.path / self.LABELS\n\n @property\n def cluster_settings(self) -> pathlib.Path:\n return self.path / self.CLUSTER_SETTINGS\n\n @property\n def integration_settings(self) -> pathlib.Path:\n return self.path / self.INTEGRATION_SETTINGS\n\n @property\n def gsea_settings(self) -> pathlib.Path:\n return self.path / self.GSEA_SETTINGS\n\n @property\n def gsea_output(self) -> pathlib.Path:\n # TODO(Pawel): Note, the desired output is still discussed.\n return self.path / \"gsea-dataframe.csv\"\n\n @property\n def scatter_output(self) -> pathlib.Path:\n return self.path / \"latent_space_dimred.png\"\n\n\ndef get_directory_name() -> str:\n \"\"\"A string representing a unique name for the run.\"\"\"\n now = datetime.datetime.now() # current 
date and time\n date_time = now.strftime(\"%Y%m%d-%H%M%S\")\n suffix = petname.generate(separator=\"-\")\n return f\"{date_time}-{suffix}\"\n\n\ndef get_file(file_or_dir: types.Pathlike, ext: str) -> pathlib.Path:\n \"\"\"If the specified file is a directory, checks if there is a unique\n file ending with `ext` inside it.\n\n Returns:\n `file_or_dir` if it is a regular file, otherwise the unique file\n with extension `ext` inside\n `ext`: suffix, e.g. \".csv\", or \".txt\"\n\n Raises:\n FileNotFoundError, if the file doesn't exist\n FileExistsError, if multiple files inside `file_or_dir` have\n matching extension `ext`\n \"\"\"\n fod = pathlib.Path(file_or_dir)\n\n if not fod.exists():\n raise FileNotFoundError(f\"File {file_or_dir} does not exist.\")\n if fod.is_file():\n return fod\n elif fod.is_dir():\n candidates = list(fod.glob(f\"*{ext}\"))\n if len(candidates) == 0:\n raise FileNotFoundError(f\"There are no files with extension {ext} in directory {fod}.\")\n elif len(candidates) >= 2:\n raise FileExistsError(f\"There are too many candidates in {fod}: {candidates}.\")\n else:\n return candidates[0]\n else:\n raise ValueError(f\"File {fod} is neither regular nor a directory.\")\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
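A minimal round-trip sketch for the latent-representation helpers in filesys.py above, using a temporary directory and toy values. One caveat on the source: read_cluster_labels passes header=False, which recent pandas versions reject ("Passing a bool to header is invalid"); header=None, as read_latent_representations already uses, is the accepted spelling.

import pathlib
import tempfile

import pandas as pd

tmp = pathlib.Path(tempfile.mkdtemp())
path = tmp / "latent-representations.csv"

reps = pd.DataFrame([[0.1, 0.2], [0.3, 0.4]], index=["cell-a", "cell-b"])
reps.to_csv(path, index=True, header=False)         # save_latent_representations
back = pd.read_csv(path, index_col=0, header=None)  # read_latent_representations
print(back.loc["cell-a"].tolist())                  # [0.1, 0.2]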
theRealSuperMario/lmdis-rep
|
[
"2373877eb75b26e5fec5a3a143eff2f7f7c07c41"
] |
[
"nets/data/pennaction_128x128.py"
] |
[
"import os\nimport cv2\nimport numpy as np\nimport json\nimport time\nimport threading\nimport random\nfrom multiprocessing.dummy import Pool\nfrom multiprocessing import cpu_count\nimport scipy.io as sio\nimport pandas as pd\n\n\nclass Net:\n def __init__(self, subset_name=\"train\", options=None):\n\n self._debug = False\n self._shuffle = False\n self._cache_size = 3000\n self._mean_reduce = False\n self._mean = [5.0, 10.0, 15.0]\n if options != None and options != {}:\n if \"cache_size\" in options:\n self._cache_size = options[\"cache_size\"]\n if \"mean_reduce\" in options:\n self._mean_reduce = options[\"mean_reduce\"]\n if \"shuffle\" in options:\n self._shuffle = options[\"shuffle\"]\n if \"debug\" in options:\n self._debug = options[\"debug\"]\n\n current_path = os.path.dirname(os.path.abspath(__file__))\n root_path = current_path[:-9]\n self._pennaction_train = (\n os.path.join(root_path, \"data/pennaction/denseposed_csv/denseposed_jumping_jacks_jump_rope_train.csv\")\n )\n self._pennaction_test = (\n os.path.join(root_path, \"data/pennaction/denseposed_csv/denseposed_jumping_jacks_jump_rope_test.csv\")\n )\n self._impath = root_path + \"data/pennaction/\"\n\n # with open(self._cat_train, \"r\") as f:\n # self._train_imlist = f.read().splitlines()\n self._train_imlist = pd.read_csv(self._pennaction_train)[\"im1\"]\n self._test_imlist = pd.read_csv(self._pennaction_test)[\"im1\"]\n if subset_name == \"train\":\n self._imlist = self._train_imlist\n if subset_name == \"test\":\n self._imlist = self._test_imlist\n self._num_samples = len(self._imlist)\n self._waitlist = list(range(len(self._imlist)))\n if self._shuffle:\n random.shuffle(self._waitlist)\n self._dataset = None\n self._cur_pos = 0 # num of sample done in this epoch\n self._cur_epoch = 0 # current num of epoch\n self._cur_iter = 0 # num of batches returned\n self._num_fields = 1\n self._out_h = 128\n self._out_w = 128\n\n self._image_cache = []\n\n self._lock = threading.Lock()\n\n # self.set_dataset()\n\n self._pool_size = cpu_count()\n\n self._pool = Pool(self._pool_size)\n self._cache_thread = threading.Thread(target=self.preload_dataset)\n self._cache_thread.start()\n\n def read_image(self, i):\n image_name = self._impath + self._imlist[i]\n # The channel for cv2.imread is B, G, R\n if not os.path.exists(image_name):\n print(image_name)\n image_arr = cv2.imread(image_name)\n image_arr = cv2.resize(image_arr, (self._out_w, self._out_h))\n h, w, _ = image_arr.shape\n # margin_h = 0\n # margin_w = 0\n # # image_arr = image_arr[\n # # margin_h : margin_h + self._out_h, margin_w : margin_w + self._out_w\n # # ]\n result = image_arr.astype(np.float32) / np.array(255.0, dtype=np.float32)\n result[:, :, [0, 1, 2]] = result[:, :, [2, 1, 0]]\n\n return result\n\n def __call__(self, *args, **kwargs):\n return self.next_batch(*args, **kwargs)\n\n def num_samples(self):\n return self._num_samples\n\n def epoch(self):\n return self._cur_epoch\n\n def iter(self):\n return self._cur_iter\n\n def num_fields(self):\n return self._num_fields\n\n def num_samples_finished(self):\n return self._cur_pos\n\n def reset(self):\n \"\"\" Reset the state of the data loader\n E.g., the reader points at the beginning of the dataset again\n :return: None\n \"\"\"\n self._cur_pos = 0\n self._cur_epoch = 0\n self._cur_iter = 0\n self._waitlist = list(range(len(self._imlist)))\n if self._shuffle:\n random.shuffle(self._waitlist)\n tmp = 0\n while self._cache_thread.isAlive():\n tmp += 1\n self._cache_thread = 
threading.Thread(target=self.preload_dataset)\n self._lock.acquire()\n self._image_cache = []\n self._lock.release()\n self._cache_thread.start()\n\n def preload_dataset(self):\n if self._debug:\n print(\"preload\")\n if len(self._image_cache) > self._cache_size:\n return\n else:\n while len(self._image_cache) < 1000:\n if len(self._waitlist) < 1000:\n self._waitlist += list(range(len(self._imlist)))\n if self._shuffle:\n random.shuffle(self._waitlist)\n\n results = self._pool.map(self.read_image, self._waitlist[:1000])\n del self._waitlist[:1000]\n self._lock.acquire()\n self._image_cache = self._image_cache + list(results)\n self._lock.release()\n if self._debug:\n print(len(self._image_cache))\n\n def next_batch(self, batch_size):\n \"\"\" fetch the next batch\n :param batch_size: next batch_size\n :return: a tuple includes all data\n \"\"\"\n if batch_size < 0:\n batch_size = 0\n if self._cache_size < 3 * batch_size:\n self._cache_size = 3 * batch_size\n\n this_batch = [None] * self._num_fields\n\n if len(self._image_cache) < batch_size:\n if self._debug:\n print(\"Blocking!!, Should only appear once with proper setting\")\n\n if not self._cache_thread.isAlive():\n self._cache_thread = threading.Thread(target=self.preload_dataset)\n self._cache_thread.start()\n self._cache_thread.join()\n\n self._lock.acquire()\n this_batch[0] = self._image_cache[0:batch_size]\n del self._image_cache[0:batch_size]\n self._lock.release()\n else:\n self._lock.acquire()\n this_batch[0] = self._image_cache[0:batch_size]\n del self._image_cache[0:batch_size]\n self._lock.release()\n if not self._cache_thread.isAlive():\n self._cache_thread = threading.Thread(target=self.preload_dataset)\n self._cache_thread.start()\n\n self._cur_iter += 1\n self._cur_pos = self._cur_pos + batch_size\n if self._cur_pos >= self._num_samples:\n self._cur_epoch += 1\n self._cur_pos = self._cur_pos % self._num_samples\n\n return this_batch\n\n @staticmethod\n def output_types(): # only used for net instance\n t = [\"float32\"]\n return t\n\n @staticmethod\n def output_shapes():\n t = [(None, self._out_h, self._out_w, 3)] # None for batch size\n return t\n\n @staticmethod\n def output_ranges():\n return [1.0]\n\n @staticmethod\n def output_keys():\n return [\"data\"]\n\n\nif __name__ == \"__main__\":\n d = Net(\"train\")\n ex = d.next_batch(16)\n"
] |
[
[
"numpy.array",
"pandas.read_csv"
]
] |
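A minimal sketch of the per-image preprocessing in Net.read_image above: scale uint8 pixels to float32 in [0, 1] and swap cv2's BGR channel order to RGB. A random array stands in for an actual PennAction frame. Also worth noting in the source: output_shapes is declared a staticmethod yet references self._out_h, so calling it as written raises a NameError.

import numpy as np

image_arr = np.random.randint(0, 256, size=(128, 128, 3), dtype=np.uint8)

result = image_arr.astype(np.float32) / np.array(255.0, dtype=np.float32)
result[:, :, [0, 1, 2]] = result[:, :, [2, 1, 0]]  # BGR -> RGB; the fancy-indexed RHS copies first
print(result.dtype, float(result.min()), float(result.max()))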
ywen666/code-transformer
|
[
"66ef3c93a4abf802d12ab9bfd7bdf487202924e9"
] |
[
"code_transformer/experiments/mixins/great_transformer.py"
] |
[
"from abc import ABC\n\nfrom torch.nn import CrossEntropyLoss\n\nfrom code_transformer.configuration.great_transformer import GreatTransformerConfig, GreatEncoderConfig\nfrom code_transformer.configuration.transformer_lm_decoder import TransformerLMDecoderConfig\nfrom code_transformer.experiments.experiment import ExperimentSetup, ex\nfrom code_transformer.modeling.constants import SOS_TOKEN, UNKNOWN_TOKEN\nfrom code_transformer.modeling.great_transformer.transformer import GreatEncoderTransformerAdapter, \\\n GreatTransformerDecoder\nfrom code_transformer.modeling.modelmanager import GreatModelManager\nfrom code_transformer.utils.loss import LabelSmoothingLoss\n\n\nclass GreatTransformerDecoderMixin(ExperimentSetup, ABC):\n\n @ex.capture(prefix=\"model\")\n def _init_model(self, lm_encoder: dict, lm_decoder: dict, with_cuda: bool, label_smoothing=None):\n\n transformer_config = GreatTransformerConfig(**lm_encoder['transformer_config'])\n\n config = GreatEncoderConfig(**lm_encoder)\n\n num_edge_types = 0\n for d in self.relative_distances:\n if d in [\"ancestor_sp\", \"sibling_sp\"]:\n num_edge_types += 2\n elif d == \"shortest_paths\":\n num_edge_types += 1\n transformer_config.bias_dim = num_edge_types\n config.transformer_config = transformer_config\n\n if hasattr(self, 'word_vocab'):\n config.vocab_size = len(self.word_vocab.vocabulary)\n if hasattr(self, 'node_type_vocab'):\n config.num_node_types = len(self.node_type_vocab.vocabulary)\n if hasattr(self, \"num_sub_tokens\"):\n config.subtokens_per_token = self.num_sub_tokens\n if hasattr(self, 'num_languages'):\n config.num_languages = self.num_languages\n\n great_lm_encoder = GreatEncoderTransformerAdapter(config)\n\n if label_smoothing is None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n else:\n loss_fct = LabelSmoothingLoss(label_smoothing)\n\n model_config = TransformerLMDecoderConfig(great_lm_encoder, sos_id=self.word_vocab[SOS_TOKEN],\n unk_id=self.word_vocab[UNKNOWN_TOKEN], loss_fct=loss_fct,\n output_subtokens_per_token=self.dataset_train.num_sub_tokens_output,\n use_pointer_network=self.use_pointer_network if hasattr(self,\n \"use_pointer_network\") else False,\n **lm_decoder)\n self.model_manager = GreatModelManager()\n self.model_lm = GreatTransformerDecoder(model_config)\n\n self.with_cuda = with_cuda\n"
] |
[
[
"torch.nn.CrossEntropyLoss"
]
] |
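A minimal sketch of the loss configured in the mixin above: torch.nn.CrossEntropyLoss with ignore_index=-1, so any target position marked -1 (for example a padding subtoken) contributes nothing to the loss. Shapes and values are illustrative.

import torch
from torch.nn import CrossEntropyLoss

loss_fct = CrossEntropyLoss(ignore_index=-1)
logits = torch.randn(4, 10)             # 4 positions over a 10-way vocabulary
targets = torch.tensor([3, 7, -1, -1])  # the last two positions are ignored
print(loss_fct(logits, targets))        # mean over the two real targets only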
jyjen/helpers
|
[
"3a4914fcc7d80e13141c0ce15a39148d665d1b2e"
] |
[
"df_str_helpers.py"
] |
[
"import pandas as pd\nimport re\nimport spacy\nimport str_helpers as strh\nimport nltk\n# nltk.download('punkt')\n\n#? class Process\ndef split_sents_df(df: pd.core.frame.DataFrame,\n sent_col: str,\n segmenter: str):\n\n \"\"\"Splits sentences in a DataFrame and stacks the sentences into a new DataFrame.\n\n Arguments:\n df: pd.core.frame.DataFrame - DataFrame containing text to segment\n sent_col: str - Name of column containing paragraphs to segment\n segmenter: str - Sentence segmenter to use. Either 'nltk_punkt', 'regex' or 'spacy'\n \"\"\"\n\n temp_df = df.copy(deep = True)\n split = temp_df[sent_col].apply(lambda text: strh.split_sents(text, segmenter))\n\n return pd.DataFrame({'sent':[item for sublist in split.tolist()\n for item in sublist]})\n\n# ? class Sniffer\ndef check_in_row(df: pd.core.frame.DataFrame,\n pattern: str,\n re_flags=0,\n use_regex=True):\n\n \"\"\"Returns the rows of a dataframe which contain either a specific substring or regex pattern.\n\n Arguments:\n df: pd.core.frame.DataFrame - DataFrame to search for substrings or regex patterns in\n pattern: str - The specific substring or regex pattern to search for\n re_flags: RegexFlag - re module flags to use\n use_regex: boolean - If True use regex search; else search for substring\n \"\"\"\n\n re_series = [df[colname].str.contains(pattern,\n flags=re_flags,\n regex=use_regex) for colname in df.columns]\n re_df = pd.concat(re_series, axis=1)\n any_re = re_df.apply(lambda row: any(row), axis=1)\n\n return df[any_re]\n\n\nclass ReplaceTokens:\n\n \"\"\"\n For replacing tokens in a DataFrame.\n Substitutes tokens in a dataframe column using key-value pairs in the specified dictionary.\n\n Arguments:\n df: pd.core.frame.DataFrame - DataFrame containing sentences with tokens to be replaced.\n\n re_dict: dict - A dictionary containing with keys-value pairs which\n correspond to the word/phrase to be replaced and it's replacement.\n i.e. {'phrase2replace':'replacement phrase'} \n \"\"\"\n\n def __init__(self,\n df: pd.core.frame.DataFrame,\n re_dict: dict):\n\n self.temp_df = df.copy(deep=True)\n self.replacer = self.make_xlat(re_dict)\n\n @staticmethod\n def make_xlat(re_dict):\n\n \"\"\"\n Sets up a function which performs multi-string substitution in a single pass.\n For reference: https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html\n\n Arguments:\n re_dict: dict - A dictionary containing with keys-value pairs which\n correspond to the word/phrase to be replaced and it's replacement.\n i.e. {'phrase2replace':'replacement phrase'}\n \"\"\"\n\n rx = re.compile('|'.join(map(re.escape, re_dict)))\n\n def one_xlat(match):\n return re_dict[match.group(0)]\n\n def xlat(text):\n return rx.sub(one_xlat, text)\n\n return xlat\n\n def replace_in_col(self,\n str_col: str):\n\n \"\"\"\n Replaces tokens specified in self.re_dict in a specified column in self.df.\n\n Arguments:\n str_col: str - The name of the DataFrame column with tokens to be replaced.\n \"\"\"\n\n self.temp_df[str_col] = self.temp_df[str_col].apply(lambda sent: self.replacer(sent))\n\n return self.temp_df\n\n def replace_all(self):\n\n \"\"\"\n Replaces tokens specified in self.re_dict in all columns containing strings in self.df.\n \"\"\"\n\n text_cols = [colname for colname in self.temp_df.columns\n if all(self.temp_df[colname].apply(lambda item: isinstance(item, str)))]\n\n for col in text_cols:\n\n self.temp_df[col] = self.temp_df[col].apply(lambda sent: self.replacer(sent))\n\n return self.temp_df\n\n"
] |
[
[
"pandas.concat"
]
] |
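A minimal sketch (toy frame, toy pattern) of the row-filter idiom behind check_in_row above: run str.contains per column, line the boolean Series up with pandas.concat on axis=1, then keep rows where any column matched. The final .any(axis=1) is the vectorized equivalent of the original apply(lambda row: any(row), axis=1).

import pandas as pd

df = pd.DataFrame({"a": ["cat", "dog"], "b": ["bird", "catfish"]})
pattern = "cat"

re_series = [df[c].str.contains(pattern, regex=True) for c in df.columns]
re_df = pd.concat(re_series, axis=1)  # one boolean column per original column
any_re = re_df.any(axis=1)            # True where at least one column matched
print(df[any_re])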
kjappelbaum/pyepal
|
[
"006be9440c3fcff223b1b2a3f98222d732c60a48"
] |
[
"pyepal/pal/pal_gpy.py"
] |
[
"# -*- coding: utf-8 -*-\n# Copyright 2020 PyePAL authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"PAL using GPy GPR models\"\"\"\nimport concurrent.futures\nfrom functools import partial\n\nimport numpy as np\n\nfrom .pal_base import PALBase\nfrom .schedules import linear\nfrom .validate_inputs import validate_njobs, validate_number_models\n\n__all__ = [\"PALGPy\"]\n\n\ndef _train_model_picklable(i, models, restarts):\n model = models[i]\n model.optimize_restarts(restarts)\n return model\n\n\nclass PALGPy(PALBase):\n \"\"\"PAL class for a list of GPy GPR models, with one model per objective\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Contruct the PALGPy instance\n\n Args:\n X_design (np.array): Design space (feature matrix)\n models (list): Machine learning models\n ndim (int): Number of objectives\n epsilon (Union[list, float], optional): Epsilon hyperparameter.\n Defaults to 0.01.\n delta (float, optional): Delta hyperparameter. Defaults to 0.05.\n beta_scale (float, optional): Scaling parameter for beta.\n If not equal to 1, the theoretical guarantees do not necessarily hold.\n Also note that the parametrization depends on the kernel type.\n Defaults to 1/9.\n goals (List[str], optional): If a list, provide \"min\" for every objective\n that shall be minimized and \"max\" for every objective\n that shall be maximized. Defaults to None, which means\n that the code maximizes all objectives.\n coef_var_threshold (float, optional): Use only points with\n a coefficient of variation below this threshold\n in the classification step. Defaults to 3.\n restarts (int): Number of random restarts that are used for hyperparameter\n optimization. Defaults to 20.\n n_jobs (int): Number of parallel processes that are used to fit\n the GPR models. 
Defaults to 1.\n \"\"\"\n from .validate_inputs import ( # pylint:disable=import-outside-toplevel\n validate_gpy_model,\n )\n\n self.restarts = kwargs.pop(\"restarts\", 20)\n self.n_jobs = validate_njobs(kwargs.pop(\"n_jobs\", 1))\n\n assert isinstance(\n self.restarts, int\n ), \"the restarts keyword must be of type int\"\n super().__init__(*args, **kwargs)\n\n validate_number_models(self.models, self.ndim)\n validate_gpy_model(self.models)\n\n def _set_data(self):\n for i, model in enumerate(self.models):\n model.set_XY(\n self.design_space[self.sampled[:, i]],\n self.y[self.sampled[:, i], i].reshape(-1, 1),\n )\n\n def _train(self):\n pass # There is no training in instance based models\n\n def _predict(self):\n\n from ..models.gpr import predict # pylint:disable=import-outside-toplevel\n\n means, stds = [], []\n for model in self.models:\n mean, std = predict(model, self.design_space)\n means.append(mean.reshape(-1, 1))\n stds.append(std.reshape(-1, 1))\n\n self._means = np.hstack(means)\n self.std = np.hstack(stds)\n\n def _set_hyperparameters(self):\n models = []\n\n train_model_pickleable_partial = partial(\n _train_model_picklable, models=self.models, restarts=self.restarts\n )\n with concurrent.futures.ProcessPoolExecutor(\n max_workers=self.n_jobs\n ) as executor:\n for model in executor.map(train_model_pickleable_partial, range(self.ndim)):\n models.append(model)\n self.models = models\n\n def _should_optimize_hyperparameters(self) -> bool:\n return linear(self.iteration, 10)\n"
] |
[
[
"numpy.hstack"
]
] |
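A minimal sketch of the prediction assembly in PALGPy._predict above: each per-objective model yields a column of means and a column of standard deviations, and numpy.hstack joins them into (n_points, n_objectives) matrices. Random numbers stand in for GPy model outputs.

import numpy as np

means, stds = [], []
for _ in range(3):            # pretend three objectives, one model each
    mean = np.random.rand(5)  # predictions for 5 design-space points
    std = np.random.rand(5)
    means.append(mean.reshape(-1, 1))
    stds.append(std.reshape(-1, 1))

print(np.hstack(means).shape, np.hstack(stds).shape)  # (5, 3) (5, 3)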
DavideConficconi/dovado
|
[
"60abf088d301cbb96660238a4f39ea2fe5cbbb18"
] |
[
"graph_generation/ndovadomo_charts.py"
] |
[
"from datetime import date\nfrom locale import normalize\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.rcsetup as rcsetup\nimport numpy as np\nimport pandas as pd\nimport sys\nimport getopt\nimport math\nimport itertools\nimport os\nimport re\nimport argparse\nimport traceback\nimport compute_hypervolume as chyv\nfrom common_charts import *\n\nndovadomo_fnt=26\nndovadomo_leg_fonts=20\nndovadomo_tick_lbls=18\n\ndef single_barchart_multiplot(data_frame, name):\n prop_cycle = plt.rcParams['axes.prop_cycle']\n colors = prop_cycle.by_key()['color']\n\n labels = data_frame.columns.values\n #Take absolute values of data\n data_frame = data_frame.applymap(lambda x: abs(x))\n\n xfigs = 2\n yfigs = math.ceil(len(data_frame.index)/xfigs)\n fig, axs = plt.subplots(xfigs, yfigs, figsize=(18,4))\n nfigs = xfigs * yfigs\n for i in np.arange(0,nfigs):\n iidx = math.floor(i/yfigs)\n jidx = i % yfigs\n if i >= len(data_frame.index):\n fig.delaxes(axs[iidx][jidx])\n continue\n \n width = 0.3\n x = np.arange(len(labels))\n values = list(data_frame.iloc[i])\n\n axs[iidx][jidx].bar(x, values, width=width, color=colors)\n axs[iidx][jidx].set_xticks(x, rotation=45)\n axs[iidx][jidx].set_xticklabels(labels, rotation=15, fontsize=8)\n #axs[iidx][jidx].set_yticks(np.arange(0,1+0.1,0.1))\n #ylabel_vals = np.arange(0,maxres+maxres/10,maxres/10)\n #ylabel_vals = [round(x,2) for x in ylabel_vals]\n #axs[iidx][jidx].set_yticklabels(ylabel_vals)\n axs[iidx][jidx].set_ylabel('Design Parameters')\n axs[iidx][jidx].grid(visible=True, which='major', linestyle='dotted',axis='y')\n #ax2.set_ylabel('Frequency (MHz)')\n\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n fig.savefig(name + '.svg', format='svg', bbox_inches = 'tight', dpi=1200)\n plt.close(fig)\n\n\ndef dual_axis_barchart_frequency_res(data_frame, name):\n\n prop_cycle = plt.rcParams['axes.prop_cycle']\n colors = prop_cycle.by_key()['color']\n\n labels = data_frame.columns.values\n #Take absolute values of data\n data_frame = data_frame.applymap(lambda x: abs(x))\n\n #Normalize frequency data (0-100) \n fmax = data_frame['frequency'].max()\n data_frame['frequency'] = data_frame['frequency'].map(lambda x: (x / fmax))\n\n xfigs = 2\n yfigs = math.ceil(len(data_frame.index)/xfigs)\n #fig, axs = plt.subplots(xfigs, yfigs, figsize=(18,4))\n fig, axs = plt.subplots(xfigs, yfigs, figsize=(22,4))\n nfigs = xfigs * yfigs\n for i in np.arange(0,nfigs):\n iidx = math.floor(i/yfigs)\n jidx = i % yfigs\n if i >= len(data_frame.index):\n fig.delaxes(axs[iidx][jidx])\n continue\n ax2 = axs[iidx][jidx].twinx()\n res_df = data_frame.drop(columns='frequency')\n maxres = max(res_df.iloc[i])\n #Normalize resource data\n res_df = res_df.apply(lambda x: x / maxres)\n \n width = 0.3\n x = np.arange(len(labels))\n values = list(res_df.iloc[i])\n values.append(data_frame['frequency'][i])\n axs[iidx][jidx].bar(x, values, width=width, color=colors)\n axs[iidx][jidx].set_xticks(x)\n axs[iidx][jidx].set_xticklabels(shorten_labels(labels))\n ax2.set_yticks(np.arange(0,1+0.1,0.1))\n ax2.set_yticklabels(np.arange(0,fmax+10,fmax/10))\n axs[iidx][jidx].set_yticks(np.arange(0,1+0.1,0.1))\n ylabel_vals = np.arange(0,maxres+maxres/10,maxres/10)\n ylabel_vals = [round(x,2) for x in ylabel_vals]\n axs[iidx][jidx].set_yticklabels(ylabel_vals)\n axs[iidx][jidx].set_ylabel('Resource Utilization (%)')\n axs[iidx][jidx].grid(visible=True, which='major', linestyle='dotted',axis='y')\n ax2.set_ylabel('Frequency (MHz)')\n\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n fig.savefig(name + 
'.svg', format='svg', bbox_inches = 'tight', dpi=1200)\n plt.close(fig)\n\ndef dual_axis_fused_barchart_frequency_res(data_frame, data_frame_2, name, normalize_res):\n\n labels_2 = data_frame_2.columns.values \n labels = data_frame.columns.values \n data_frame = data_frame.applymap(lambda x: abs(x))\n data_frame_joined = data_frame.join(data_frame_2)\n data_frame_joined = data_frame_joined.sort_values(by=['frequency'])\n data_frame = data_frame_joined.drop(columns=labels_2)\n data_frame_2 = data_frame_joined.drop(columns=labels)\n\n labels = data_frame.columns.values\n #Take absolute values of data\n data_frame = data_frame.applymap(lambda x: abs(x))\n #Normalize frequency data (0-100) \n fmax = data_frame['frequency'].max()\n data_frame['frequency'] = data_frame['frequency'].map(lambda x: (x / fmax))\n fig, ax = plt.subplots(2, 1, figsize=(5,5))\n # fig, ax = plt.subplots(2, 1, figsize=(10,5))\n\n ax2 = ax[0].twinx()\n res_df = data_frame.drop(columns='frequency')\n maxres = max(res_df.max())\n #Normalize resource data\n res_df = res_df.apply(lambda x: x / maxres)\n \n width = 0.2\n # width = 0.08\n x = np.arange(len(data_frame.index))\n offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n i = 0\n for l in labels:\n if l == 'frequency':\n ax[0].bar(x+offs[i], data_frame[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5)\n else:\n ax[0].bar(x+offs[i], res_df[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5)\n i = i + 1\n ax[0].set_xticks(x)\n ax2.set_yticks(np.linspace(0,1,num=10))\n ax2.set_yticklabels(map(math.floor, np.linspace(0,fmax,num=10)))\n ax[0].set_yticks(np.linspace(0,1,num=10))\n ylabel_vals = np.linspace(0,maxres, num=10)\n ylabel_vals = [round(x,2) for x in ylabel_vals]\n ax[0].set_yticklabels(ylabel_vals)\n if normalize_res:\n ax[0].set_ylabel('Resource Utilization (%)')\n else:\n ax[0].set_ylabel('Resources')\n ax[0].plot([0., len(x)], [100/maxres, 100/maxres], \"k--\")\n labels = np.append('100% Utilization',labels)\n ax[0].grid(visible=True, which='major', linestyle='dotted',axis='y')\n ax2.set_ylabel('Frequency (MHz)')\n\n ax[0].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(-0.1, 1.25), ncol=len(data_frame.columns))\n # ax[0].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.1, 1.2), ncol=len(data_frame.columns))\n #, bbox_to_anchor=(1.1, 0.5))\n\n labels = data_frame_2.columns.values\n #ax2 = ax[1].twinx()\n width = 0.2\n # width = 0.08\n x = np.arange(len(data_frame_2.index))\n offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n i = 0\n for l in labels:\n ax[1].bar(x+offs[i], data_frame_2[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5)\n i = i + 1\n ax[1].set_xticks(x)\n #ax.set_xticklabels(shorten_labels(labels))\n #ax2.set_yticks(np.arange(0,1+0.1,0.1))\n #ax2.set_yticklabels(map(math.floor, np.arange(0,fmax+10,fmax/10)))\n #ax[1].set_yticks(np.arange(0,1+0.1,0.1))\n #ylabel_vals = np.arange(0,maxres+maxres/10,maxres/10)\n #ylabel_vals = [round(x,2) for x in ylabel_vals]\n ax[1].set_yticks(np.arange(0,data_frame_2.max().max(),5))\n ax[1].set_ylabel('Design Parameters')\n ax[1].grid(visible=True, which='major', linestyle='dotted',axis='y')\n ax[1].set_xlabel('Solution')\n #ax[1].legend(shorten_labels(labels),loc='center left', bbox_to_anchor=(1.1, 0.5))\n ax[1].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(-0.1, 1.25), ncol=len(data_frame_2.columns))\n # ax[1].legend(shorten_labels(labels), loc='upper left', 
bbox_to_anchor=(0.0, 1.2), ncol=len(data_frame_2.columns))\n\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n fig.savefig(name + '.svg', format='svg', bbox_inches = 'tight', dpi=1200)\n fig.savefig(name + '.pdf', format='pdf', bbox_inches = 'tight', dpi=1200)\n plt.close(fig)\n\ndef dual_axis_fused_barchart_frequency_res_cicero(data_frame, data_frame_2, name, normalize_res, tirex):\n\n # labels = data_frame.columns.values \n\n data_frame = data_frame.applymap(lambda x: abs(x))\n data_frame = data_frame.sort_values(by=['frequency'])\n\n hatches = ['/', '\\\\', '|', '-', '+', 'x', 'o', 'O', '.', '*']\n matplotlib.rcParams['hatch.linewidth'] = 0.3 # previous pdf hatch linewidth\n #Take absolute values of data\n #Normalize frequency data (0-100) \n fmax = data_frame['frequency'].max()\n avgperf_max = data_frame['avg_perf_metric'].max()\n data_frame['frequency'] = data_frame['frequency'].map(lambda x: (x / fmax))\n data_frame['instr_mem_size'] = data_frame['instr_mem_size'].map(lambda x: (np.log2(x)))\n isize_max = data_frame['instr_mem_size'].max()\n\n fig, ax = plt.subplots(3, 1, figsize=(10,8))\n\n ax2 = ax[0].twinx()\n first_df=data_frame.drop(columns=['instr_mem_size','avg_perf_metric'])\n labels = first_df.columns.values\n res_df = data_frame.drop(columns='frequency')\n if tirex:\n custom_mtrc_df = res_df.drop(columns=['CLB LUTs*','CLB Registers'])\n else:\n custom_mtrc_df = res_df.drop(columns=['CLB LUTs*'])\n\n res_df_only = res_df.drop(columns=['instr_mem_size','avg_perf_metric'])\n\n #res_df_only = data_frame.drop(columns='frequency')\n maxres = max(res_df_only.max())\n #Normalize resource data\n res_df_only = res_df_only.apply(lambda x: x / maxres)\n\n width = 0.25\n x = np.arange(len(data_frame.index))\n\n offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n i = 0\n for l in labels:\n if l == 'frequency':\n ax[0].bar(x+offs[i], data_frame[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='ooo',zorder=3)\n #elif l == 'instr_mem_size' or l == 'avg_perf_metric':\n # pass\n #ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='///',zorder=3)\n else:\n ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='xxx',zorder=3)\n i = i + 1\n ax[0].set_xticks(x)\n \n ax[0].margins(x=0.01)\n ax2.margins(x=0.01)\n\n ax2.set_yticks(np.linspace(0,1,num=10))\n ax2.set_yticklabels(map(math.floor, np.linspace(0,fmax,num=10)))\n ax[0].set_yticks(np.linspace(0,1,num=10))\n ylabel_vals = np.linspace(0,maxres, num=10)\n ylabel_vals = [round(x,2) for x in ylabel_vals]\n ax[0].set_yticklabels(ylabel_vals)\n if normalize_res:\n ax[0].set_ylabel('Resource Utilization (%)')\n else:\n ax[0].set_ylabel('Resources')\n ax[0].plot([0., len(x)], [100/maxres, 100/maxres], \"k--\")\n labels = np.append('100% Utilization',labels)\n ax[0].grid(visible=True, which='major', linestyle='dotted',axis='y',zorder=0)\n ax2.set_ylabel('Frequency (MHz)')\n\n ax[0].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.28), ncol=len(first_df.columns))\n\n\n\n custom_mtrc_df_perf = custom_mtrc_df.drop(columns='instr_mem_size')\n labels = custom_mtrc_df.columns.values\n labels = np.flip(labels)\n maxres = max(custom_mtrc_df_perf.max())\n\n ax2 = ax[1].twinx()\n width = 0.25\n x = np.arange(len(custom_mtrc_df.index))\n offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n res_df_only = res_df_only.apply(lambda x: x / maxres)\n\n i = 0\n for l in labels:\n if 
l == 'instr_mem_size':\n ax[1].bar(x+offs[i], custom_mtrc_df[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='///',zorder=3)\n else:\n ax[1].bar(x+offs[i], custom_mtrc_df_perf[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='///',zorder=3)\n i = i + 1\n ax[1].set_xticks(x)\n \n ax[1].margins(x=0.01)\n ax2.margins(x=0.01)\n ax2.set_yticks(np.linspace(0,1,num=10))\n ax2.set_yticklabels(map(math.floor, np.linspace(0,isize_max,num=10)))\n ax[1].set_yticks(np.linspace(0,avgperf_max,num=10))\n ylabel_vals = np.linspace(0,maxres, num=10)\n ylabel_vals = [round(x,2) for x in ylabel_vals]\n ax[1].set_yticklabels(ylabel_vals)\n if normalize_res:\n ax[1].set_ylabel('Throughput [Gbit/s]')\n else:\n ax[1].set_ylabel('Resources')\n ax[1].plot([0., len(x)], [100/maxres, 100/maxres], \"k--\")\n labels = np.append('100% Utilization',labels)\n ax[1].grid(visible=True, which='major', linestyle='dotted',axis='y',zorder=0)\n ax2.set_ylabel('$I Size [2**x]')\n\n ax[1].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.28), ncol=len(custom_mtrc_df.columns))\n\n # labels = data_frame_2.columns.values\n # width = 0.25\n # x = np.arange(len(data_frame_2.index))\n # offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n # i = 0\n # for l in labels:\n # ax[1].bar(x+offs[i], data_frame_2[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5,zorder=3)\n # i = i + 1\n # ax[1].set_xticks(x)\n # ax[1].set_yticks(np.arange(0,data_frame_2.max().max(),5))\n # ax[1].set_ylabel('Design Parameters')\n # ax[1].grid(visible=True, which='major', linestyle='dotted',axis='y',zorder=0)\n # ax[1].set_xlabel('Solution')\n # ax[1].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.4), ncol=len(data_frame_2.columns))\n\n # #fig.tight_layout()\n # fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n # fig.savefig(name + '.svg', format='svg', bbox_inches = 'tight', dpi=1200)\n\n # custom_mtrc_noinstr_df = custom_mtrc_df.drop(columns='instr_mem_size')\n # labels = custom_mtrc_df.columns.values\n # ax2 = ax[1].twinx()\n # #res_df = data_frame.drop(columns='frequency')\n # maxres = max(custom_mtrc_noinstr_df.max())\n # #Normalize resource data\n # custom_mtrc_noinstr_df = custom_mtrc_noinstr_df.apply(lambda x: x / avgperf_max)\n # custom_mtrc_df['instr_mem_size'] = custom_mtrc_df['instr_mem_size'].map(lambda x: (np.log2(x)))\n \n # width = 0.08\n # x = np.arange(len(custom_mtrc_df.index))\n # offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n # i = 0\n # for l in labels:\n # if l == 'instr_mem_size':\n # ax[1].bar(x+offs[i], custom_mtrc_df[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5)\n # else:\n # ax[1].bar(x+offs[i], custom_mtrc_noinstr_df[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5)\n # i = i + 1\n # ax[1].set_xticks(x)\n # ax2.set_yticks(np.linspace(0,1,num=10))\n # ax2.set_yticklabels(map(math.floor, np.linspace(0,isize_max,num=10)))\n # ax[1].set_yticks(np.linspace(0,1,num=10))\n # ylabel_vals = np.linspace(0,avgperf_max, num=10)\n # ylabel_vals = [round(x,2) for x in ylabel_vals]\n # ax[1].set_yticklabels(ylabel_vals)\n # if normalize_res:\n # ax[1].set_ylabel('Resource Utilization (%)')\n # else:\n # ax[1].set_ylabel('Resources')\n # ax[1].plot([0., len(x)], [100/avgperf_max, 100/avgperf_max], \"k--\")\n # labels = np.append('100% Utilization',labels)\n # ax[1].grid(visible=True, which='major', linestyle='dotted',axis='y')\n # ax2.set_ylabel('Frequency (MHz)')\n\n 
# ax[1].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.1, 1.4), ncol=len(custom_mtrc_df.columns))\n\n\n\n labels = data_frame_2.columns.values\n width = 0.25\n x = np.arange(len(data_frame_2.index))\n offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n i = 0\n for l in labels:\n ax[2].bar(x+offs[i], data_frame_2[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5)\n i = i + 1\n ax[2].set_xticks(x)\n \n ax[2].margins(x=0.01)\n ax[2].set_yticks(np.arange(0,data_frame_2.max().max(),5))\n ax[2].set_ylabel('Design Parameters')\n ax[2].grid(visible=True, which='major', linestyle='dotted',axis='y')\n ax[2].set_xlabel('Solution')\n ax[2].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.28), ncol=len(data_frame_2.columns))\n\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n #fig.tight_layout(pad=0.5)\n fig.savefig(name + '.svg', format='svg', bbox_inches = 'tight', dpi=1200)\n fig.savefig(name + '.pdf', format='pdf', bbox_inches = 'tight', dpi=1200)\n plt.close(fig)\n\ndef dual_axis_fused_barchart_frequency_res_third(data_frame, data_frame_2, name, normalize_res, tirex):\n\n labels_2 = data_frame_2.columns.values \n labels = data_frame.columns.values \n data_frame = data_frame.applymap(lambda x: abs(x))\n data_frame_joined = data_frame.join(data_frame_2)\n data_frame_joined = data_frame_joined.sort_values(by=['frequency'])\n idx_names= data_frame_joined[data_frame_joined['CLB LUTs*'] > 100].index\n data_frame_joined.drop(idx_names, inplace=True)\n data_frame_joined = data_frame_joined.applymap(lambda x: abs(x))\n data_frame = data_frame_joined.drop(columns=labels_2)\n data_frame_2 = data_frame_joined.drop(columns=labels)\n\n # hatches = ['/', '\\\\', '|', '-', '+', 'x', 'o', 'O', '.', '*']\n # matplotlib.rcParams['hatch.linewidth'] = 0.3 # previous pdf hatch linewidth\n #Take absolute values of data\n #Normalize frequency data (0-100) \n fmax = data_frame['frequency'].max()\n avgperf_max = data_frame['avg_perf_metric'].max()\n data_frame['frequency'] = data_frame['frequency'].map(lambda x: (x / fmax))\n data_frame['instr_mem_size'] = data_frame['instr_mem_size'].map(lambda x: (np.log2(x)))\n isize_max = data_frame['instr_mem_size'].max()\n\n fig, ax = plt.subplots(2, 1, figsize=(9,5))\n\n ax2 = ax[0].twinx()\n first_df=data_frame #.drop(columns=['instr_mem_size','avg_perf_metric'])\n labels = first_df.columns.values\n res_df = data_frame.drop(columns='frequency')\n\n maxres = max(res_df.max())\n res_df_only = res_df.apply(lambda x: x / maxres)\n\n width = 0.25\n x = np.arange(len(data_frame.index))\n\n offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n i = 0\n for l in labels:\n if l == 'frequency':\n ax[0].bar(x+offs[i], data_frame[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5,zorder=3)\n # ax[0].bar(x+offs[i], data_frame[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='///',zorder=3)\n elif l == 'instr_mem_size' or l == 'avg_perf_metric':\n ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5,zorder=5)\n # ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='ooo',zorder=3)\n else:\n ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5,zorder=4)\n # ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='xxx',zorder=3)\n i = i + 1\n ax[0].set_xticks(x)\n \n 
ax[0].margins(x=0.01)\n ax2.margins(x=0.01)\n\n ax2.set_yticks(np.linspace(0,1,num=10))\n ax2.set_yticklabels(map(math.floor, np.linspace(0,fmax,num=10)))\n ax[0].set_yticks(np.linspace(0,1,num=10))\n ylabel_vals = np.linspace(0,maxres, num=10)\n ylabel_vals = [round(x,2) for x in ylabel_vals]\n ax[0].set_yticklabels(ylabel_vals)\n if normalize_res:\n ax[0].set_ylabel('Scaled Metrics')\n # ax[0].set_ylabel('Resource Utilization (%)\\nThroughput [Gb/s]\\n2^x $I lines')\n else:\n ax[0].set_ylabel('Resources')\n ax[0].plot([0., len(x)], [100/maxres, 100/maxres], \"k--\")\n labels = np.append('100% Utilization',labels)\n ax[0].grid(visible=True, which='major', linestyle='dotted',axis='y',zorder=0)\n ax2.set_ylabel('Frequency (MHz)')\n\n ax[0].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.28), ncol=len(labels))\n\n\n labels = data_frame_2.columns.values\n width = 0.25\n x = np.arange(len(data_frame_2.index))\n offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n i = 0\n for l in labels:\n ax[1].bar(x+offs[i], data_frame_2[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5)\n i = i + 1\n ax[1].set_xticks(x)\n \n ax[1].margins(x=0.01)\n ax[1].set_yticks(np.arange(0,data_frame_2.max().max(),5))\n ax[1].set_ylabel('Design Parameters')\n ax[1].grid(visible=True, which='major', linestyle='dotted',axis='y')\n ax[1].set_xlabel('Solution')\n ax[1].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.28), ncol=len(data_frame_2.columns))\n\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n #fig.tight_layout(pad=0.5)\n fig.savefig(name + '.svg', format='svg', bbox_inches = 'tight', dpi=1200)\n fig.savefig(name + '.pdf', format='pdf', bbox_inches = 'tight', dpi=1200)\n plt.close(fig)\n\ndef dual_axis_fused_barchart_frequency_res_hypervolume(data_frame, data_frame_2, name, normalize_res, mins, maxs):\n\n\n new_df = data_frame.copy()\n #print(data_frame)\n data_frame = data_frame.applymap(lambda x: abs(x))\n avgperf_max = data_frame['avg_perf_metric'].max()\n data_frame['instr_mem_size'] = data_frame['instr_mem_size'].map(lambda x: (np.log2(x)))\n isize_max = data_frame['instr_mem_size'].max()\n #compute hv\n hv, nadir = chyv.compute_hv_nadir_as_list(new_df, mins, maxs)\n contribs = hv.contributions(nadir)\n contribs_df=pd.DataFrame(contribs, columns=['hypervolume'])\n #print(contribs_df.sort_values(by=['hypervolume'], ascending=False).head(5))\n # print(data_frame)\n #join\n labels_2 = data_frame_2.columns.values \n labels = data_frame.columns.values \n data_frame = data_frame.applymap(lambda x: abs(x))\n data_frame = data_frame.join(contribs_df)\n data_frame_joined = data_frame.join(data_frame_2)\n #idx_names= data_frame_joined[data_frame_joined['CLB LUTs*'] > 100].index\n #data_frame_joined.drop(idx_names, inplace=True)\n data_frame_joined = data_frame_joined.sort_values(by=['hypervolume'], ascending=False).head(5)\n data_frame_joined = data_frame_joined.sort_values(by=['frequency'])\n data_frame_joined = data_frame_joined.drop(columns='hypervolume')\n #TODO apply the filtering\n #filter\n #sort\n #drop\n\n\n\n #data_frame_joined = data_frame_joined.applymap(lambda x: abs(x))\n data_frame = data_frame_joined.drop(columns=labels_2)\n data_frame_2 = data_frame_joined.drop(columns=labels)\n\n #Take absolute values of data\n #Normalize frequency data (0-100) \n fmax = data_frame['frequency'].max()\n data_frame['frequency'] = data_frame['frequency'].map(lambda x: (x / fmax))\n\n fig, ax = plt.subplots(2, 1, figsize=(16,10))\n 
ax[0].tick_params(axis='both', which='both', labelsize=ndovadomo_tick_lbls)\n ax[1].tick_params(axis='both', which='both', labelsize=ndovadomo_tick_lbls)\n\n ax2 = ax[0].twinx()\n ax2.tick_params(axis='both', which='both', labelsize=ndovadomo_tick_lbls)\n\n first_df=data_frame #.drop(columns=['instr_mem_size','avg_perf_metric'])\n labels = first_df.columns.values\n res_df = data_frame.drop(columns='frequency')\n\n maxres = max(res_df.max())\n res_df_only = res_df.apply(lambda x: x / maxres)\n\n width = 0.2\n x = np.arange(len(data_frame.index))\n\n offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n i = 0\n for l in labels:\n if l == 'frequency':\n ax[0].bar(x+offs[i], data_frame[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5,zorder=3)\n # ax[0].bar(x+offs[i], data_frame[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='///',zorder=3)\n elif l == 'instr_mem_size' or l == 'avg_perf_metric':\n ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5,zorder=5)\n # ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='ooo',zorder=3)\n else:\n ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5,zorder=4)\n # ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='xxx',zorder=3)\n i = i + 1\n ax[0].set_xticks(x)\n \n ax[0].margins(x=0.01)\n ax2.margins(x=0.01)\n\n tick_num=8\n ax2.set_yticks(np.linspace(0,1,num=tick_num))\n ax2.set_yticklabels(map(math.floor, np.linspace(0,fmax,num=tick_num)))\n ax[0].set_yticks(np.linspace(0,1,num=tick_num))\n ylabel_vals = np.linspace(0,maxres, num=tick_num)\n ylabel_vals = [round(x,2) for x in ylabel_vals]\n ax[0].set_yticklabels(ylabel_vals)\n if normalize_res:\n ax[0].set_ylabel('Scaled Metrics', fontsize=ndovadomo_fnt)\n # ax[0].set_ylabel('Resource Utilization (%)\\nThroughput [Gb/s]\\n2^x $I lines')\n else:\n ax[0].set_ylabel('Resources')\n ax[0].plot([0., len(x)], [100/maxres, 100/maxres], \"k--\")\n labels = np.append('100% Utilization',labels)\n ax[0].grid(visible=True, which='major', linestyle='dotted',axis='y',zorder=0)\n ax2.set_ylabel('Frequency (MHz)', fontsize=ndovadomo_fnt)\n\n ax[0].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.25), ncol=len(labels),fontsize=ndovadomo_leg_fonts)\n # ax[0].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.28), ncol=int(math.ceil(len(labels)/2)))\n\n\n labels = data_frame_2.columns.values\n width = 0.2\n x = np.arange(len(data_frame_2.index))\n offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n i = 0\n for l in labels:\n ax[1].bar(x+offs[i], data_frame_2[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5)\n i = i + 1\n ax[1].set_xticks(x)\n \n ax[1].margins(x=0.01)\n ax[1].set_yticks(np.arange(0,data_frame_2.max().max(),5))\n ax[1].set_ylabel('Design Parameters', fontsize=ndovadomo_fnt)\n ax[1].grid(visible=True, which='major', linestyle='dotted',axis='y')\n ax[1].set_xlabel('Solution', fontsize=ndovadomo_fnt)\n ax[1].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.25), ncol=len(data_frame_2.columns),fontsize=ndovadomo_leg_fonts)\n # ax[1].legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.28), ncol=int(math.ceil(len(data_frame_2.columns)/2)))\n\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n #fig.tight_layout(pad=0.5)\n fig.savefig(name + 
'.svg', format='svg', bbox_inches = 'tight', dpi=1200)\n fig.savefig(name + '.pdf', format='pdf', bbox_inches = 'tight', dpi=1200)\n plt.close(fig)\n\ndef dual_axis_fused_barchart_frequency_res_separated(data_frame, data_frame_2, name, name2,normalize_res, tirex):\n\n labels_2 = data_frame_2.columns.values \n labels = data_frame.columns.values \n data_frame = data_frame.applymap(lambda x: abs(x))\n data_frame_joined = data_frame.join(data_frame_2)\n data_frame_joined = data_frame_joined.sort_values(by=['frequency'])\n idx_names= data_frame_joined[data_frame_joined['CLB LUTs*'] > 100].index\n data_frame_joined.drop(idx_names, inplace=True)\n data_frame_joined = data_frame_joined.applymap(lambda x: abs(x))\n\n data_frame = data_frame_joined.drop(columns=labels_2)\n data_frame_2 = data_frame_joined.drop(columns=labels)\n\n # hatches = ['/', '\\\\', '|', '-', '+', 'x', 'o', 'O', '.', '*']\n # matplotlib.rcParams['hatch.linewidth'] = 0.3 # previous pdf hatch linewidth\n #Take absolute values of data\n #Normalize frequency data (0-100) \n fmax = data_frame['frequency'].max()\n avgperf_max = data_frame['avg_perf_metric'].max()\n data_frame['frequency'] = data_frame['frequency'].map(lambda x: (x / fmax))\n data_frame['instr_mem_size'] = data_frame['instr_mem_size'].map(lambda x: (np.log2(x)))\n isize_max = data_frame['instr_mem_size'].max()\n\n fig, ax = plt.subplots(1, 1, figsize=(11,3))\n\n ax2 = ax.twinx()\n first_df=data_frame #.drop(columns=['instr_mem_size','avg_perf_metric'])\n labels = first_df.columns.values\n res_df = data_frame.drop(columns='frequency')\n\n maxres = max(res_df.max())\n res_df_only = res_df.apply(lambda x: x / maxres)\n\n width = 0.25\n x = np.arange(len(data_frame.index))\n\n offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n i = 0\n for l in labels:\n if l == 'frequency':\n ax.bar(x+offs[i], data_frame[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5,zorder=3)\n # ax[0].bar(x+offs[i], data_frame[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='///',zorder=3)\n elif l == 'instr_mem_size' or l == 'avg_perf_metric':\n ax.bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5,zorder=3)\n # ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='ooo',zorder=3)\n else:\n ax.bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5,zorder=3)\n # ax[0].bar(x+offs[i], res_df_only[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5, hatch='xxx',zorder=3)\n i = i + 1\n ax.set_xticks(x)\n \n ax.margins(x=0.01)\n ax2.margins(x=0.01)\n\n ax2.set_yticks(np.linspace(0,1,num=10))\n ax2.set_yticklabels(map(math.floor, np.linspace(0,fmax,num=10)))\n ax.set_yticks(np.linspace(0,1,num=10))\n ylabel_vals = np.linspace(0,maxres, num=10)\n ylabel_vals = [round(x,2) for x in ylabel_vals]\n ax.set_yticklabels(ylabel_vals)\n if normalize_res:\n ax.set_ylabel('Scaled Metrics [%] [Gb/s] [2^x lines]')\n else:\n ax.set_ylabel('Resources')\n ax.plot([0., len(x)], [100/maxres, 100/maxres], \"k--\")\n labels = np.append('100% Utilization',labels)\n ax.grid(visible=True, which='major', linestyle='dotted',axis='y',zorder=0)\n ax2.set_ylabel('Frequency (MHz)')\n\n ax.legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.2), ncol=len(labels))\n # fig.tight_layout(rect=[0, 0.0, 1, 1])\n ax.set_xlabel('Solution')\n\n fig.savefig(name + '.svg', format='svg', bbox_inches = 'tight', dpi=1200)\n 
fig.savefig(name + '.pdf', format='pdf', bbox_inches = 'tight', dpi=1200)\n\n fig, ax = plt.subplots(1, 1, figsize=(9,4))\n\n labels = data_frame_2.columns.values\n width = 0.25\n x = np.arange(len(data_frame_2.index))\n offs = np.arange((-len(labels)/2)*width, (len(labels)/2)*width, width)\n i = 0\n for l in labels:\n ax.bar(x+offs[i], data_frame_2[l], width=width, color=colors[i],edgecolor='black',linewidth=0.5)\n i = i + 1\n ax.set_xticks(x)\n \n ax.margins(x=0.01)\n ax.set_yticks(np.arange(0,data_frame_2.max().max(),5))\n ax.set_ylabel('Design Parameters')\n ax.grid(visible=True, which='major', linestyle='dotted',axis='y')\n ax.set_xlabel('Solution')\n ax.legend(shorten_labels(labels), loc='upper left', bbox_to_anchor=(0.0, 1.28), ncol=len(data_frame_2.columns))\n\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n #fig.tight_layout(pad=0.5)\n fig.savefig(name2 + '.svg', format='svg', bbox_inches = 'tight', dpi=1200)\n fig.savefig(name2 + '.pdf', format='pdf', bbox_inches = 'tight', dpi=1200)\n\n plt.close(fig)\n\n"
] |
[
[
"numpy.log2",
"numpy.linspace",
"numpy.arange",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.append",
"matplotlib.pyplot.close",
"numpy.flip"
]
] |
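Note: the plotting functions in the record above all reuse one pattern — grouped bars offset around each x position, with a twin y-axis mapping scaled values back to MHz. A minimal, self-contained sketch of that pattern follows (toy labels, toy values, and an assumed 300 MHz maximum; none of these come from the record):

import numpy as np
import matplotlib.pyplot as plt

# Toy stand-ins for the record's DataFrames; values and labels are illustrative.
labels = ['CLB LUTs*', 'frequency', 'instr_mem_size']
data = {'CLB LUTs*':      [0.4, 0.55, 0.7],
        'frequency':      [0.5, 0.8, 1.0],
        'instr_mem_size': [0.3, 0.6, 0.9]}   # all pre-scaled to [0, 1]

fig, ax = plt.subplots(figsize=(6, 3))
ax2 = ax.twinx()                              # second y-axis for frequency

width = 0.25
x = np.arange(3)                              # one group of bars per solution
offs = np.arange((-len(labels) / 2) * width, (len(labels) / 2) * width, width)
for i, l in enumerate(labels):
    ax.bar(x + offs[i], data[l], width=width, edgecolor='black', linewidth=0.5)

ax.set_xticks(x)
ax.set_ylabel('Scaled Metrics')
ax2.set_ylabel('Frequency (MHz)')
ticks = np.linspace(0, 1, 6)
ax2.set_yticks(ticks)
ax2.set_yticklabels([f'{t * 300:.0f}' for t in ticks])  # 300 MHz max is assumed
fig.savefig('grouped_bars.svg', format='svg', bbox_inches='tight')
plt.close(fig)
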
SUTDBrainLab/MGP-VAE
|
[
"0b7c252f9f7bdcdf3c4177ac40585633a0e98a0f"
] |
[
"create_data/create_moving_mnist_data.py"
] |
[
"from PIL import Image\nimport sys\nimport os\nimport math\nimport numpy as np\n\n###########################################################################################\n# script to generate moving mnist video dataset (frame by frame) as described in\n# [1] arXiv:1502.04681 - Unsupervised Learning of Video Representations Using LSTMs\n# Srivastava et al\n# by Tencia Lee\n# saves in hdf5, npz, or jpg (individual frames) format\n###########################################################################################\n\n# helper functions\ndef arr_from_img(im,shift=0):\n\tw,h=im.size\n\tarr=im.getdata()\n\tc = np.product(arr.size) / (w*h)\n\treturn np.asarray(arr, dtype=np.float32).reshape((h,w,c)).transpose(2,1,0) / 255. - shift\n\ndef get_picture_array(X, index, shift=0):\n\tch, w, h = X.shape[1], X.shape[2], X.shape[3]\n\tret = ((X[index]+shift)*255.).reshape(ch,w,h).transpose(2,1,0).clip(0,255).astype(np.uint8)\n\tif ch == 1:\n\t\tret=ret.reshape(h,w)\n\treturn ret\n\n# loads mnist from web on demand\ndef load_dataset():\n\tif sys.version_info[0] == 2:\n\t\tfrom urllib import urlretrieve\n\telse:\n\t\tfrom urllib.request import urlretrieve\n\tdef download(filename, source='http://yann.lecun.com/exdb/mnist/'):\n\t\tprint(\"Downloading %s\" % filename)\n\t\turlretrieve(source + filename, filename)\n\timport gzip\n\tdef load_mnist_images(filename):\n\t\tif not os.path.exists(filename):\n\t\t\tdownload(filename)\n\t\twith gzip.open(filename, 'rb') as f:\n\t\t\tdata = np.frombuffer(f.read(), np.uint8, offset=16)\n\t\tdata = data.reshape(-1, 1, 28, 28).transpose(0,1,3,2)\n\t\treturn data / np.float32(255)\n\treturn load_mnist_images('train-images-idx3-ubyte.gz')\n\n# generates and returns video frames in uint8 array\ndef generate_moving_mnist(shape=(64,64), seq_len=30, seqs=10000, num_sz=28, nums_per_image=1):\n\tmnist = load_dataset()\n\twidth, height = shape\n\tlims = (x_lim, y_lim) = width-num_sz, height-num_sz\n\tdataset = np.empty((seq_len*seqs, 1, width, height), dtype=np.uint8)\n\tfor seq_idx in xrange(seqs):\n\t\t# randomly generate direc/speed/position, calculate velocity vector\n\t\tdirecs = np.pi * (np.random.rand(nums_per_image)*2 - 1)\n\t\tspeeds = np.random.randint(5, size=nums_per_image)+2\n\t\tveloc = [(v*math.cos(d), v*math.sin(d)) for d,v in zip(direcs, speeds)]\n\t\tmnist_images = [Image.fromarray(get_picture_array(mnist,r,shift=0)).resize((num_sz,num_sz), Image.ANTIALIAS) \\\n\t\t\t for r in np.random.randint(0, mnist.shape[0], nums_per_image)]\n\t\tpositions = [(np.random.rand()*x_lim, np.random.rand()*y_lim) for _ in xrange(nums_per_image)]\n\t\tfor frame_idx in xrange(seq_len):\n\t\t\tcanvases = [Image.new('L', (width,height)) for _ in xrange(nums_per_image)]\n\t\t\tcanvas = np.zeros((1,width,height), dtype=np.float32)\n\t\t\tfor i,canv in enumerate(canvases):\n\t\t\t\tcanv.paste(mnist_images[i], tuple(map(lambda p: int(round(p)), positions[i])))\n\t\t\t\tcanvas += arr_from_img(canv, shift=0)\n\t\t\t# update positions based on velocity\n\t\t\tnext_pos = [map(sum, zip(p,v)) for p,v in zip(positions, veloc)]\n\t\t\t# bounce off wall if a we hit one\n\t\t\tfor i, pos in enumerate(next_pos):\n\t\t\t\tfor j, coord in enumerate(pos):\n\t\t\t\t\tif coord < -2 or coord > lims[j]+2:\n\t\t\t\t\t\tveloc[i] = tuple(list(veloc[i][:j]) + [-1 * veloc[i][j]] + list(veloc[i][j+1:]))\n\t\t\tpositions = [map(sum, zip(p,v)) for p,v in zip(positions, veloc)]\n\t\t\t# copy additive canvas to data array\n\t\t\tdataset[seq_idx*seq_len+frame_idx] = (canvas * 
255).astype(np.uint8).clip(0,255)\n\treturn dataset\n\ndef main(dest, filetype='npz', frame_size=64, seq_len=20, seqs=10000, num_sz=28, nums_per_image=1):\n\tdat = generate_moving_mnist(shape=(frame_size,frame_size), seq_len=seq_len, seqs=seqs, \\\n\t\t\t\t\t\t\t\tnum_sz=num_sz, nums_per_image=nums_per_image)\n\tn = seqs * seq_len\n\tif filetype == 'hdf5':\n\t\timport h5py\n\t\tfrom fuel.datasets.hdf5 import H5PYDataset\n\t\tdef save_hd5py(dataset, destfile, indices_dict):\n\t\t\tf = h5py.File(destfile, mode='w')\n\t\t\timages = f.create_dataset('images', dataset.shape, dtype='uint8')\n\t\t\timages[...] = dataset\n\t\t\tsplit_dict = dict((k, {'images':v}) for k,v in indices_dict.iteritems())\n\t\t\tf.attrs['split'] = H5PYDataset.create_split_array(split_dict)\n\t\t\tf.flush()\n\t\t\tf.close()\n\t\tindices_dict = {'train': (0, n*9/10), 'test': (n*9/10, n)}\n\t\tsave_hd5py(dat, dest, indices_dict)\n\telif filetype == 'npz':\n\t\tnp.savez(dest, dat)\n\telif filetype == 'jpg':\n\t\tfor i in xrange(dat.shape[0]):\n\t\t\tImage.fromarray(get_picture_array(dat, i, shift=0)).save(os.path.join(dest, '{}.jpg'.format(i)))\n\nif __name__ == '__main__':\n\timport argparse\n\tparser = argparse.ArgumentParser(description='Command line options')\n\tparser.add_argument('--dest', type=str, dest='dest', default='./data/')\n\tparser.add_argument('--filetype', type=str, dest='filetype', default='npz')\n\tparser.add_argument('--frame_size', type=int, dest='frame_size', default='10')\n\tparser.add_argument('--seq_len', type=int, dest='seq_len', default='20') # length of each sequence\n\tparser.add_argument('--seqs', type=int, dest='seqs', default='10000') # number of sequences to generate\n\tparser.add_argument('--num_sz', type=int, dest='num_sz', default='28') # size of mnist digit within frame\n\tparser.add_argument('--nums_per_image', type=int, dest='nums_per_image', default='1') # number of digits in each frame\n\targs = parser.parse_args(sys.argv[1:])\n\tmain(**{k:v for (k,v) in vars(args).items() if v is not None})\n"
] |
[
[
"numpy.product",
"numpy.savez",
"numpy.asarray",
"numpy.random.rand",
"numpy.float32",
"numpy.zeros",
"numpy.empty",
"numpy.random.randint"
]
] |
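Note: the generator in the record above advances each digit with a reflect-at-the-walls rule. A minimal sketch of just that position/velocity update, detached from the image code (the canvas limit of 64 - 28 = 36 and the 2-pixel slack mirror the record; the starting values are made up):

# Reflect a velocity component whenever the next position would leave the
# canvas (plus a small slack), then advance with the possibly-flipped velocity.
def step(pos, vel, lims, slack=2):
    nxt = [p + v for p, v in zip(pos, vel)]
    vel = list(vel)
    for j, coord in enumerate(nxt):
        if coord < -slack or coord > lims[j] + slack:
            vel[j] = -vel[j]
    return tuple(p + v for p, v in zip(pos, vel)), tuple(vel)

pos, vel = (0.0, 30.0), (-3.0, 4.0)
for _ in range(5):
    pos, vel = step(pos, vel, lims=(36, 36))   # 64 - 28 = 36, as in the record
print(pos, vel)
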
cuent/pytorch-lightning
|
[
"fde972ffc2cdca3fccb904d29c2f1c32963fcd72"
] |
[
"pl_examples/domain_templates/generative_adversarial_net.py"
] |
[
"\"\"\"\nTo run this template just do:\npython generative_adversarial_net.py\n\nAfter a few epochs, launch TensorBoard to see the images being generated at every batch:\n\ntensorboard --logdir default\n\"\"\"\nimport os\nfrom argparse import ArgumentParser, Namespace\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F # noqa\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import MNIST\n\nfrom pytorch_lightning.core import LightningModule, LightningDataModule\nfrom pytorch_lightning.trainer import Trainer\n\n\nclass Generator(nn.Module):\n def __init__(self, latent_dim, img_shape):\n super().__init__()\n self.img_shape = img_shape\n\n def block(in_feat, out_feat, normalize=True):\n layers = [nn.Linear(in_feat, out_feat)]\n if normalize:\n layers.append(nn.BatchNorm1d(out_feat, 0.8))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.model = nn.Sequential(\n *block(latent_dim, 128, normalize=False),\n *block(128, 256),\n *block(256, 512),\n *block(512, 1024),\n nn.Linear(1024, int(np.prod(img_shape))),\n nn.Tanh()\n )\n\n def forward(self, z):\n img = self.model(z)\n img = img.view(img.size(0), *self.img_shape)\n return img\n\n\nclass Discriminator(nn.Module):\n def __init__(self, img_shape):\n super().__init__()\n\n self.model = nn.Sequential(\n nn.Linear(int(np.prod(img_shape)), 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 1),\n )\n\n def forward(self, img):\n img_flat = img.view(img.size(0), -1)\n validity = self.model(img_flat)\n\n return validity\n\n\nclass GAN(LightningModule):\n @staticmethod\n def add_argparse_args(parent_parser: ArgumentParser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument(\"--lr\", type=float, default=0.0002, help=\"adam: learning rate\")\n parser.add_argument(\"--b1\", type=float, default=0.5,\n help=\"adam: decay of first order momentum of gradient\")\n parser.add_argument(\"--b2\", type=float, default=0.999,\n help=\"adam: decay of second order momentum of gradient\")\n parser.add_argument(\"--latent_dim\", type=int, default=100,\n help=\"dimensionality of the latent space\")\n\n return parser\n\n def __init__(self, hparams: Namespace):\n super().__init__()\n\n self.hparams = hparams\n\n # networks\n mnist_shape = (1, 28, 28)\n self.generator = Generator(latent_dim=self.hparams.latent_dim, img_shape=mnist_shape)\n self.discriminator = Discriminator(img_shape=mnist_shape)\n\n self.validation_z = torch.randn(8, self.hparams.latent_dim)\n\n self.example_input_array = torch.zeros(2, self.hparams.latent_dim)\n\n def forward(self, z):\n return self.generator(z)\n\n @staticmethod\n def adversarial_loss(y_hat, y):\n return F.binary_cross_entropy_with_logits(y_hat, y)\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n imgs, _ = batch\n\n # sample noise\n z = torch.randn(imgs.shape[0], self.hparams.latent_dim)\n z = z.type_as(imgs)\n\n # train generator\n if optimizer_idx == 0:\n # ground truth result (ie: all fake)\n # put on GPU because we created this tensor inside training_loop\n valid = torch.ones(imgs.size(0), 1)\n valid = valid.type_as(imgs)\n\n # adversarial loss is binary cross-entropy\n g_loss = self.adversarial_loss(self.discriminator(self(z)), valid)\n tqdm_dict = {'g_loss': g_loss}\n self.log_dict(tqdm_dict)\n return g_loss\n\n # train discriminator\n if optimizer_idx == 1:\n # Measure 
discriminator's ability to classify real from generated samples\n\n # how well can it label as real?\n valid = torch.ones(imgs.size(0), 1)\n valid = valid.type_as(imgs)\n\n real_loss = self.adversarial_loss(self.discriminator(imgs), valid)\n\n # how well can it label as fake?\n fake = torch.zeros(imgs.size(0), 1)\n fake = fake.type_as(imgs)\n\n fake_loss = self.adversarial_loss(\n self.discriminator(self(z).detach()), fake)\n\n # discriminator loss is the average of these\n d_loss = (real_loss + fake_loss) / 2\n tqdm_dict = {'d_loss': d_loss}\n self.log_dict(tqdm_dict)\n\n return d_loss\n\n def configure_optimizers(self):\n lr = self.hparams.lr\n b1 = self.hparams.b1\n b2 = self.hparams.b2\n\n opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2))\n opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))\n return [opt_g, opt_d], []\n\n def on_epoch_end(self):\n z = self.validation_z.type_as(self.generator.model[0].weight)\n\n # log sampled images\n sample_imgs = self(z)\n grid = torchvision.utils.make_grid(sample_imgs)\n self.logger.experiment.add_image('generated_images', grid, self.current_epoch)\n\n\nclass MNISTDataModule(LightningDataModule):\n def __init__(self, batch_size: int = 64, data_path: str = os.getcwd(), num_workers: int = 4):\n super().__init__()\n self.batch_size = batch_size\n self.data_path = data_path\n self.num_workers = num_workers\n\n self.transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5])])\n self.dims = (1, 28, 28)\n\n def prepare_data(self, stage=None):\n # Use this method to do things that might write to disk or that need to be done only from a single GPU\n # in distributed settings. Like downloading the dataset for the first time.\n MNIST(self.data_path, train=True, download=True, transform=transforms.ToTensor())\n\n def setup(self, stage=None):\n # There are also data operations you might want to perform on every GPU, such as applying transforms\n # defined explicitly in your datamodule or assigned in init.\n self.mnist_train = MNIST(self.data_path, train=True, transform=self.transform)\n\n def train_dataloader(self):\n return DataLoader(self.mnist_train, batch_size=self.batch_size, num_workers=self.num_workers)\n\n\ndef main(args: Namespace) -> None:\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = GAN(args)\n\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n # If use distubuted training PyTorch recommends to use DistributedDataParallel.\n # See: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel\n dm = MNISTDataModule.from_argparse_args(args)\n trainer = Trainer.from_argparse_args(args)\n\n # ------------------------\n # 3 START TRAINING\n # ------------------------\n trainer.fit(model, dm)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n\n # Add program level args, if any.\n # ------------------------\n # Add LightningDataLoader args\n parser = MNISTDataModule.add_argparse_args(parser)\n # Add model specific args\n parser = GAN.add_argparse_args(parser)\n # Add trainer args\n parser = Trainer.add_argparse_args(parser)\n # Parse all arguments\n args = parser.parse_args()\n\n main(args)\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.zeros",
"torch.randn",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.utils.data.DataLoader",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"numpy.prod"
]
] |
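Note: the two branches of training_step in the record above reduce to two binary-cross-entropy terms. A minimal sketch with random logits standing in for the discriminator's outputs (shapes are illustrative):

import torch
import torch.nn.functional as F

batch = 4
logits_on_fake = torch.randn(batch, 1)   # stands in for D(G(z))
logits_on_real = torch.randn(batch, 1)   # stands in for D(x)

ones, zeros = torch.ones(batch, 1), torch.zeros(batch, 1)
# Generator objective: make D call the fakes real.
g_loss = F.binary_cross_entropy_with_logits(logits_on_fake, ones)
# Discriminator objective: average of real-as-real and fake-as-fake terms.
d_loss = (F.binary_cross_entropy_with_logits(logits_on_real, ones)
          + F.binary_cross_entropy_with_logits(logits_on_fake, zeros)) / 2
print(g_loss.item(), d_loss.item())
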
XiangwenNing/dgl
|
[
"5542b7654af326a84edb1d28d050febd06d3dbcd"
] |
[
"tests/pytorch/test_dataloader.py"
] |
[
"import os\nimport dgl\nimport dgl.ops as OPS\nimport backend as F\nimport unittest\nimport torch\nfrom torch.utils.data import DataLoader\nfrom collections import defaultdict\nfrom collections.abc import Iterator\nfrom itertools import product\nimport pytest\n\n\ndef test_graph_dataloader():\n batch_size = 16\n num_batches = 2\n minigc_dataset = dgl.data.MiniGCDataset(batch_size * num_batches, 10, 20)\n data_loader = dgl.dataloading.GraphDataLoader(minigc_dataset, batch_size=batch_size, shuffle=True)\n assert isinstance(iter(data_loader), Iterator)\n for graph, label in data_loader:\n assert isinstance(graph, dgl.DGLGraph)\n assert F.asnumpy(label).shape[0] == batch_size\n\n@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')\n@pytest.mark.parametrize('num_workers', [0, 4])\ndef test_cluster_gcn(num_workers):\n dataset = dgl.data.CoraFullDataset()\n g = dataset[0]\n sampler = dgl.dataloading.ClusterGCNSampler(g, 100)\n dataloader = dgl.dataloading.DataLoader(\n g, torch.arange(100), sampler, batch_size=4, num_workers=num_workers)\n assert len(dataloader) == 25\n for i, sg in enumerate(dataloader):\n pass\n\n@pytest.mark.parametrize('num_workers', [0, 4])\ndef test_shadow(num_workers):\n g = dgl.data.CoraFullDataset()[0]\n sampler = dgl.dataloading.ShaDowKHopSampler([5, 10, 15])\n dataloader = dgl.dataloading.NodeDataLoader(\n g, torch.arange(g.num_nodes()), sampler,\n batch_size=5, shuffle=True, drop_last=False, num_workers=num_workers)\n for i, (input_nodes, output_nodes, subgraph) in enumerate(dataloader):\n assert torch.equal(input_nodes, subgraph.ndata[dgl.NID])\n assert torch.equal(input_nodes[:output_nodes.shape[0]], output_nodes)\n assert torch.equal(subgraph.ndata['label'], g.ndata['label'][input_nodes])\n assert torch.equal(subgraph.ndata['feat'], g.ndata['feat'][input_nodes])\n if i == 5:\n break\n\n\n@pytest.mark.parametrize('num_workers', [0, 4])\ndef test_neighbor_nonuniform(num_workers):\n g = dgl.graph(([1, 2, 3, 4, 5, 6, 7, 8], [0, 0, 0, 0, 1, 1, 1, 1]))\n g.edata['p'] = torch.FloatTensor([1, 1, 0, 0, 1, 1, 0, 0])\n sampler = dgl.dataloading.MultiLayerNeighborSampler([2], prob='p')\n dataloader = dgl.dataloading.NodeDataLoader(g, [0, 1], sampler, batch_size=1, device=F.ctx())\n for input_nodes, output_nodes, blocks in dataloader:\n seed = output_nodes.item()\n neighbors = set(input_nodes[1:].cpu().numpy())\n if seed == 1:\n assert neighbors == {5, 6}\n elif seed == 0:\n assert neighbors == {1, 2}\n\n g = dgl.heterograph({\n ('B', 'BA', 'A'): ([1, 2, 3, 4, 5, 6, 7, 8], [0, 0, 0, 0, 1, 1, 1, 1]),\n ('C', 'CA', 'A'): ([1, 2, 3, 4, 5, 6, 7, 8], [0, 0, 0, 0, 1, 1, 1, 1]),\n })\n g.edges['BA'].data['p'] = torch.FloatTensor([1, 1, 0, 0, 1, 1, 0, 0])\n g.edges['CA'].data['p'] = torch.FloatTensor([0, 0, 1, 1, 0, 0, 1, 1])\n sampler = dgl.dataloading.MultiLayerNeighborSampler([2], prob='p')\n dataloader = dgl.dataloading.NodeDataLoader(\n g, {'A': [0, 1]}, sampler, batch_size=1, device=F.ctx())\n for input_nodes, output_nodes, blocks in dataloader:\n seed = output_nodes['A'].item()\n # Seed and neighbors are of different node types so slicing is not necessary here.\n neighbors = set(input_nodes['B'].cpu().numpy())\n if seed == 1:\n assert neighbors == {5, 6}\n elif seed == 0:\n assert neighbors == {1, 2}\n\n neighbors = set(input_nodes['C'].cpu().numpy())\n if seed == 1:\n assert neighbors == {7, 8}\n elif seed == 0:\n assert neighbors == {3, 4}\n\n\ndef _check_device(data):\n if isinstance(data, dict):\n for k, v in data.items():\n assert v.device == F.ctx()\n 
elif isinstance(data, list):\n for v in data:\n assert v.device == F.ctx()\n else:\n assert data.device == F.ctx()\n\n@pytest.mark.parametrize('sampler_name', ['full', 'neighbor', 'neighbor2'])\n@pytest.mark.parametrize('pin_graph', [True, False])\ndef test_node_dataloader(sampler_name, pin_graph):\n g1 = dgl.graph(([0, 0, 0, 1, 1], [1, 2, 3, 3, 4]))\n if F.ctx() != F.cpu() and pin_graph:\n g1.create_formats_()\n g1.pin_memory_()\n g1.ndata['feat'] = F.copy_to(F.randn((5, 8)), F.cpu())\n g1.ndata['label'] = F.copy_to(F.randn((g1.num_nodes(),)), F.cpu())\n\n for num_workers in [0, 1, 2]:\n sampler = {\n 'full': dgl.dataloading.MultiLayerFullNeighborSampler(2),\n 'neighbor': dgl.dataloading.MultiLayerNeighborSampler([3, 3]),\n 'neighbor2': dgl.dataloading.MultiLayerNeighborSampler([3, 3])}[sampler_name]\n dataloader = dgl.dataloading.NodeDataLoader(\n g1, g1.nodes(), sampler, device=F.ctx(),\n batch_size=g1.num_nodes(),\n num_workers=num_workers)\n for input_nodes, output_nodes, blocks in dataloader:\n _check_device(input_nodes)\n _check_device(output_nodes)\n _check_device(blocks)\n\n g2 = dgl.heterograph({\n ('user', 'follow', 'user'): ([0, 0, 0, 1, 1, 1, 2], [1, 2, 3, 0, 2, 3, 0]),\n ('user', 'followed-by', 'user'): ([1, 2, 3, 0, 2, 3, 0], [0, 0, 0, 1, 1, 1, 2]),\n ('user', 'play', 'game'): ([0, 1, 1, 3, 5], [0, 1, 2, 0, 2]),\n ('game', 'played-by', 'user'): ([0, 1, 2, 0, 2], [0, 1, 1, 3, 5])\n })\n for ntype in g2.ntypes:\n g2.nodes[ntype].data['feat'] = F.copy_to(F.randn((g2.num_nodes(ntype), 8)), F.cpu())\n batch_size = max(g2.num_nodes(nty) for nty in g2.ntypes)\n sampler = {\n 'full': dgl.dataloading.MultiLayerFullNeighborSampler(2),\n 'neighbor': dgl.dataloading.MultiLayerNeighborSampler([{etype: 3 for etype in g2.etypes}] * 2),\n 'neighbor2': dgl.dataloading.MultiLayerNeighborSampler([3, 3])}[sampler_name]\n\n dataloader = dgl.dataloading.NodeDataLoader(\n g2, {nty: g2.nodes(nty) for nty in g2.ntypes},\n sampler, device=F.ctx(), batch_size=batch_size)\n assert isinstance(iter(dataloader), Iterator)\n for input_nodes, output_nodes, blocks in dataloader:\n _check_device(input_nodes)\n _check_device(output_nodes)\n _check_device(blocks)\n\n if g1.is_pinned():\n g1.unpin_memory_()\n\n@pytest.mark.parametrize('sampler_name', ['full', 'neighbor'])\n@pytest.mark.parametrize('neg_sampler', [\n dgl.dataloading.negative_sampler.Uniform(2),\n dgl.dataloading.negative_sampler.GlobalUniform(15, False, 3),\n dgl.dataloading.negative_sampler.GlobalUniform(15, True, 3)])\n@pytest.mark.parametrize('pin_graph', [True, False])\ndef test_edge_dataloader(sampler_name, neg_sampler, pin_graph):\n g1 = dgl.graph(([0, 0, 0, 1, 1], [1, 2, 3, 3, 4]))\n if F.ctx() != F.cpu() and pin_graph:\n g1.create_formats_()\n g1.pin_memory_()\n g1.ndata['feat'] = F.copy_to(F.randn((5, 8)), F.cpu())\n\n sampler = {\n 'full': dgl.dataloading.MultiLayerFullNeighborSampler(2),\n 'neighbor': dgl.dataloading.MultiLayerNeighborSampler([3, 3])}[sampler_name]\n\n # no negative sampler\n dataloader = dgl.dataloading.EdgeDataLoader(\n g1, g1.edges(form='eid'), sampler, device=F.ctx(), batch_size=g1.num_edges())\n for input_nodes, pos_pair_graph, blocks in dataloader:\n _check_device(input_nodes)\n _check_device(pos_pair_graph)\n _check_device(blocks)\n\n # negative sampler\n dataloader = dgl.dataloading.EdgeDataLoader(\n g1, g1.edges(form='eid'), sampler, device=F.ctx(),\n negative_sampler=neg_sampler, batch_size=g1.num_edges())\n for input_nodes, pos_pair_graph, neg_pair_graph, blocks in dataloader:\n _check_device(input_nodes)\n 
_check_device(pos_pair_graph)\n _check_device(neg_pair_graph)\n _check_device(blocks)\n\n g2 = dgl.heterograph({\n ('user', 'follow', 'user'): ([0, 0, 0, 1, 1, 1, 2], [1, 2, 3, 0, 2, 3, 0]),\n ('user', 'followed-by', 'user'): ([1, 2, 3, 0, 2, 3, 0], [0, 0, 0, 1, 1, 1, 2]),\n ('user', 'play', 'game'): ([0, 1, 1, 3, 5], [0, 1, 2, 0, 2]),\n ('game', 'played-by', 'user'): ([0, 1, 2, 0, 2], [0, 1, 1, 3, 5])\n })\n for ntype in g2.ntypes:\n g2.nodes[ntype].data['feat'] = F.copy_to(F.randn((g2.num_nodes(ntype), 8)), F.cpu())\n batch_size = max(g2.num_edges(ety) for ety in g2.canonical_etypes)\n sampler = {\n 'full': dgl.dataloading.MultiLayerFullNeighborSampler(2),\n 'neighbor': dgl.dataloading.MultiLayerNeighborSampler([{etype: 3 for etype in g2.etypes}] * 2),\n }[sampler_name]\n\n # no negative sampler\n dataloader = dgl.dataloading.EdgeDataLoader(\n g2, {ety: g2.edges(form='eid', etype=ety) for ety in g2.canonical_etypes},\n sampler, device=F.ctx(), batch_size=batch_size)\n for input_nodes, pos_pair_graph, blocks in dataloader:\n _check_device(input_nodes)\n _check_device(pos_pair_graph)\n _check_device(blocks)\n\n # negative sampler\n dataloader = dgl.dataloading.EdgeDataLoader(\n g2, {ety: g2.edges(form='eid', etype=ety) for ety in g2.canonical_etypes},\n sampler, device=F.ctx(), negative_sampler=neg_sampler,\n batch_size=batch_size)\n\n assert isinstance(iter(dataloader), Iterator)\n for input_nodes, pos_pair_graph, neg_pair_graph, blocks in dataloader:\n _check_device(input_nodes)\n _check_device(pos_pair_graph)\n _check_device(neg_pair_graph)\n _check_device(blocks)\n\n if g1.is_pinned():\n g1.unpin_memory_()\n\nif __name__ == '__main__':\n test_graph_dataloader()\n test_cluster_gcn(0)\n test_neighbor_nonuniform(0)\n for sampler in ['full', 'neighbor']:\n test_node_dataloader(sampler)\n for neg_sampler in [\n dgl.dataloading.negative_sampler.Uniform(2),\n dgl.dataloading.negative_sampler.GlobalUniform(2, False),\n dgl.dataloading.negative_sampler.GlobalUniform(2, True)]:\n for pin_graph in [True, False]:\n test_edge_dataloader(sampler, neg_sampler, pin_graph)\n"
] |
[
[
"torch.FloatTensor",
"torch.equal",
"torch.arange"
]
] |
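Note: test_neighbor_nonuniform in the record above checks that sampling with prob='p' never draws zero-probability edges. A condensed, self-contained version of that check (CPU only; the device argument used in the test is dropped):

import torch
import dgl

# Edges 3->0 and 4->0 carry p=0, so sampling 2 neighbors of seed 0
# should always return {1, 2}.
g = dgl.graph(([1, 2, 3, 4], [0, 0, 0, 0]))
g.edata['p'] = torch.FloatTensor([1, 1, 0, 0])
sampler = dgl.dataloading.MultiLayerNeighborSampler([2], prob='p')
dataloader = dgl.dataloading.NodeDataLoader(g, [0], sampler, batch_size=1)
for input_nodes, output_nodes, blocks in dataloader:
    assert set(input_nodes[1:].tolist()) == {1, 2}
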
jeremyephron/forager
|
[
"6db1590686e0e34b2e42ff5deb70f62fcee73d7d"
] |
[
"index_server/containers/clip-text-inference/handler.py"
] |
[
"import torch\nimport clip\n\nfrom typing import List\n\nfrom knn import utils\nfrom knn.mappers import Mapper\n\nimport config\n\n\ntorch.set_grad_enabled(False)\ntorch.set_num_threads(1)\n\n\nclass TextEmbeddingMapper(Mapper):\n def initialize_container(self):\n self.model, _ = clip.load(config.CLIP_MODEL, device=\"cpu\")\n\n @utils.log_exception_from_coro_but_return_none\n async def process_chunk(\n self, chunk: List[str], job_id, job_args, request_id\n ) -> List[str]:\n with torch.no_grad():\n text = clip.tokenize(chunk)\n text_features = self.model.encode_text(text)\n text_features /= text_features.norm(dim=-1, keepdim=True)\n return list(map(utils.numpy_to_base64, text_features.numpy()))\n\n async def process_element(self, *args, **kwargs):\n raise NotImplementedError()\n\n\napp = TextEmbeddingMapper().server\n"
] |
[
[
"torch.no_grad",
"torch.set_grad_enabled",
"torch.set_num_threads"
]
] |
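Note: the handler in the record above is the whole inference path — tokenize, encode, L2-normalize. A minimal synchronous sketch of the same path (the "ViT-B/32" model name is an assumption; the record loads whatever config.CLIP_MODEL names):

import torch
import clip

model, _ = clip.load("ViT-B/32", device="cpu")   # model name assumed
with torch.no_grad():
    text = clip.tokenize(["a photo of a dog", "a photo of a cat"])
    feats = model.encode_text(text)
    feats /= feats.norm(dim=-1, keepdim=True)    # unit vectors for cosine search
print(feats.shape)                               # (2, 512) for ViT-B/32
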
aluscher/torchbeastpopart
|
[
"1c710dd4c78d24ed73a5732ad7ba14ce578143f2"
] |
[
"torchbeast/models/attention_augmented_agent.py"
] |
[
"\"\"\"\nAdapted from https://github.com/cjlovering/Towards-Interpretable-Reinforcement-Learning-Using-Attention-Augmented-Agents-Replication\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom torchbeast.core.popart import PopArtLayer\n\n\nclass ConvLSTMCell(nn.Module):\n\n def __init__(self, input_channels, hidden_channels, kernel_size):\n \"\"\"From the original implementation:\n Paper\n -----\n https://papers.nips.cc/paper/5955-convolutional-lstm-network-a-machine-learning-approach-for-precipitation-nowcasting.pdf\n\n Referenced code\n ---------------\n https://github.com/automan000/Convolution_LSTM_PyTorch/blob/master/convolution_lstm.py\n \"\"\"\n super(ConvLSTMCell, self).__init__()\n\n assert hidden_channels % 2 == 0\n\n self.input_channels = input_channels\n self.hidden_channels = hidden_channels\n self.kernel_size = kernel_size\n self.num_features = 4\n\n self.padding = int((kernel_size - 1) / 2)\n\n self.Wxi = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)\n self.Whi = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)\n self.Wxf = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)\n self.Whf = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)\n self.Wxc = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)\n self.Whc = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)\n self.Wxo = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)\n self.Who = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)\n\n self.Wci = None\n self.Wcf = None\n self.Wco = None\n\n def initial_state(self, batch_size, hidden, height, width):\n return self.init_hidden(batch_size, hidden, height, width)\n\n def init_hidden(self, batch_size, hidden, height, width):\n if self.Wci is None:\n self.Wci = torch.zeros(1, hidden, height, width, requires_grad=True)\n self.Wcf = torch.zeros(1, hidden, height, width, requires_grad=True)\n self.Wco = torch.zeros(1, hidden, height, width, requires_grad=True)\n return (\n torch.zeros(batch_size, hidden, height, width),\n torch.zeros(batch_size, hidden, height, width)\n )\n\n def forward(self, x, prev_hidden=()):\n if self.Wci is None:\n _, _, height, width = x.shape\n hidden = self.hidden_channels\n self.Wci = torch.zeros(1, hidden, height, width, requires_grad=True).to(x.device)\n self.Wcf = torch.zeros(1, hidden, height, width, requires_grad=True).to(x.device)\n self.Wco = torch.zeros(1, hidden, height, width, requires_grad=True).to(x.device)\n\n h, c = prev_hidden\n\n ci = torch.sigmoid(self.Wxi(x) + self.Whi(h) + c * self.Wci)\n cf = torch.sigmoid(self.Wxf(x) + self.Whf(h) + c * self.Wcf)\n cc = cf * c + ci * torch.tanh(self.Wxc(x) + self.Whc(h))\n co = torch.sigmoid(self.Wxo(x) + self.Who(h) + cc * self.Wco)\n ch = co * torch.tanh(cc)\n\n return ch, cc\n\n\nclass VisionNetwork(nn.Module):\n\n def __init__(self, frame_height, frame_width, in_channels=3, hidden_channels=128):\n super(VisionNetwork, self).__init__()\n self._frame_height = frame_height\n self._frame_width = frame_width\n self._in_channels = in_channels\n self._hidden_channels = hidden_channels\n\n # padding s.t. 
the output shapes match the paper.\n self.vision_cnn = nn.Sequential(\n nn.Conv2d(in_channels=self._in_channels, out_channels=32, kernel_size=(8, 8), stride=4, padding=1),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(4, 4), stride=2, padding=2)\n )\n self.vision_lstm = ConvLSTMCell(input_channels=64, hidden_channels=self._hidden_channels, kernel_size=3)\n\n def initial_state(self, batch_size, dummy_frame):\n cnn_output = self.vision_cnn(dummy_frame)\n height, width = tuple(cnn_output.shape[2:])\n return self.vision_lstm.initial_state(batch_size, self._hidden_channels, height, width)\n\n def forward(self, x, prev_vision_core_state):\n x = x.permute(0, 3, 1, 2)\n vision_core_output, vision_core_state = self.vision_lstm(self.vision_cnn(x), prev_vision_core_state)\n return vision_core_output.permute(0, 2, 3, 1), (vision_core_output, vision_core_state)\n\n\nclass QueryNetwork(nn.Module):\n\n def __init__(self, num_queries, c_k, c_s):\n super(QueryNetwork, self, ).__init__()\n self._num_queries = num_queries\n self._c_o = c_k + c_s\n self.model = nn.Sequential(\n nn.Linear(256, 128),\n nn.ReLU(),\n nn.Linear(128, self._num_queries * self._c_o),\n nn.ReLU(),\n nn.Linear(self._num_queries * self._c_o, self._num_queries * self._c_o)\n )\n\n def forward(self, query):\n out = self.model(query)\n return out.reshape(-1, self._num_queries, self._c_o)\n\n\nclass SpatialBasis:\n\n def __init__(self, height=27, width=20, channels=64):\n self._height = height\n self._width = width\n self._channels = channels\n self._s = None\n\n self.init()\n\n def __call__(self, x):\n batch_size, x_height, x_width, *_ = x.size()\n re_init = False\n if self._height != x_height:\n self._height = x_height\n re_init = True\n if self._width != x_width:\n self._width = x_width\n re_init = True\n if re_init:\n self.init()\n\n # Stack the spatial bias (for each batch) and concat to the input.\n s = torch.stack([self._s] * batch_size).to(x.device)\n return torch.cat([x, s], dim=3)\n\n def init(self):\n h, w, d = self._height, self._width, self._channels\n\n p_h = torch.mul(torch.arange(1, h + 1).unsqueeze(1).float(), torch.ones(1, w).float()) * (np.pi / h)\n p_w = torch.mul(torch.ones(h, 1).float(), torch.arange(1, w + 1).unsqueeze(0).float()) * (np.pi / w)\n\n # NOTE: I didn't quite see how U,V = 4 made sense given that the authors form the spatial\n # basis by taking the outer product of the values. Still, I think what I have is aligned with what\n # they did, but I am less confident in this step.\n U = V = 8 # size of U, V.\n u_basis = v_basis = torch.arange(1, U + 1).unsqueeze(0).float()\n a = torch.mul(p_h.unsqueeze(2), u_basis)\n b = torch.mul(p_w.unsqueeze(2), v_basis)\n out = torch.einsum('hwu,hwv->hwuv', torch.cos(a), torch.cos(b)).reshape(h, w, d)\n self._s = out\n\n\ndef spatial_softmax(A):\n # A: batch_size x h x w x d\n b, h, w, d = A.size()\n # Flatten A s.t. 
softmax is applied to each grid (not over queries)\n A = A.reshape(b, h * w, d)\n A = F.softmax(A, dim=1)\n # Reshape A to original shape.\n A = A.reshape(b, h, w, d)\n return A\n\n\ndef apply_alpha(A, V):\n b, h, w, c = A.size()\n A = A.reshape(b, h * w, c).transpose(1, 2)\n\n _, _, _, d = V.size()\n V = V.reshape(b, h * w, d)\n\n return torch.matmul(A, V)\n\n\nclass AttentionAugmentedAgent(nn.Module):\n\n def __init__(\n self,\n observation_shape,\n num_actions,\n hidden_size: int = 256,\n c_v: int = 120,\n c_k: int = 8,\n c_s: int = 64,\n num_queries: int = 4,\n rgb_last: bool = False,\n num_tasks: int = 1,\n use_popart: bool = False,\n **kwargs\n ):\n super(AttentionAugmentedAgent, self).__init__()\n self.hidden_size = hidden_size\n self.observation_shape = observation_shape\n self.num_actions = num_actions\n self.rgb_last = rgb_last\n self.num_tasks = num_tasks\n self.use_popart = use_popart\n self.c_v, self.c_k, self.c_s, self.num_queries = c_v, c_k, c_s, num_queries\n if self.rgb_last:\n self.observation_shape = (3,) + tuple(self.observation_shape[1:])\n\n self.config = {\n \"observation_shape\": observation_shape,\n \"num_actions\": num_actions,\n \"hidden_size\": hidden_size,\n \"c_v\": c_v,\n \"c_k\": c_k,\n \"c_s\": c_s,\n \"num_queries\": num_queries,\n \"rgb_last\": rgb_last\n }\n self.config.update(kwargs)\n\n self.vision = VisionNetwork(self.observation_shape[1], self.observation_shape[2],\n in_channels=self.observation_shape[0])\n self.query = QueryNetwork(num_queries, c_k, c_s)\n self.spatial = SpatialBasis()\n\n self.answer_processor = nn.Sequential(\n # 1031 x 512\n nn.Linear((c_v + c_s) * num_queries + (c_k + c_s) * num_queries + 1 + 1, 512),\n nn.ReLU(),\n nn.Linear(512, hidden_size),\n )\n\n self.policy_core = nn.LSTM(hidden_size, hidden_size)\n\n self.policy_head = nn.Linear(hidden_size, num_actions)\n self.baseline_head = PopArtLayer(hidden_size, num_tasks if self.use_popart else 1)\n\n def initial_state(self, batch_size):\n with torch.no_grad():\n dummy_frame = torch.zeros(1, *self.observation_shape)\n vision_core_initial_state = tuple(\n s.unsqueeze(0) for s in self.vision.initial_state(batch_size, dummy_frame)\n )\n # unsqueeze() here as well as in forward() is necessary because some of the code in monobeast.py assumes that\n # the first dimension of the returned state tensors are layers of the RNN, so we need this \"dummy dimension\"\n\n policy_core_initial_state = tuple(\n torch.zeros(self.policy_core.num_layers, batch_size, self.policy_core.hidden_size)\n for _ in range(2)\n )\n return vision_core_initial_state + policy_core_initial_state\n\n def forward(self, inputs, state=(), return_attention_maps=False):\n # input frames are formatted: (time_steps, batch_size, frame_stack, height, width)\n # the original network is designed for (batch_size, height, width, num_channels)\n # there are a couple options to solve this:\n # - use grayscale, stack frames, use those as channels\n # - use full colour, stack frames, resulting in 4 * 3 = 12 channels\n # - use full colour, don't stack frames (similar to original paper)\n # IMPORTANT NOTE: for the latter, the original paper still sends the same action 4 times,\n # so the following might be a better option (as far as implementation goes)\n # - use full colour, stack frames, use only the last one\n # => for now, I'm just going to use the first method\n\n # (time_steps, batch_size, frame_stack, height, width)\n x: torch.Tensor = inputs[\"frame\"]\n time_steps, batch_size, *_ = x.shape\n # (time_steps, batch_size, 
frame_stack, height, width)\n x = x.float() / 255.0\n # (time_steps, batch_size, height, width, frame_stack) to match the design of the network\n x = x.permute(0, 1, 3, 4, 2)\n # frames are RGB and only the first should be used\n if self.rgb_last:\n x = x[:, :, :, :, -3:]\n\n # (time_steps, batch_size, 1)\n prev_reward = inputs[\"reward\"].view(time_steps, batch_size, 1)\n # (time_steps, batch_size, num_actions)\n # prev_action = F.one_hot(inputs[\"last_action\"].view(time_steps, batch_size), self.num_actions).float()\n prev_action = inputs[\"last_action\"].view(time_steps, batch_size, 1)\n # (time_steps, batch_size)\n not_done = (~inputs[\"done\"]).float()\n\n vision_core_output_list = []\n vision_core_state = tuple(s.squeeze(0) for s in state[:2]) # see comment in initial_state()\n for x_batch, not_done_batch in zip(x.unbind(), not_done.unbind()):\n\n # (batch_size, 1, 1, 1) => expanded to be broadcastable for the multiplication\n not_done_batch = not_done_batch.view(-1, 1, 1, 1)\n # (batch_size, c_k + c_v, height_ac=height_after_cnn, width_ac=width_after_cnn) * 2\n vision_core_state = tuple(not_done_batch * s for s in vision_core_state)\n\n # 1 (a). Vision.\n # --------------\n # (batch_size, height_ac, width_ac, c_k + c_v)\n vision_core_output, vision_core_state = self.vision(x_batch, vision_core_state)\n vision_core_output_list.append(vision_core_output)\n # for clarity vision_core_output.unsqueeze(0) might be better, because it would be clear that this\n # is the result for one time step, but since we merge time and batch in the following steps anyway,\n # we can also just \"discard\" the time dimension and get the same result when we concatenate\n # the results for each time step\n\n vision_core_state = tuple(s.unsqueeze(0) for s in vision_core_state) # see comment in initial_state()\n\n # (time_steps * batch_size, height_ac, width_ac, c_k + c_v)\n vision_core_output = torch.cat(vision_core_output_list)\n\n # (batch_size, height_ac, width_ac, c_k), (batch_size, height_ac, width_ac, c_v)\n keys, values = vision_core_output.split([self.c_k, self.c_v], dim=3)\n # (batch_size, height_ac, width_ac, c_k + c_s), (batch_size, height_ac, width_ac, c_v + c_s)\n keys, values = self.spatial(keys), self.spatial(values)\n\n # reshape the keys and values tensors so that they can be separated in the time dimension\n # (time_steps, batch_size, height_ac, width_ac, c_k + c_s)\n keys = keys.view(time_steps, batch_size, *keys.shape[1:])\n # (time_steps, batch_size, height_ac, width_ac, c_v + c_s)\n values = values.view(time_steps, batch_size, *values.shape[1:])\n\n policy_core_output_list = []\n attention_map_list = []\n policy_core_state = state[2:]\n for keys_batch, values_batch, prev_reward_batch, prev_action_batch, not_done_batch in zip(\n keys.unbind(), values.unbind(), prev_reward.unbind(), prev_action.unbind(), not_done.unbind()):\n\n # (1, batch_size, 1)\n not_done_batch = not_done_batch.view(1, -1, 1)\n # (lstm_layers, batch_size, hidden_size) * 2\n policy_core_state = tuple(not_done_batch * s for s in policy_core_state)\n\n # 1 (b). Queries.\n # --------------\n # (batch_size, num_queries, c_k + c_s)\n queries = self.query(policy_core_state[0])\n\n # 2. 
Answer.\n # ----------\n # (batch_size, height_ac, width_ac, num_queries)\n answer = torch.matmul(keys_batch, queries.transpose(2, 1).unsqueeze(1))\n # (batch_size, height_ac, width_ac, num_queries)\n answer = spatial_softmax(answer)\n attention_map_list.append(answer)\n # (batch_size, num_queries, c_v + c_s)\n answer = apply_alpha(answer, values_batch)\n\n # (batch_size, (c_v + c_s) * num_queries + (c_k + c_s) * num_queries + 1 + num_actions)\n answer = torch.cat(\n torch.chunk(answer, self.num_queries, dim=1)\n + torch.chunk(queries, self.num_queries, dim=1)\n + (prev_reward_batch.unsqueeze(1).float(), prev_action_batch.unsqueeze(1).float()),\n dim=2,\n ).squeeze(1)\n # (batch_size, hidden_size)\n answer = self.answer_processor(answer)\n\n # 3. Policy.\n # ----------\n # (batch_size, hidden_size)\n policy_core_output, policy_core_state = self.policy_core(answer.unsqueeze(0), policy_core_state)\n policy_core_output_list.append(policy_core_output.squeeze(0))\n # squeeze() is needed because the LSTM input has an \"extra\" dimensions for the layers of the LSTM,\n # of which there is only one in this case; therefore, the concatenated input vector has an extra\n # dimension and the output as well\n\n # (time_steps * batch_size, hidden_size)\n output = torch.cat(policy_core_output_list)\n attention_maps = torch.cat(attention_map_list)\n\n # 4, 5. Outputs.\n # --------------\n # (time_steps * batch_size, num_actions)\n policy_logits = self.policy_head(output)\n # (time_steps * batch_size, num_tasks)\n baseline, normalized_baseline = self.baseline_head(output)\n\n # (time_steps * batch_size, 1)\n if self.training:\n action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)\n else:\n action = torch.argmax(policy_logits, dim=1)\n\n # (time_steps, batch_size, num_actions)\n policy_logits = policy_logits.view(time_steps, batch_size, self.num_actions)\n # (time_steps, batch_size, num_tasks)\n baseline = baseline.view(time_steps, batch_size, self.num_tasks)\n normalized_baseline = normalized_baseline.view(time_steps, batch_size, self.num_tasks)\n # (time_steps, batch_size, 1)\n action = action.view(time_steps, batch_size, 1)\n\n if return_attention_maps:\n return (\n dict(policy_logits=policy_logits, baseline=baseline, action=action,\n normalized_baseline=normalized_baseline),\n vision_core_state + policy_core_state,\n attention_maps\n )\n\n return (\n dict(policy_logits=policy_logits, baseline=baseline, action=action,\n normalized_baseline=normalized_baseline),\n vision_core_state + policy_core_state\n )\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.ones",
"torch.chunk",
"torch.nn.LSTM",
"torch.cat",
"torch.zeros",
"torch.nn.Conv2d",
"torch.tanh",
"torch.matmul",
"torch.nn.Linear",
"torch.no_grad",
"torch.arange",
"torch.stack",
"torch.nn.ReLU",
"torch.cos",
"torch.argmax"
]
] |
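Note: spatial_softmax and apply_alpha in the record above implement the attention read-out — normalize each query's map over the h*w grid, then take the attention-weighted sum of the value map. A minimal shape check (all sizes illustrative):

import torch
import torch.nn.functional as F

b, h, w, q, d = 2, 5, 4, 3, 6
A = torch.randn(b, h, w, q)
# spatial_softmax: softmax over the h*w grid, separately per query channel.
A = F.softmax(A.reshape(b, h * w, q), dim=1).reshape(b, h, w, q)
V = torch.randn(b, h, w, d)
# apply_alpha: (b, q, h*w) @ (b, h*w, d) -> one attended vector per query.
answer = torch.matmul(A.reshape(b, h * w, q).transpose(1, 2),
                      V.reshape(b, h * w, d))
print(answer.shape)            # (b, q, d)
print(A.sum(dim=(1, 2))[0])    # each query's attention weights sum to 1
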
15091444119/NJUNMT-pytorch
|
[
"5d3947578deb5c5c9b0b5967c9010645c0c981d1"
] |
[
"src/modules/cgru.py"
] |
[
"import torch.nn as nn\n\nfrom src.utils import init as my_init\nfrom .attention import BahdanauAttention\n\nclass CGRUCell(nn.Module):\n\n def __init__(self,\n input_size,\n hidden_size):\n\n super(CGRUCell, self).__init__()\n\n self.hidden_size = hidden_size\n\n self.gru1 = nn.GRUCell(input_size=input_size, hidden_size=hidden_size)\n self.attn = BahdanauAttention(query_size=hidden_size, key_size=self.context_size)\n self.gru2 = nn.GRUCell(input_size=self.context_size, hidden_size=hidden_size)\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n for weight in self.gru1.parameters():\n my_init.rnn_init(weight)\n\n for weight in self.gru2.parameters():\n my_init.rnn_init(weight)\n\n\n @property\n def context_size(self):\n return self.hidden_size * 2\n\n def forward(self,\n input,\n hidden,\n context,\n context_mask=None,\n cache=None):\n\n hidden1 = self.gru1(input, hidden)\n attn_values, _ = self.attn(query=hidden1, memory=context, cache=cache, mask=context_mask)\n hidden2 = self.gru2(attn_values, hidden1)\n\n return (hidden2, attn_values), hidden2\n\n def compute_cache(self, memory):\n return self.attn.compute_cache(memory)"
] |
[
[
"torch.nn.GRUCell"
]
] |
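Note: CGRUCell in the record above is the classic conditional GRU — one GRUCell consumes the input, attention over the encoder context yields a vector of size 2 * hidden_size, and a second GRUCell consumes that. A minimal sketch with the BahdanauAttention replaced by a plain mean over the context, purely to keep the example self-contained:

import torch
import torch.nn as nn

hidden_size, input_size, src_len, batch = 8, 4, 5, 2
gru1 = nn.GRUCell(input_size, hidden_size)
gru2 = nn.GRUCell(2 * hidden_size, hidden_size)   # context_size = 2 * hidden

x = torch.randn(batch, input_size)
h = torch.zeros(batch, hidden_size)
context = torch.randn(batch, src_len, 2 * hidden_size)

h1 = gru1(x, h)                   # first GRU: consume the target-side input
attn = context.mean(dim=1)        # stand-in for BahdanauAttention(query=h1, ...)
h2 = gru2(attn, h1)               # second GRU: consume the attended context
print(h2.shape)                   # (batch, hidden_size)
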
solvithrastar/Inversionson
|
[
"a91ffc4cadaf30dc40b6f222e1c3d0bf105d2163"
] |
[
"inversionson/components/mesh_comp.py"
] |
[
"from __future__ import absolute_import\nfrom typing import NoReturn\nfrom .component import Component\nimport numpy as np\nimport sys\nimport shutil\nfrom pathlib import Path\nimport os\nfrom inversionson import InversionsonError\nfrom salvus.mesh.unstructured_mesh import UnstructuredMesh\nimport h5py\n\n\nclass SalvusMeshComponent(Component):\n \"\"\"\n Communications with Salvus Mesh.\n This will have to be done in a temporary way to begin with\n as it is not possible to make smoothiesem meshes through\n a nice config as things stand.\n\n :param infodict: Information related to inversion project\n :type infodict: Dictionary\n \"\"\"\n\n def __init__(self, communicator, component_name):\n super(SalvusMeshComponent, self).__init__(communicator, component_name)\n self.meshes = Path(self.comm.project.lasif_root) / \"MODELS\"\n self.event_meshes = self.meshes / \"EVENT_MESHES\"\n self.average_meshes = self.meshes / \"AVERAGE_MESHES\"\n\n def create_mesh(self, event: str):\n \"\"\"\n Create a smoothiesem mesh for an event. I'll keep refinements fixed\n for now.\n \n :param event: Name of event\n :type event: str\n \"\"\"\n\n from salvus.mesh.simple_mesh import SmoothieSEM\n\n source_info = self.comm.lasif.get_source(event_name=event)\n if isinstance(source_info, list):\n source_info = source_info[0]\n sm = SmoothieSEM()\n sm.basic.model = \"prem_ani_one_crust\"\n sm.basic.min_period_in_seconds = self.comm.project.min_period\n sm.basic.elements_per_wavelength = 1.7\n sm.basic.number_of_lateral_elements = (\n self.comm.project.elem_per_quarter\n )\n sm.advanced.tensor_order = 4\n if self.comm.project.ellipticity:\n sm.spherical.ellipticity = 0.0033528106647474805\n if self.comm.project.ocean_loading[\"use\"]:\n sm.ocean.bathymetry_file = self.comm.project.ocean_loading[\"file\"]\n sm.ocean.bathymetry_varname = self.comm.project.ocean_loading[\n \"variable\"\n ]\n sm.ocean.ocean_layer_style = \"loading\"\n sm.ocean.ocean_layer_density = 1025.0\n if self.comm.project.topography[\"use\"]:\n sm.topography.topography_file = self.comm.project.topography[\n \"file\"\n ]\n sm.topography.topography_varname = self.comm.project.topography[\n \"variable\"\n ]\n sm.source.latitude = source_info[\"latitude\"]\n sm.source.longitude = source_info[\"longitude\"]\n sm.refinement.lateral_refinements.append(\n {\"theta_min\": 40.0, \"theta_max\": 140.0, \"r_min\": 6250.0}\n )\n m = sm.create_mesh()\n mesh_file = self.event_meshes / event / \"mesh.h5\"\n if not os.path.exists(os.path.dirname(mesh_file)):\n os.makedirs(os.path.dirname(mesh_file))\n m.write_h5(mesh_file)\n\n def _check_if_mesh_has_field(\n self,\n check_mesh: str,\n field_name: str,\n elemental: bool,\n global_string: bool,\n side_sets: bool,\n ) -> bool:\n \"\"\"\n Use h5py to quickly check whether field exists on mesh\n\n :param check_mesh: path to mesh to check\n :type check_mesh: str\n :param field_name: Name of field\n :type field_name: str\n :param elemental: Is field an elemental field\n :type elemental: bool\n :param global_string: Is it a global string\n :type global_string: bool\n :param side_sets: Are we checking for side sets? 
Provide the name,\n :type side_sets: str\n \"\"\"\n with h5py.File(check_mesh, mode=\"r\") as mesh:\n if global_string:\n global_strings = list(mesh[\"MODEL\"].attrs.keys())\n if field_name in global_strings:\n return True\n else:\n return False\n if elemental:\n if \"element_data\" in mesh[\"MODEL\"].keys():\n elemental_fields = mesh[\"MODEL/element_data\"].attrs.get(\n \"DIMENSION_LABELS\"\n )[1]\n elemental_fields = (\n elemental_fields[2:-2].replace(\" \", \"\").split(\"|\")\n )\n if field_name in elemental_fields:\n return True\n else:\n return False\n else:\n return False\n if side_sets:\n if \"SIDE_SETS\" in mesh.keys():\n return True\n else:\n return False\n else:\n # Here we assume it's an element_nodal_field\n nodal_fields = mesh[\"MODEL/data\"].attrs.get(\n \"DIMENSION_LABELS\"\n )[1]\n nodal_fields = nodal_fields[2:-2].replace(\" \", \"\").split(\"|\")\n if field_name in nodal_fields:\n return True\n else:\n return False\n\n def add_field_from_one_mesh_to_another(\n self,\n from_mesh: str,\n to_mesh: str,\n field_name: str,\n elemental: bool = False,\n global_string: bool = False,\n side_sets: bool = False,\n overwrite: bool = True,\n ):\n \"\"\"\n Add one field from a specific mesh to another mesh. The two meshes\n need to have identical discretisations\n \n :param from_mesh: Path of mesh to copy field from\n :type from_mesh: str\n :param to_mesh: Path of mesh to copy field to\n :type to_mesh: str\n :param field_name: Name of the field to copy between them.\n :type field_name: str\n :param elemental: If the field is elemental make true, defaults to \n False\n :type elemental: bool, optional\n :param global_string: If the field is a global variable, defaults\n to False\n :type global_string: bool, optional\n :param overwrite: We check whether field is existing, if overwrite\n is True, we write it anyway, defaults to True\n :type bool, optional\n \"\"\"\n import os\n import shutil\n\n # has_field = self._check_if_mesh_has_field(\n # check_mesh=from_mesh,\n # field_name=field_name,\n # elemental=elemental,\n # global_string=global_string,\n # )\n has_field = self._check_if_mesh_has_field(\n check_mesh=to_mesh,\n field_name=field_name,\n elemental=elemental,\n global_string=global_string,\n side_sets=side_sets,\n )\n if has_field and not overwrite:\n print(f\"Field: {field_name} already exists on mesh\")\n return\n attach_field = True\n if not os.path.exists(to_mesh):\n print(f\"Mesh {to_mesh} does not exist. 
Will create new one.\")\n shutil.copy(from_mesh, to_mesh)\n tm = UnstructuredMesh.from_h5(to_mesh)\n # tm.element_nodal_fields = {}\n else:\n tm = UnstructuredMesh.from_h5(to_mesh)\n fm = UnstructuredMesh.from_h5(from_mesh)\n if global_string:\n # if field_name in tm.global_strings.keys():\n # if not overwrite:\n # print(f\"Field {field_name} already exists on mesh\")\n # return\n field = fm.global_strings[field_name]\n tm.attach_global_variable(name=field_name, data=field)\n tm.write_h5(to_mesh)\n print(f\"Attached field {field_name} to mesh {to_mesh}\")\n return\n elif elemental:\n # if field_name in tm.elemental_fields.keys():\n # if not overwrite:\n # print(f\"Field {field_name} already exists on mesh\")\n # return\n field = fm.elemental_fields[field_name]\n elif side_sets:\n for side_set in fm.side_sets.keys():\n tm.define_side_set(\n name=side_set,\n element_ids=fm.side_sets[side_set][0],\n side_ids=fm.side_sets[side_set][1]\n )\n print(f\"Attached side set {side_set} to mesh {to_mesh}\")\n attach_field = False\n\n else:\n # if field_name in tm.element_nodal_fields.keys():\n # if not overwrite:\n # print(f\"Field {field_name} already exists on mesh\")\n # return\n field = fm.element_nodal_fields[field_name]\n if attach_field:\n tm.attach_field(field_name, field)\n print(f\"Attached field {field_name} to mesh {to_mesh}\")\n tm.write_h5(to_mesh)\n \n\n def write_xdmf(self, filename: str):\n \"\"\"\n A hacky way to write an xdmf file for the hdf5 file\n :param filename: path to hdf5 file\n :return:\n \"\"\"\n\n mesh = UnstructuredMesh.from_h5(filename)\n mesh.write_h5(filename)\n\n def add_fluid_and_roi_from_lasif_mesh(self):\n \"\"\"\n For some reason the salvus opt meshes don't have all the necessary info.\n I need this to get them simulation ready. 
I will write them into the\n lasif folder afterwards.\n As this is a quickfix, I will make it for my specific case.\n \"\"\"\n import os\n import numpy as np\n\n initial_model = self.comm.lasif.lasif_comm.project.lasif_config[\n \"domain_settings\"\n ][\"domain_file\"]\n iteration = self.comm.project.current_iteration\n opt_mesh = os.path.join(\n self.comm.project.paths[\"salvus_opt\"],\n \"PHYSICAL_MODELS\",\n f\"{iteration}.h5\",\n )\n m_opt = UnstructuredMesh.from_h5(opt_mesh)\n m_init = UnstructuredMesh.from_h5(initial_model)\n\n fluid = m_init.elemental_fields[\"fluid\"]\n roi = np.abs(1.0 - fluid)\n\n m_opt.attach_field(name=\"fluid\", data=fluid)\n m_opt.attach_field(name=\"ROI\", data=roi)\n\n iteration_mesh = os.path.join(\n self.comm.project.lasif_root,\n \"MODELS\",\n f\"ITERATION_{iteration}\",\n \"mesh.h5\",\n )\n if not os.path.exists(os.path.dirname(iteration_mesh)):\n os.makedirs(os.path.dirname(iteration_mesh))\n m_opt.write_h5(iteration_mesh)\n\n def get_average_model(self, iteration_range: tuple) -> Path:\n \"\"\"\n Get an average model between a list of iteration numbers.\n Can be used to get a smoother misfit curve for validation\n data set.\n \n :param iteration_range: From iteration to iteration tuple\n :type iterations: tuple\n \"\"\"\n # I have to make sure the I am consistent with naming of things, might be a bit off there\n\n folder_name = f\"it_{iteration_range[0]}_to_{iteration_range[1]}\"\n full_path = self.average_meshes / folder_name / \"mesh.h5\"\n if not os.path.exists(full_path.parent):\n os.makedirs(full_path.parent)\n\n # We copy the newest mesh from SALVUS_OPT to LASIF and write the\n # average fields onto those.\n\n model = self.comm.salvus_opt.get_model_path()\n shutil.copy(model, full_path)\n\n m = UnstructuredMesh.from_h5(full_path)\n fields = m.element_nodal_fields\n new_fields = {}\n for field in fields.keys():\n new_fields[field] = np.zeros_like(fields[field])\n # m.element_nodal_fields = {}\n for iteration in range(iteration_range[0], iteration_range[1] + 1):\n it = self.comm.salvus_opt.get_name_for_accepted_iteration_number(\n number=iteration\n )\n model_path = self.comm.salvus_opt.get_model_path(iteration=it)\n m_tmp = UnstructuredMesh.from_h5(model_path)\n for field_name, field in new_fields.items():\n field += m_tmp.element_nodal_fields[field_name]\n\n for field_name, field in new_fields.items():\n field /= len(range(iteration_range[0], iteration_range[1] + 1))\n m.attach_field(field_name, field)\n m.write_h5(full_path)\n print(\n f\"Wrote and average model of iteration {iteration_range[0]} to\"\n f\" iteration {iteration_range[1]} onto mesh: {full_path}\"\n )\n\n return full_path\n\n def add_region_of_interest(self, event: str):\n \"\"\"\n Region of interest is the region where the gradient is computed\n and outside the region, it is not computed.\n Currently we add the region of interest as an elemental field\n which is the oposite of the fluid field.\n \n :param event: Name of event\n :type event: str\n \"\"\"\n\n mesh = self.comm.lasif.find_event_mesh(event)\n m = UnstructuredMesh.from_h5(mesh)\n mesh_layers = np.sort(np.unique(m.elemental_fields[\"layer\"]))[\n ::-1\n ].astype(int)\n layers = m.elemental_fields[\"layer\"]\n o_core_idx = layers[np.where(m.elemental_fields[\"fluid\"] == 1)[0][0]]\n o_core_idx = np.where(mesh_layers == o_core_idx)[0][0]\n correct_layers = mesh_layers[o_core_idx:]\n roi = np.zeros_like(layers)\n for layer in correct_layers:\n roi = np.logical_or(roi, layers == layer)\n\n m.attach_field(\"ROI\", roi)\n 
m.write_h5(mesh)\n\n def write_new_opt_fields_to_simulation_mesh(self):\n \"\"\"\n Salvus opt makes a mesh which has the correct velocities, but\n it does not have everything that is needed to run a simulation.\n We will thus write its fields onto our simulation mesh.\n \"\"\"\n if self.comm.project.meshes == \"multi-mesh\":\n raise InversionsonError(\n \"Multi-mesh inversion should not use this function. Only \"\n \"Mono-mesh.\"\n )\n print(\"Writing new fields to simulation mesh\")\n iteration = self.comm.project.current_iteration\n if \"validation\" in iteration:\n iteration = iteration[11:]  # We don't need a special mesh\n opt_model = os.path.join(\n self.comm.salvus_opt.models, f\"{iteration}.h5\"\n )\n simulation_mesh = self.comm.lasif.get_simulation_mesh(\n event_name=None, iteration=\"current\"\n )\n\n sim_mesh_dir = os.path.dirname(simulation_mesh)\n success_file = os.path.join(sim_mesh_dir, \"success.txt\")\n\n if os.path.exists(simulation_mesh) and os.path.exists(success_file):\n print(\"Mesh already exists, will not add fields\")\n return\n else:\n shutil.copy(\n self.comm.lasif.lasif_comm.project.lasif_config[\n \"domain_settings\"\n ][\"domain_file\"],\n simulation_mesh,\n )\n\n with h5py.File(simulation_mesh, mode=\"r+\") as f_new:\n with h5py.File(opt_model, mode=\"r\") as f:\n dim_labels = (\n f[\"MODEL/data\"]\n .attrs.get(\"DIMENSION_LABELS\")[1][1:-1]\n .replace(\" \", \"\")\n .split(\"|\")\n )\n # This assumes the indices are the same in both files,\n # which seems to be the case as far as DP could tell.\n for param in self.comm.project.inversion_params:\n print(\"Writing field:\", param)\n i = dim_labels.index(param)\n f_new[\"MODEL/data\"][:, i, :] = f[\"MODEL/data\"][:, i, :]\n\n # When all fields are successfully copied, write a file to indicate\n # success, to prevent continuing\n # with the initial model when something here crashes unexpectedly.\n with open(success_file, \"w\") as text_file:\n text_file.write(\"All fields written successfully.\")\n\n def sum_two_fields_on_a_mesh(\n self,\n mesh: str,\n fieldname_1: str,\n fieldname_2: str,\n newname: str = None,\n delete_old_fields: bool = False,\n ):\n \"\"\"\n Take two fields on a mesh and sum them together. If no newname is\n given, the summed field will be written into both of the old fields.\n If newname is given, the summed field will be written in there,\n and if delete_old_fields is True, the old fields will be deleted.\n\n :param mesh: Path to mesh to be used\n :type mesh: str or Path \n :param fieldname_1: Name of field to be summed\n :type fieldname_1: str\n :param fieldname_2: Name of other field to be summed\n :type fieldname_2: str\n :param newname: Name of field to store summed field, defaults to None\n :type newname: str, optional\n :param delete_old_fields: Whether old fields should be deleted,\n defaults to False. Currently not implemented\n :type delete_old_fields: bool, optional\n \"\"\"\n\n m = UnstructuredMesh.from_h5(mesh)\n\n available_fields = list(m.element_nodal_fields.keys())\n if fieldname_1 not in available_fields:\n raise InversionsonError(\n f\"Field {fieldname_1} not available on mesh {mesh}. \"\n f\"Only available fields are: {available_fields}\"\n )\n if fieldname_2 not in available_fields:\n raise InversionsonError(\n f\"Field {fieldname_2} not available on mesh {mesh}. 
\"\n f\"Only available fields are: {available_fields}\"\n )\n\n if delete_old_fields:\n if newname is None:\n raise InversionsonError(\n \"If you want to delete old fields you need to write the \"\n \"summed one into a new field\"\n )\n\n summed_field = np.copy(m.element_nodal_fields[fieldname_1])\n summed_field += m.element_nodal_fields[fieldname_2]\n\n if newname is None:\n m.attach_field(fieldname_1, summed_field)\n m.attach_field(fieldname_2, summed_field)\n else:\n m.attach_field(newname, summed_field)\n m.write_h5(mesh)\n\n def fill_inversion_params_with_zeroes(self, mesh: str):\n \"\"\"\n This is done because we don't interpolate every layer, and we\n want to make sure that nothing sneaks into the gradients.\n\n :param mesh: Path to mesh\n :type mesh: str\n \"\"\"\n print(\"Filling inversion parameters with zeros before interpolation\")\n m = UnstructuredMesh.from_h5(mesh)\n parameters = self.comm.project.inversion_params\n zero_nodal = np.zeros_like(m.element_nodal_fields[parameters[0]])\n\n for param in parameters:\n m.attach_field(param, zero_nodal)\n m.write_h5(mesh)\n"
] |
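The get_average_model routine above averages element-nodal fields by accumulating each field into a zero-initialized array and dividing by the number of iterations. A minimal standalone sketch of that accumulation pattern, assuming each model is simply a dict of equally shaped numpy arrays (the models list stands in for the meshes loaded via UnstructuredMesh.from_h5):

import numpy as np

def average_fields(models):
    # Zero-initialized accumulators shaped like the first model's fields,
    # forced to float so the in-place division below is well defined.
    acc = {name: np.zeros_like(arr, dtype=float) for name, arr in models[0].items()}
    for model in models:
        for name, arr in model.items():
            acc[name] += arr
    # Divide by the number of contributing models, mirroring the
    # len(range(start, stop + 1)) division in the source.
    for name in acc:
        acc[name] /= len(models)
    return acc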
[
[
"numpy.abs",
"numpy.unique",
"numpy.logical_or",
"numpy.copy",
"numpy.zeros_like",
"numpy.where"
]
] |
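Each row of this dump pairs a source file with the fully qualified API calls extracted from it (numpy above, torch for the next entry). As an illustration only, and not a claim about how this dataset was actually produced, such a list can be derived with Python's ast module:

import ast

def extract_numpy_calls(source, aliases=("np", "numpy")):
    # Collect dotted call names such as 'numpy.zeros_like' from source code.
    calls = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
            value = node.func.value
            if isinstance(value, ast.Name) and value.id in aliases:
                calls.add("numpy." + node.func.attr)
    return sorted(calls)

This sketch only catches single-level calls like np.zeros_like; nested namespaces such as np.random.randint would require walking the full attribute chain.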
Lucieno/gforce-public
|
[
"cb577ec22f2011d30dbef9dad9545ca1402f875b",
"cb577ec22f2011d30dbef9dad9545ca1402f875b"
] |
[
"src/secure_layers.py",
"src/dgk_basic.py"
] |
[
"from typing import Callable\n\nimport torch\nimport torch.nn.functional as F\n\nfrom avgpool2x2 import Avgpool2x2Server, Avgpool2x2Client, Avgpool2x2Common\nfrom comm import NamedBase, end_communicate, torch_sync, init_communicate, CommFheBuilder, BlobTorch, CommBase\nfrom config import Config\nfrom conv2d_ntt import calc_output_hw, Conv2dSecureCommon, Conv2dSecureServer, Conv2dSecureClient\nfrom fhe import FheBuilder\nfrom fully_connected import FcSecureCommon, FcSecureServer, FcSecureClient\nfrom maxpool2x2_dgk import Maxpool2x2DgkServer, Maxpool2x2DgkClient, Maxpool2x2DgkCommon\nfrom relu_dgk import ReluDgkCommon, ReluDgkServer, ReluDgkClient\nfrom secret_share import ReconToClientServer, ReconToClientClient\nfrom swap_share import SwapToClientOfflineCommon, SwapToClientOfflineServer, SwapToClientOfflineClient\nfrom timer_utils import NamedTimerInstance\nfrom torch_utils import get_prod, get_torch_size, marshal_funcs, gen_unirand_int_grain, pmod, compare_expected_actual, \\\n generate_random_mask, nmod\nfrom truncation import TruncCommon, TruncServer, TruncClient\n\n\nclass SecureLayerContext(NameError):\n class_name = \"SecureLayerContext\"\n fhe_builder_16: FheBuilder\n fhe_builder_23: FheBuilder\n rank: int\n\n def __init__(self, work_bit, data_bit, q_16, q_23, fhe_builder_16, fhe_builder_23, name):\n self.work_bit = work_bit\n self.data_bit = data_bit\n self.q_16 = q_16\n self.q_23 = q_23\n self.fhe_builder_16 = fhe_builder_16\n self.fhe_builder_23 = fhe_builder_23\n self.name = name\n\n self.data_range = 2 ** self.data_bit\n\n def set_rank(self, rank):\n self.rank = rank\n\n\nclass ContextRankBase(object):\n context: SecureLayerContext\n rank: int\n\n def __init__(self):\n pass\n\n def load_context(self, context: SecureLayerContext):\n self.context = context\n\n def is_server(self):\n return self.context.rank == Config.server_rank\n\n def is_client(self):\n return self.context.rank == Config.client_rank\n\n\nclass SecureLayerBase(NamedBase, ContextRankBase):\n class_name = \"SecureLayerBase\"\n prev_layer: \"SecureLayerBase\"\n next_layer: \"SecureLayerBase\"\n input_shape: torch.Size\n output_shape: torch.Size\n input_device: torch.device\n input_dtype: torch.dtype\n next_input_device: torch.device\n next_input_dtype: torch.dtype\n output_share: torch.Tensor\n reconstructed_output: torch.Tensor\n is_offline_known_s = False\n is_offline_known_c = False\n has_weight = False\n # hook: Callable[[torch.Tensor], None]\n\n def __init__(self, name):\n super().__init__(name)\n\n def register_next_layer(self, layer: \"SecureLayerBase\"):\n self.next_layer = layer\n self.next_input_device = layer.input_device\n self.next_input_dtype = layer.input_dtype\n\n def register_prev_layer(self, layer: \"SecureLayerBase\"):\n self.prev_layer = layer\n self.input_shape = layer.get_output_shape()\n\n # def register_hook(self, hook):\n # self.hook = hook\n\n def offline(self):\n raise NotImplementedError()\n\n def online(self):\n raise NotImplementedError()\n\n def reconstructed_to_server(self, comm_base: CommBase, modulus):\n blob_output_share = BlobTorch(self.get_output_shape(), torch.float, comm_base, self.name + \"_output_share\")\n\n if self.is_server():\n blob_output_share.prepare_recv()\n torch_sync()\n other_output_share = blob_output_share.get_recv()\n # print(self.name + \"_output_share\" + \"_server: have\", self.get_output_share())\n # print(self.name + \"_output_share\" + \"_server: received\", other_output_share)\n self.reconstructed_output = nmod(self.get_output_share() + 
other_output_share, modulus)\n # print(self.name + \"_output_share\" + \"_server: recon\", self.reconstructed_output)\n\n if self.is_client():\n torch_sync()\n blob_output_share.send(self.get_output_share())\n # print(self.name + \"_output_share\" + \"_client: sent\", self.get_output_share())\n\n def get_reconstructed_output(self):\n return self.reconstructed_output\n\n def get_output_shape(self):\n return self.output_shape\n\n def get_input_share(self):\n return self.prev_layer.get_output_share()\n\n def get_output_share(self):\n return self.output_share\n\n\nclass InputSecureLayer(SecureLayerBase):\n class_name = \"InputSecureLayer\"\n input_device = torch.device(\"cpu\")\n input_dtype = torch.float\n input_img: torch.Tensor\n swap_prot: SwapToClientOfflineCommon\n dummy_input_s: torch.Tensor\n is_offline_known_c = True\n\n def __init__(self, shape, name):\n super().__init__(name)\n self.input_shape = get_torch_size(shape)\n\n def offline(self):\n device = self.next_input_device\n dtype = self.next_input_dtype\n swap_prot_name = self.sub_name(\"swap_prot\")\n modulus = self.context.q_23\n if self.is_server():\n self.swap_prot = SwapToClientOfflineServer(get_prod(self.input_shape), modulus, swap_prot_name)\n self.dummy_input_s = torch.zeros(self.input_shape).to(device).type(dtype)\n self.swap_prot.offline()\n elif self.is_client():\n self.swap_prot = SwapToClientOfflineClient(get_prod(self.input_shape), modulus, swap_prot_name)\n self.output_share = generate_random_mask(modulus, self.input_shape)\n self.swap_prot.offline(self.output_share.reshape(-1))\n self.output_share = self.output_share.to(device).type(dtype)\n\n def online(self):\n device = self.next_input_device\n dtype = self.next_input_dtype\n if self.is_client():\n if self.input_img is None:\n raise Exception(\"Client should feed input\")\n self.swap_prot.online(self.input_img.reshape(-1))\n self.output_share = self.swap_prot.output_c\n elif self.is_server():\n self.swap_prot.online(self.dummy_input_s.reshape(-1))\n self.output_share = self.swap_prot.output_s\n self.output_share = self.output_share.to(device).type(dtype).reshape(self.input_shape)\n\n def feed_input(self, input_img: torch.Tensor):\n assert(input_img.shape == self.input_shape)\n assert(self.is_client())\n self.input_img = input_img\n\n def get_output_shape(self):\n return self.input_shape\n\n def get_output_share(self):\n return self.output_share\n\n\nclass OutputSecureLayer(SecureLayerBase):\n class_name = \"OutputSecureLayer\"\n input_device = torch.device(\"cpu\")\n input_dtype = torch.float\n output: torch.Tensor\n prot = None\n\n def __init__(self, name):\n super().__init__(name)\n\n def offline(self):\n num_elem = get_prod(self.input_shape)\n modulus = self.context.q_23\n name = self.class_name + self.name\n if self.is_server():\n self.prot = ReconToClientServer(num_elem, modulus, name)\n elif self.is_client():\n self.prot = ReconToClientClient(num_elem, modulus, name)\n\n self.prot.offline()\n\n def online(self):\n assert(self.get_input_share().shape == self.input_shape)\n self.prot.online(self.get_input_share().reshape(-1))\n if self.is_client():\n self.output = self.prot.output.reshape(self.input_shape)\n\n def get_output_shape(self):\n return self.input_shape\n\n def get_output(self):\n return self.output\n\n\nclass FlattenSecureLayer(SecureLayerBase):\n class_name = \"FlattenSecureLayer\"\n input_device = None\n input_dtype = None\n\n def __init__(self, name):\n super().__init__(name)\n\n def register_prev_layer(self, layer: SecureLayerBase):\n 
SecureLayerBase.register_prev_layer(self, layer)\n self.output_shape = get_torch_size(get_prod(self.input_shape))\n\n def register_next_layer(self, layer: SecureLayerBase):\n SecureLayerBase.register_next_layer(self, layer)\n self.input_device = self.next_input_device\n self.input_dtype = self.next_input_dtype\n self.prev_layer.register_next_layer(self)\n\n def offline(self):\n return\n\n def online(self):\n assert(self.get_input_share().shape == self.input_shape)\n self.output_share = self.get_input_share().reshape(self.output_shape)\n\n\nclass SwapToClientOfflineLayer(SecureLayerBase):\n class_name = \"SwapToClientOfflineLayer\"\n input_device = torch.device(\"cuda\")\n input_dtype = torch.float\n swap_prot: SwapToClientOfflineCommon\n swapped_input_s: torch.Tensor\n swapped_input_c: torch.Tensor\n is_need_swap = True\n\n def __init__(self, name):\n super().__init__(name)\n\n def offline(self):\n modulus = self.context.q_23\n swap_prot_name = self.sub_name(\"swap_prot\")\n\n if self.is_need_swap:\n if self.is_server():\n self.swap_prot = SwapToClientOfflineServer(get_prod(self.input_shape), modulus, swap_prot_name)\n self.swap_prot.offline()\n elif self.is_client():\n self.swap_prot = SwapToClientOfflineClient(get_prod(self.input_shape), modulus, swap_prot_name)\n self.swapped_input_c = generate_random_mask(modulus, self.input_shape)\n self.swap_prot.offline(self.swapped_input_c.reshape(-1))\n self.swapped_input_c = self.swapped_input_c.to(Config.device).reshape(self.input_shape)\n if not self.is_need_swap and self.is_client():\n self.swapped_input_c = self.get_input_share().to(Config.device)\n\n def online(self):\n if self.is_need_swap:\n if self.is_server():\n self.swap_prot.online(self.get_input_share().reshape(-1))\n self.swapped_input_s = self.swap_prot.output_s.reshape(self.input_shape)\n elif self.is_client():\n self.swap_prot.online(self.get_input_share().reshape(-1))\n self.swapped_input_c = self.swapped_input_c.to(Config.device).reshape(self.input_shape)\n if not self.is_need_swap and self.is_server():\n self.swapped_input_s = self.get_input_share()\n\n\nclass Conv2dSecureLayer(SwapToClientOfflineLayer):\n class_name = \"Conv2dSecureLayer\"\n input_device = torch.device(\"cuda\")\n input_dtype = torch.float\n is_offline_known_c = True\n has_weight = True\n img_hw: int\n output_hw: int\n compute_prot: Conv2dSecureCommon\n weight: torch.Tensor\n bias: torch.Tensor\n\n def __init__(self, num_input_channel, num_output_channel, filter_hw, name, padding=1, bias=None):\n super().__init__(name)\n self.num_input_channel = num_input_channel\n self.num_output_channel = num_output_channel\n self.filter_hw = filter_hw\n self.padding = padding\n self.bias = bias\n\n self.weight_shape = torch.Size([num_output_channel, num_input_channel, filter_hw, filter_hw])\n self.bias_shape = torch.Size([num_output_channel])\n\n def register_prev_layer(self, layer: \"SecureLayerBase\"):\n SecureLayerBase.register_prev_layer(self, layer)\n self.input_shape = layer.get_output_shape()\n assert(len(self.input_shape) == 3)\n assert(self.input_shape[-3] == self.num_input_channel)\n assert(self.input_shape[-1] == self.input_shape[-2])\n self.img_hw = self.input_shape[-2]\n self.output_hw = calc_output_hw(self.img_hw, self.filter_hw, self.padding)\n self.output_shape = torch.Size([self.num_output_channel, self.output_hw, self.output_hw])\n if layer.is_offline_known_c:\n self.is_need_swap = False\n\n def load_weight(self, weight, bias=None):\n self.weight = weight\n self.bias = bias\n\n def offline(self):\n 
SwapToClientOfflineLayer.offline(self)\n\n modulus = self.context.q_23\n compute_prot_name = self.sub_name(\"compute_prot\")\n fhe_builder = self.context.fhe_builder_23\n data_range = self.context.data_range\n\n if self.is_server():\n self.compute_prot = Conv2dSecureServer(modulus, fhe_builder, data_range, self.img_hw, self.filter_hw,\n self.num_input_channel, self.num_output_channel,\n compute_prot_name, padding=self.padding)\n self.compute_prot.offline(self.weight, bias=self.bias)\n elif self.is_client():\n self.compute_prot = Conv2dSecureClient(modulus, fhe_builder, data_range, self.img_hw, self.filter_hw,\n self.num_input_channel, self.num_output_channel,\n compute_prot_name, padding=self.padding)\n self.compute_prot.offline(self.swapped_input_c)\n self.output_share = self.compute_prot.output_c\n\n def online(self):\n SwapToClientOfflineLayer.online(self)\n if self.is_server():\n self.compute_prot.online(self.swapped_input_s)\n self.output_share = self.compute_prot.output_s\n elif self.is_client():\n self.compute_prot.online()\n\n\nclass FcSecureLayer(SwapToClientOfflineLayer):\n class_name = \"FcSecureLayer\"\n input_device = torch.device(\"cuda\")\n input_dtype = torch.float\n is_offline_known_c = True\n has_weight = True\n img_hw: int\n output_hw: int\n compute_prot: FcSecureCommon\n weight: torch.Tensor\n bias: torch.Tensor\n num_input_unit: int\n num_output_unit: int\n\n def __init__(self, num_output_unit, name):\n super().__init__(name)\n self.num_output_unit = num_output_unit\n\n def register_prev_layer(self, layer: \"SecureLayerBase\"):\n SecureLayerBase.register_prev_layer(self, layer)\n self.input_shape = layer.get_output_shape()\n assert(len(self.input_shape) == 1)\n self.num_input_unit = self.input_shape[-1]\n self.weight_shape = torch.Size([self.num_output_unit, self.num_input_unit])\n self.output_shape = torch.Size([self.num_output_unit])\n self.bias_shape = torch.Size([self.num_output_unit])\n\n if layer.is_offline_known_c:\n self.is_need_swap = False\n\n def load_weight(self, weight, bias=None):\n self.weight = weight\n self.bias = bias\n\n def offline(self):\n SwapToClientOfflineLayer.offline(self)\n\n modulus = self.context.q_23\n compute_prot_name = self.sub_name(\"compute_prot\")\n fhe_builder = self.context.fhe_builder_23\n data_range = self.context.data_range\n\n if self.is_server():\n self.compute_prot = FcSecureServer(modulus, data_range, self.num_input_unit, self.num_output_unit,\n fhe_builder, compute_prot_name)\n self.compute_prot.offline(self.weight, bias=self.bias)\n elif self.is_client():\n self.compute_prot = FcSecureClient(modulus, data_range, self.num_input_unit, self.num_output_unit,\n fhe_builder, compute_prot_name)\n self.compute_prot.offline(self.swapped_input_c)\n self.output_share = self.compute_prot.output_c\n\n def online(self):\n SwapToClientOfflineLayer.online(self)\n if self.is_server():\n self.compute_prot.online(self.swapped_input_s)\n self.output_share = self.compute_prot.output_s\n elif self.is_client():\n self.compute_prot.online()\n\n\nclass TruncSecureLayer(SecureLayerBase):\n class_name = \"TruncSecureLayer\"\n input_device = torch.device(\"cuda\")\n input_dtype = torch.float\n prot: TruncCommon\n div_to_pow: int\n\n def __init__(self, name):\n super().__init__(name)\n\n def register_prev_layer(self, layer: SecureLayerBase):\n SecureLayerBase.register_prev_layer(self, layer)\n self.input_shape = layer.get_output_shape()\n self.output_shape = self.input_shape\n\n def set_div_to_pow(self, div_to_pow):\n self.div_to_pow = div_to_pow\n\n 
def offline(self):\n num_elem = get_prod(self.input_shape)\n name = self.sub_name(\"trunc_prot\")\n\n if self.is_server():\n self.prot = TruncServer(num_elem, self.context.q_23, self.div_to_pow, self.context.fhe_builder_23, name)\n elif self.is_client():\n self.prot = TruncClient(num_elem, self.context.q_23, self.div_to_pow, self.context.fhe_builder_23, name)\n\n self.prot.offline()\n\n def online(self):\n self.prot.online(self.get_input_share().reshape(-1))\n\n if self.is_server():\n self.output_share = self.prot.out_s\n elif self.is_client():\n self.output_share = self.prot.out_c\n\n device = self.next_input_device\n dtype = self.next_input_dtype\n self.output_share = self.output_share.to(device).type(dtype).reshape(self.output_shape)\n\n\nclass ReluSecureLayer(SecureLayerBase):\n class_name = \"ReluSecureLayer\"\n input_device = torch.device(\"cuda\")\n input_dtype = torch.float\n prot: ReluDgkCommon\n\n def __init__(self, name):\n super().__init__(name)\n\n def register_prev_layer(self, layer: SecureLayerBase):\n SecureLayerBase.register_prev_layer(self, layer)\n self.input_shape = layer.get_output_shape()\n self.output_shape = self.input_shape\n\n def offline(self):\n num_elem = get_prod(self.input_shape)\n name = self.sub_name(\"relu_dgk_prot\")\n\n if self.is_server():\n self.prot = ReluDgkServer(num_elem, self.context.q_23, self.context.q_16,\n self.context.work_bit, self.context.data_bit,\n self.context.fhe_builder_16, self.context.fhe_builder_23, name)\n elif self.is_client():\n self.prot = ReluDgkClient(num_elem, self.context.q_23, self.context.q_16,\n self.context.work_bit, self.context.data_bit,\n self.context.fhe_builder_16, self.context.fhe_builder_23, name)\n\n self.prot.offline()\n\n def online(self):\n self.prot.online(self.get_input_share().reshape(-1))\n\n if self.is_server():\n self.output_share = self.prot.max_s\n elif self.is_client():\n self.output_share = self.prot.max_c\n\n device = self.next_input_device\n dtype = self.next_input_dtype\n self.output_share = self.output_share.to(device).type(dtype).reshape(self.output_shape)\n\n\nclass Maxpool2x2SecureLayer(SecureLayerBase):\n class_name = \"Maxpool2x2SecureLayer\"\n input_device = torch.device(Config.device)\n input_dtype = torch.float\n prot: Maxpool2x2DgkCommon\n input_hw: int\n output_hw: int\n num_channel: int\n\n def __init__(self, name):\n super().__init__(name)\n\n def register_prev_layer(self, layer: SecureLayerBase):\n SecureLayerBase.register_prev_layer(self, layer)\n self.input_shape = layer.get_output_shape()\n assert(len(self.input_shape) == 3)\n assert(self.input_shape[-1] == self.input_shape[-2])\n self.input_hw = self.input_shape[-1]\n assert(self.input_hw % 2 == 0)\n self.output_hw = self.input_hw // 2\n self.num_channel = self.input_shape[-3]\n self.output_shape = torch.Size([self.num_channel, self.output_hw, self.output_hw])\n\n def offline(self):\n num_elem = get_prod(self.input_shape)\n name = self.sub_name(\"maxpool2x2_dgk_prot\")\n\n if self.is_server():\n self.prot = Maxpool2x2DgkServer(num_elem, self.context.q_23, self.context.q_16,\n self.context.work_bit, self.context.data_bit,\n self.input_hw,\n self.context.fhe_builder_16, self.context.fhe_builder_23, name)\n elif self.is_client():\n self.prot = Maxpool2x2DgkClient(num_elem, self.context.q_23, self.context.q_16,\n self.context.work_bit, self.context.data_bit,\n self.input_hw,\n self.context.fhe_builder_16, self.context.fhe_builder_23, name)\n\n self.prot.offline()\n\n def online(self):\n 
self.prot.online(self.get_input_share().reshape(-1))\n\n if self.is_server():\n self.output_share = self.prot.max_s\n elif self.is_client():\n self.output_share = self.prot.max_c\n\n device = self.next_input_device\n dtype = self.next_input_dtype\n self.output_share = self.output_share.to(device).type(dtype).reshape(self.output_shape)\n\n\nclass Avgpool2x2SecureLayer(SecureLayerBase):\n class_name = \"Avgpool2x2SecureLayer\"\n input_device = torch.device(\"cuda\")\n input_dtype = torch.float\n prot: Avgpool2x2Common\n input_hw: int\n output_hw: int\n num_channel: int\n\n def __init__(self, name):\n super().__init__(name)\n\n def register_prev_layer(self, layer: SecureLayerBase):\n SecureLayerBase.register_prev_layer(self, layer)\n self.input_shape = layer.get_output_shape()\n assert(len(self.input_shape) == 3)\n assert(self.input_shape[-1] == self.input_shape[-2])\n self.input_hw = self.input_shape[-1]\n assert(self.input_hw % 2 == 0)\n self.output_hw = self.input_hw // 2\n self.num_channel = self.input_shape[-3]\n self.output_shape = torch.Size([self.num_channel, self.output_hw, self.output_hw])\n\n def offline(self):\n num_elem = get_prod(self.input_shape)\n name = self.sub_name(\"avgpool2x2_prot\")\n\n if self.is_server():\n self.prot = Avgpool2x2Server(num_elem, self.context.q_23, self.context.q_16,\n self.context.work_bit, self.context.data_bit,\n self.input_hw,\n self.context.fhe_builder_16, self.context.fhe_builder_23, name)\n elif self.is_client():\n self.prot = Avgpool2x2Client(num_elem, self.context.q_23, self.context.q_16,\n self.context.work_bit, self.context.data_bit,\n self.input_hw,\n self.context.fhe_builder_16, self.context.fhe_builder_23, name)\n\n self.prot.offline()\n\n def online(self):\n self.prot.online(self.get_input_share().reshape(-1))\n\n if self.is_server():\n self.output_share = self.prot.max_s\n elif self.is_client():\n self.output_share = self.prot.max_c\n\n device = self.next_input_device\n dtype = self.next_input_dtype\n self.output_share = self.output_share.to(device).type(dtype).reshape(self.output_shape)\n\n\nclass SecureNeuralNetwork(NamedBase, ContextRankBase):\n class_name = \"SecureNeuralNetwork\"\n context: SecureLayerContext\n input_layer: InputSecureLayer\n output_layer: OutputSecureLayer\n layers: list\n\n def __init__(self, name):\n super().__init__(name)\n\n def load_layers(self, layers):\n self.layers = layers\n\n if not isinstance(self.layers[0], InputSecureLayer):\n raise ValueError(\"The first layer has to be input layer\")\n if not isinstance(self.layers[-1], OutputSecureLayer):\n raise ValueError(\"The last layer has to be output layer\")\n\n self.input_layer = layers[0]\n self.output_layer = layers[-1]\n\n for i in range(len(self.layers) - 1):\n prev_layer = self.layers[i]\n next_layer = self.layers[i + 1]\n prev_layer.register_next_layer(next_layer)\n next_layer.register_prev_layer(prev_layer)\n\n def load_context(self, context: SecureLayerContext):\n ContextRankBase.load_context(self, context)\n self.context = context\n\n for layer in self.layers:\n layer.load_context(context)\n\n def feed_input(self, img):\n if self.is_client():\n self.input_layer.feed_input(img)\n else:\n raise Exception(\"Only the client, not the server, can input.\")\n\n def offline(self):\n party = \"Server\" if self.is_server() else \"Client\"\n for layer in self.layers:\n with NamedTimerInstance(f\"{party} Offline of {layer.name}\"):\n layer.offline()\n torch_sync()\n\n def online(self):\n party = \"Server\" if self.is_server() else \"Client\"\n for layer in 
self.layers:\n with NamedTimerInstance(f\"{party} Online of {layer.name}\"):\n layer.online()\n # torch_sync()\n\n def get_output(self):\n return self.output_layer.get_output()\n\n def get_argmax_output(self):\n _, predicted = torch.max(self.get_output(), 1)\n return predicted\n\n\ndef test_secure_nn():\n test_name = \"test_secure_nn\"\n print(f\"\\nTest for {test_name}: Start\")\n data_bit = 5\n work_bit = 17\n data_range = 2 ** data_bit\n q_16 = 12289\n # q_23 = 786433\n q_23 = 7340033\n # q_23 = 8273921\n input_img_hw = 16\n input_channel = 3\n pow_to_div = 2\n\n fhe_builder_16 = FheBuilder(q_16, 2048)\n fhe_builder_23 = FheBuilder(q_23, 8192)\n\n input_shape = [input_channel, input_img_hw, input_img_hw]\n\n context = SecureLayerContext(work_bit, data_bit, q_16, q_23, fhe_builder_16, fhe_builder_23, test_name+\"_context\")\n\n input_layer = InputSecureLayer(input_shape, \"input_layer\")\n conv1 = Conv2dSecureLayer(3, 5, 3, \"conv1\", padding=1)\n relu1 = ReluSecureLayer(\"relu1\")\n trunc1 = TruncSecureLayer(\"trunc1\")\n pool1 = Maxpool2x2SecureLayer(\"pool1\")\n conv2 = Conv2dSecureLayer(5, 10, 3, \"conv2\", padding=1)\n flatten = FlattenSecureLayer(\"flatten\")\n fc1 = FcSecureLayer(32, \"fc1\")\n output_layer = OutputSecureLayer(\"output_layer\")\n\n secure_nn = SecureNeuralNetwork(\"secure_nn\")\n secure_nn.load_layers([input_layer, conv1, pool1, relu1, trunc1, conv2, flatten, fc1, output_layer])\n # secure_nn.load_layers([input_layer, relu1, trunc1, output_layer])\n secure_nn.load_context(context)\n\n def generate_random_data(shape):\n return gen_unirand_int_grain(-data_range//2 + 1, data_range//2, get_prod(shape)).reshape(shape)\n\n conv1_w = generate_random_data(conv1.weight_shape)\n conv2_w = generate_random_data(conv2.weight_shape)\n fc1_w = generate_random_data(fc1.weight_shape)\n\n def check_correctness(input_img, output):\n torch_pool1 = torch.nn.MaxPool2d(2)\n\n x = input_img.to(Config.device).double()\n x = x.reshape([1] + list(x.shape))\n x = pmod(F.conv2d(x, conv1_w.to(Config.device).double(), padding=1), q_23)\n x = pmod(F.relu(nmod(x, q_23)), q_23)\n x = pmod(torch_pool1(nmod(x, q_23)), q_23)\n x = pmod(x // (2 ** pow_to_div), q_23)\n x = pmod(F.conv2d(x, conv2_w.to(Config.device).double(), padding=1), q_23)\n x = x.view(-1)\n x = pmod(torch.mm(x.view(1, -1), fc1_w.to(Config.device).double().t()).view(-1), q_23)\n\n expected = x\n actual = pmod(output, q_23)\n if len(expected.shape) == 4 and expected.shape[0] == 1:\n expected = expected.reshape(expected.shape[1:])\n compare_expected_actual(expected, actual, name=test_name, get_relative=True)\n\n def test_server():\n rank = Config.server_rank\n init_communicate(rank)\n context.set_rank(rank)\n\n comm_fhe_16 = CommFheBuilder(rank, fhe_builder_16, \"fhe_builder_16\")\n comm_fhe_23 = CommFheBuilder(rank, fhe_builder_23, \"fhe_builder_23\")\n comm_fhe_16.recv_public_key()\n comm_fhe_23.recv_public_key()\n comm_fhe_16.wait_and_build_public_key()\n comm_fhe_23.wait_and_build_public_key()\n\n conv1.load_weight(conv1_w)\n conv2.load_weight(conv2_w)\n fc1.load_weight(fc1_w)\n trunc1.set_div_to_pow(pow_to_div)\n\n with NamedTimerInstance(\"Server Offline\"):\n secure_nn.offline()\n torch_sync()\n\n with NamedTimerInstance(\"Server Online\"):\n secure_nn.online()\n torch_sync()\n\n end_communicate()\n\n def test_client():\n rank = Config.client_rank\n init_communicate(rank)\n context.set_rank(rank)\n\n fhe_builder_16.generate_keys()\n fhe_builder_23.generate_keys()\n comm_fhe_16 = CommFheBuilder(rank, fhe_builder_16, 
\"fhe_builder_16\")\n comm_fhe_23 = CommFheBuilder(rank, fhe_builder_23, \"fhe_builder_23\")\n comm_fhe_16.send_public_key()\n comm_fhe_23.send_public_key()\n\n input_img = generate_random_data(input_shape)\n trunc1.set_div_to_pow(pow_to_div)\n secure_nn.feed_input(input_img)\n\n with NamedTimerInstance(\"Client Offline\"):\n secure_nn.offline()\n torch_sync()\n\n with NamedTimerInstance(\"Client Online\"):\n secure_nn.online()\n torch_sync()\n\n check_correctness(input_img, secure_nn.get_output())\n end_communicate()\n\n marshal_funcs([test_server, test_client])\n print(f\"\\nTest for {test_name}: End\")\n\nif __name__ == \"__main__\":\n test_secure_nn()\n",
"import sys\nfrom itertools import product\n\nimport torch\n\nfrom comm import CommBase, CommFheBuilder, BlobFheEnc, BlobTorch, BlobFheEnc2D, init_communicate, end_communicate, \\\n torch_sync, TrafficRecord\nfrom config import Config\nfrom dgk_single_thread import DgkBase\nfrom enc_refresher import EncRefresherServer, EncRefresherClient\nfrom fhe import FheBuilder\nfrom logger_utils import Logger\nfrom timer_utils import NamedTimerInstance\nfrom torch_utils import gen_unirand_int_grain, pmod, shuffle_torch, compare_expected_actual, marshal_funcs, \\\n argparser_distributed, warming_up_cuda\n\n\nclass DgkCommBase(DgkBase):\n modulus = None\n def __init__(self, num_elem, q_23, q_16, work_bit, data_bit,\n fhe_builder_16: FheBuilder, fhe_builder_23: FheBuilder, name: str, rank: int, class_name: str):\n super(DgkCommBase, self).__init__(num_elem, q_23, q_16, work_bit, data_bit)\n self.class_name = class_name\n self.name = name\n self.rank = rank\n self.fhe_builder_16 = fhe_builder_16\n self.fhe_builder_23 = fhe_builder_23\n self.comm_base = CommBase(rank, name)\n self.comm_fhe_16 = CommFheBuilder(rank, fhe_builder_16, self.sub_name(\"comm_fhe_16\"))\n self.comm_fhe_23 = CommFheBuilder(rank, fhe_builder_23, self.sub_name(\"comm_fhe_23\"))\n\n assert(self.fhe_builder_16.modulus == self.q_16)\n assert(self.fhe_builder_23.modulus == self.q_23)\n\n def generate_random(self):\n return gen_unirand_int_grain(0, self.modulus - 1, self.num_elem)\n\n def mod_to_modulus(self, input):\n return pmod(input, self.modulus)\n\n def sub_name(self, sub_name: str) -> str:\n return self.name + '_' + self.class_name +'_' + sub_name\n\n\nclass DgkBitCommon(DgkBase):\n def __init__(self, num_elem, q_23, q_16, work_bit, data_bit, name: str,\n comm_base: CommBase, comm_fhe_16: CommFheBuilder, comm_fhe_23: CommFheBuilder):\n super(DgkBitCommon, self).__init__(num_elem, q_23, q_16, work_bit, data_bit)\n self.comm_base = comm_base\n self.comm_fhe_16 = comm_fhe_16\n self.comm_fhe_23 = comm_fhe_23\n self.name = name\n\n self.beta_i_c = BlobFheEnc2D(self.decomp_bit_shape, comm_fhe_16, self.sub_name(\"beta_i_c\"))\n self.delta_b_c = BlobFheEnc(self.num_elem, comm_fhe_23, self.sub_name(\"delta_b_c\"))\n self.z_work_c = BlobFheEnc(self.num_elem, comm_fhe_23, self.sub_name(\"z_work_c\"))\n self.c_i_c = BlobFheEnc2D(self.sum_shape, comm_fhe_16, self.sub_name(\"c_i_c\"))\n self.dgk_x_leq_y_c = BlobFheEnc(self.num_elem, comm_fhe_23, self.sub_name(\"dgk_x_leq_y_c\"))\n self.delta_xor_c = BlobFheEnc(self.num_elem, comm_fhe_23, self.sub_name(\"delta_xor_c\"))\n self.fhe_pre_corr_mod = BlobFheEnc(self.num_elem, comm_fhe_23, self.sub_name(\"fhe_pre_corr_mod\"))\n self.fhe_corr_mod_c = BlobFheEnc(self.num_elem, comm_fhe_23, self.sub_name(\"fhe_corr_mod_c\"))\n\n self.z_s = BlobTorch(self.num_elem, torch.float, self.comm_base, self.sub_name(\"z_s\"))\n # self.beta_i_s = BlobTorch(self.decomp_bit_shape, torch.int16, self.comm_base, self.sub_name(\"beta_i_s\"), comp_dtype=torch.float)\n self.beta_i_s = BlobTorch(self.decomp_bit_shape, torch.float, self.comm_base, self.sub_name(\"beta_i_s\"), comp_dtype=torch.float)\n # self.c_i_s = BlobTorch(self.sum_shape, torch.int16, self.comm_base, self.sub_name(\"c_i_s\"), comp_dtype=torch.float)\n self.c_i_s = BlobTorch(self.sum_shape, torch.float, self.comm_base, self.sub_name(\"c_i_s\"), comp_dtype=torch.float)\n self.delta_b_s = BlobTorch(self.num_elem, torch.float, self.comm_base, self.sub_name(\"delta_b_s\"))\n self.z_work_s = BlobTorch(self.num_elem, torch.float, self.comm_base, 
self.sub_name(\"z_work_s\"))\n self.pre_corr_mod_s = BlobTorch(self.num_elem, torch.float, self.comm_base, self.sub_name(\"pre_corr_mod_s\"))\n\n self.offline_server_send = [self.c_i_c, self.dgk_x_leq_y_c, self.delta_xor_c, self.fhe_corr_mod_c]\n self.offline_client_send = [self.beta_i_c, self.delta_b_c, self.z_work_c, self.fhe_pre_corr_mod]\n self.online_server_send = [self.z_s, self.c_i_s]\n self.online_client_send = [self.beta_i_s, self.delta_b_s, self.z_work_s, self.pre_corr_mod_s]\n\n def sub_name(self, sub_name: str) -> str:\n return self.name + '_DgkBit_' + sub_name\n\n def decomp_to_bit(self, x, res=None):\n tmp_x = torch.clone(x).to(Config.device)\n res = torch.zeros([self.work_bit, self.num_elem]) if res is None else res\n for i in range(self.work_bit):\n res[i] = pmod(tmp_x, 2)\n tmp_x //= 2\n return res\n\n\nclass DgkBitServer(DgkBase):\n def __init__(self, num_elem, q_23, q_16, work_bit, data_bit,\n fhe_builder_16: FheBuilder, fhe_builder_23: FheBuilder, name: str, is_shuffle=None):\n super(DgkBitServer, self).__init__(num_elem, q_23, q_16, work_bit, data_bit, name=name)\n self.fhe_builder_16 = fhe_builder_16\n self.fhe_builder_23 = fhe_builder_23\n self.comm_base = CommBase(Config.server_rank, name)\n self.comm_fhe_16 = CommFheBuilder(Config.server_rank, self.fhe_builder_16, name+'_'+\"comm_fhe_16\")\n self.comm_fhe_23 = CommFheBuilder(Config.server_rank, self.fhe_builder_23, name+'_'+\"comm_fhe_23\")\n self.common = DgkBitCommon(num_elem, q_23, q_16, work_bit, data_bit, name,\n self.comm_base, self.comm_fhe_16, self.comm_fhe_23)\n self.is_shuffle = Config.is_shuffle if is_shuffle is None else is_shuffle\n\n def xor_fhe(self, alpha_i, fhe_enc, mask_s, modulus, change_sign):\n if modulus == self.q_16:\n fhe_builder = self.fhe_builder_16\n elif modulus == self.q_23:\n fhe_builder = self.fhe_builder_23\n else:\n raise Exception(f\"Unknown modulus: {modulus}\")\n zeros = torch.zeros_like(alpha_i)\n mult = torch.where(alpha_i == change_sign, modulus - 1 + zeros, 1 + zeros)\n bias = torch.where(alpha_i == change_sign, 1 + zeros, zeros)\n fhe_mult = fhe_builder.build_plain_from_torch(mult)\n fhe_bias = fhe_builder.build_plain_from_torch(bias)\n fhe_mask_s = fhe_builder.build_plain_from_torch(mask_s)\n fhe_enc *= fhe_mult\n fhe_enc += fhe_bias\n fhe_enc += fhe_mask_s\n del fhe_mult, fhe_bias, fhe_mask_s\n return fhe_enc\n\n def xor_alpha_known_offline(self, alpha_i, fhe_beta_i_c, mask_s):\n assert(len(alpha_i) == self.work_bit)\n assert(len(fhe_beta_i_c) == self.work_bit)\n assert(len(mask_s) == self.work_bit)\n return [self.xor_fhe(alpha_i[i], fhe_beta_i_c[i], mask_s[i], self.q_16, 1) for i in range(self.work_bit)]\n\n def xor_alpha_known_online(self, alpha_i, beta_i_s, mask_s, modulus):\n res = torch.where(alpha_i == 1, -beta_i_s, beta_i_s)\n res += modulus - mask_s\n res.fmod_(modulus)\n return res\n\n def generate_fhe_shuffled(self, shuffle_order, enc):\n num_batch = self.num_work_batch\n fhe_builder = self.fhe_builder_16\n # res = [fhe_builder.build_enc(self.num_elem) for i in range(num_batch)]\n res = [None for i in range(num_batch)]\n zeros = torch.tensor(0).type(torch.int64)\n shuffle_order = shuffle_order.cpu()\n for dst, src in product(range(num_batch), range(num_batch)):\n mask = torch.where(shuffle_order[src, :] == dst, zeros + 1, zeros)\n # print(torch.sum(mask))\n fhe_mask = fhe_builder.build_plain_from_torch(mask)\n enc_tmp = enc[src].copy()\n # fhe_builder.noise_budget(enc_tmp, \"enc_tmp\")\n enc_tmp *= fhe_mask\n # fhe_builder.noise_budget(enc_tmp, \"enc_tmp\")\n if 
src == 0:\n res[dst] = enc_tmp\n else:\n res[dst] += enc_tmp\n return res\n\n def sum_c_i_offline(self, delta_a, fhe_beta_i_c, fhe_alpha_beta_xor_c, s, alpha_i,\n ci_mask_s, mult_mask_s, shuffle_order):\n # the last row of sum_xor is c_{-1}, which helps check the case with x == y\n fhe_builder = self.fhe_builder_16\n # fhe_sum_xor = [fhe_builder.build_enc(self.num_elem) for i in range(self.num_work_batch)]\n fhe_sum_xor = [None for i in range(self.num_work_batch)]\n fhe_sum_xor[self.work_bit - 1] = fhe_builder.build_enc(self.num_elem)\n for i in range(self.work_bit - 1)[::-1]:\n fhe_sum_xor[i] = fhe_sum_xor[i + 1].copy()\n fhe_sum_xor[i] += fhe_alpha_beta_xor_c[i + 1]\n fhe_delta_a = fhe_builder.build_plain_from_torch(delta_a)\n fhe_sum_xor[self.work_bit] = fhe_sum_xor[0].copy()\n fhe_sum_xor[self.work_bit] += fhe_alpha_beta_xor_c[0]\n fhe_sum_xor[self.work_bit] += fhe_delta_a\n del fhe_delta_a\n\n for i in range(self.work_bit)[::-1]:\n fhe_mult_3 = fhe_builder.build_plain_from_torch(pmod(3 * mult_mask_s[i].cpu(), self.q_16))\n fhe_mult_mask_s = fhe_builder.build_plain_from_torch(mult_mask_s[i])\n masked_s = pmod(s.type(torch.int64) * mult_mask_s[i].type(torch.int64), self.q_16).type(torch.float32)\n # print(\"s * mult_mask_s[i]\", torch.max(masked_s))\n fhe_s = fhe_builder.build_plain_from_torch(masked_s)\n fhe_alpha_i = fhe_builder.build_plain_from_torch(alpha_i[i] * mult_mask_s[i])\n fhe_ci_mask_s = fhe_builder.build_plain_from_torch(ci_mask_s[i])\n fhe_beta_i_c[i] *= fhe_mult_mask_s\n fhe_sum_xor[i] *= fhe_mult_3\n fhe_sum_xor[i] -= fhe_beta_i_c[i]\n fhe_sum_xor[i] += fhe_s\n fhe_sum_xor[i] += fhe_alpha_i\n fhe_sum_xor[i] += fhe_ci_mask_s\n\n del fhe_mult_3, fhe_mult_mask_s, fhe_s, fhe_alpha_i, fhe_ci_mask_s\n\n fhe_mult_mask_s = fhe_builder.build_plain_from_torch(mult_mask_s[self.work_bit])\n fhe_ci_mask_s = fhe_builder.build_plain_from_torch(ci_mask_s[self.work_bit])\n fhe_sum_xor[self.work_bit] *= fhe_mult_mask_s\n fhe_sum_xor[self.work_bit] += fhe_ci_mask_s\n\n del fhe_mult_mask_s, fhe_ci_mask_s\n\n if self.is_shuffle:\n with NamedTimerInstance(\"Shuffle\"):\n refresher = EncRefresherServer(self.sum_shape, fhe_builder, self.sub_name(\"shuffle_refresher\"))\n with NamedTimerInstance(\"refresh\"):\n new_fhe_sum_xor = refresher.request(fhe_sum_xor)\n del fhe_sum_xor\n fhe_sum_xor = self.generate_fhe_shuffled(shuffle_order, new_fhe_sum_xor)\n del refresher\n\n return fhe_sum_xor\n\n def sum_c_i_common(self, alpha_beta_xor_share):\n sum_xor = torch.zeros(self.sum_shape).to(Config.device)\n # the last row of sum_xor is c_{-1}, which helps check the case with x == y\n for i in range(self.work_bit - 1)[::-1]:\n sum_xor[i] = sum_xor[i + 1] + alpha_beta_xor_share[i + 1]\n return sum_xor\n\n def sum_c_i_online(self, beta_i_s, alpha_beta_xor_s, ci_mask_s, mult_mask_s, shuffle_order):\n sum_xor = self.fast_zeros_sum_xor\n # the last row of sum_xor is c_{-1}, which helps check the case with x == y\n for i in range(self.work_bit - 1)[::-1]:\n sum_xor[i] = sum_xor[i + 1] + alpha_beta_xor_s[i + 1]\n sum_xor[self.work_bit] = sum_xor[0] + alpha_beta_xor_s[0]\n for i in range(self.work_bit)[::-1]:\n sum_xor[i] = 3 * sum_xor[i] - beta_i_s[i]\n sum_xor = sum_xor.double() * mult_mask_s\n sum_xor -= ci_mask_s\n sum_xor = pmod(sum_xor, self.q_16).float().to(Config.device)\n if self.is_shuffle:\n sum_xor = shuffle_torch(sum_xor, shuffle_order)\n return sum_xor\n\n def xor_delta_known_offline(self, alpha_i, fhe_beta_i_c, mask_s):\n return self.xor_fhe(alpha_i, fhe_beta_i_c, mask_s, self.q_23, 0)\n\n def 
xor_delta_known_online(self, alpha_i, beta_i_s, mask_s, modulus):\n res = torch.where(alpha_i == 1, beta_i_s, -beta_i_s)\n res += modulus - mask_s\n res.fmod_(modulus)\n return res\n\n def mod_div_offline(self):\n fhe_builder = self.fhe_builder_23\n\n self.elem_zeros = torch.zeros(self.num_elem).to(Config.device)\n self.correct_mod_div_work_mult = torch.where((self.r < self.nullify_threshold),\n self.elem_zeros,\n self.elem_zeros + self.q_23 // self.work_range).double()\n self.correct_mod_div_work_mask_s = gen_unirand_int_grain(0, self.q_23 - 1, self.num_elem).to(Config.device)\n fhe_mult = fhe_builder.build_plain_from_torch(self.correct_mod_div_work_mult)\n fhe_bias = fhe_builder.build_plain_from_torch(self.correct_mod_div_work_mask_s)\n fhe_correct_mod_div_work = self.common.fhe_pre_corr_mod.get_recv()\n fhe_correct_mod_div_work *= fhe_mult\n fhe_correct_mod_div_work += fhe_bias\n del fhe_mult, fhe_bias\n\n self.common.fhe_corr_mod_c.send(fhe_correct_mod_div_work)\n\n def mod_div_online(self):\n pre_correct_mod_div_s = self.common.pre_corr_mod_s.get_recv()\n\n self.correct_mod_div_work_s = pmod(\n self.correct_mod_div_work_mult * pre_correct_mod_div_s - self.correct_mod_div_work_mask_s, self.q_23)\n\n def offline_recv(self):\n for blob in self.common.offline_client_send:\n blob.prepare_recv()\n\n def online_recv(self):\n for blob in self.common.online_client_send:\n blob.prepare_recv()\n\n def offline(self):\n self.offline_recv()\n\n self.delta_a = gen_unirand_int_grain(0, 1, self.num_elem).to(Config.device)\n # self.s = pmod(1 - 2 * self.delta_a, self.q_16)\n self.s = pmod(1 - 2 * self.delta_a, self.q_16)\n # self.r = gen_unirand_int_grain(0, 2 ** (self.work_bit + 1) - 1, self.num_elem).to(Config.device)\n self.r = gen_unirand_int_grain(0, self.q_23 - 1, self.num_elem).to(Config.device)\n self.alpha = pmod(self.r, self.work_range)\n self.alpha_i = self.common.decomp_to_bit(self.alpha).to(Config.device)\n self.beta_i_mask_s = gen_unirand_int_grain(0, self.q_16 - 1, self.decomp_bit_shape).to(Config.device)\n self.ci_mask_s = gen_unirand_int_grain(0, self.q_16 - 1, [self.work_bit + 1, self.num_elem]).to(Config.device)\n self.ci_mult_mask_s = gen_unirand_int_grain(1, self.q_16 - 1, [self.work_bit + 1, self.num_elem]).to(Config.device)\n self.shuffle_order = torch.rand([self.work_bit + 1, self.num_elem]).argsort(dim=0).to(Config.device)\n self.delta_xor_mask_s = gen_unirand_int_grain(0, self.q_16 - 1, self.num_elem).to(Config.device)\n self.dgk_x_leq_y_mask_s = gen_unirand_int_grain(0, self.q_23 - 1, self.num_elem).to(Config.device)\n self.fast_zeros_sum_xor = torch.zeros(self.sum_shape).to(Config.device)\n\n self.mod_div_offline()\n\n refresher_ab_xor_c = EncRefresherServer(\n self.decomp_bit_shape, self.fhe_builder_16, self.common.sub_name(\"refresher_ab_xor_c\"))\n\n fhe_beta_i_c = self.common.beta_i_c.get_recv()\n fhe_beta_i_c_for_sum_c = [fhe_beta_i_c[i].copy() for i in range(len(fhe_beta_i_c))]\n fhe_alpha_beta_xor_c = self.xor_alpha_known_offline(self.alpha_i, fhe_beta_i_c, self.beta_i_mask_s)\n fhe_alpha_beta_xor_c = refresher_ab_xor_c.request(fhe_alpha_beta_xor_c)\n fhe_c_i_c = self.sum_c_i_offline(self.delta_a, fhe_beta_i_c_for_sum_c, fhe_alpha_beta_xor_c, self.s,\n self.alpha_i, self.ci_mask_s, self.ci_mult_mask_s, self.shuffle_order)\n self.common.c_i_c.send(fhe_c_i_c)\n\n fhe_delta_b_c = self.common.delta_b_c.get_recv()\n fhe_delta_xor_c = self.xor_delta_known_offline(self.delta_a, fhe_delta_b_c, self.delta_xor_mask_s)\n self.common.delta_xor_c.send(fhe_delta_xor_c)\n\n 
fhe_z_work_c = self.common.z_work_c.get_recv()\n fhe_z_work_c -= fhe_delta_xor_c\n fhe_z_work_c -= self.fhe_builder_23.build_plain_from_torch(self.dgk_x_leq_y_mask_s)\n self.common.dgk_x_leq_y_c.send(fhe_z_work_c)\n\n for ct in fhe_c_i_c + fhe_beta_i_c + fhe_beta_i_c_for_sum_c:\n del ct\n del fhe_beta_i_c, fhe_beta_i_c_for_sum_c, fhe_alpha_beta_xor_c, fhe_c_i_c, fhe_delta_b_c, fhe_delta_xor_c, fhe_z_work_c\n del refresher_ab_xor_c\n\n self.online_recv()\n torch_sync()\n\n def online(self, y_sub_x_s):\n self.z_s = pmod(y_sub_x_s + self.work_range + self.r, self.q_23)\n self.common.z_s.send(self.z_s)\n\n beta_i_s = self.common.beta_i_s.get_recv()\n alpha_beta_xor_s = self.xor_alpha_known_online(self.alpha_i, beta_i_s, self.beta_i_mask_s, self.q_16)\n c_i_s = self.sum_c_i_online(beta_i_s, alpha_beta_xor_s, self.ci_mask_s, self.ci_mult_mask_s, self.shuffle_order)\n self.common.c_i_s.send(c_i_s)\n\n\n delta_b_s = self.common.delta_b_s.get_recv()\n delta_xor_s = self.xor_delta_known_online(self.delta_a, delta_b_s, self.delta_xor_mask_s, self.q_23)\n z_work_s = self.common.z_work_s.get_recv()\n self.mod_div_online()\n\n self.dgk_x_leq_y_s = pmod(\n z_work_s - ((self.r // self.work_range) + delta_xor_s) + self.correct_mod_div_work_s + self.dgk_x_leq_y_mask_s,\n self.q_23)\n\n\nclass DgkBitClient(DgkBase):\n z: torch.Tensor\n def __init__(self, num_elem, q_23, q_16, work_bit, data_bit,\n fhe_builder_16: FheBuilder, fhe_builder_23: FheBuilder, name: str, is_shuffle=None):\n super(DgkBitClient, self).__init__(num_elem, q_23, q_16, work_bit, data_bit, name=name)\n self.fhe_builder_16 = fhe_builder_16\n self.fhe_builder_23 = fhe_builder_23\n self.comm_base = CommBase(Config.client_rank, name)\n self.comm_fhe_16 = CommFheBuilder(Config.client_rank, self.fhe_builder_16, name+'_'+\"comm_fhe_16\")\n self.comm_fhe_23 = CommFheBuilder(Config.client_rank, self.fhe_builder_23, name+'_'+\"comm_fhe_23\")\n self.common = DgkBitCommon(num_elem, q_23, q_16, work_bit, data_bit, name,\n self.comm_base, self.comm_fhe_16, self.comm_fhe_23)\n self.is_shuffle = Config.is_shuffle if is_shuffle is None else is_shuffle\n\n def sum_c_i_offline(self):\n if self.is_shuffle:\n # self.sum_c_refresher = EncRefresherClient(self.sum_shape, self.fhe_builder_16, self.sub_name(\"shuffle_refresher\"))\n # self.sum_c_refresher.prepare_recv()\n self.sum_c_refresher.response()\n del self.sum_c_refresher\n\n def mod_div_offline(self):\n fhe_builder = self.fhe_builder_23\n self.elem_zeros = torch.zeros(self.num_elem).to(Config.device)\n self.pre_mod_div_c = gen_unirand_int_grain(0, self.q_23 - 1, self.num_elem).to(Config.device)\n fhe_correct_mod_div_work = fhe_builder.build_enc_from_torch(self.pre_mod_div_c)\n\n self.common.fhe_pre_corr_mod.send(fhe_correct_mod_div_work)\n fhe_corr_mod_c = self.common.fhe_corr_mod_c.get_recv()\n self.correct_mod_div_work_c = fhe_builder.decrypt_to_torch(fhe_corr_mod_c)\n\n def mod_div_online(self, z):\n pre_correct_mod_div_s = torch.where(z < self.nullify_threshold, self.elem_zeros + 1, self.elem_zeros)\n pre_correct_mod_div_s = pmod(pre_correct_mod_div_s - self.pre_mod_div_c, self.q_23)\n self.common.pre_corr_mod_s.send(pre_correct_mod_div_s)\n\n def offline_recv(self):\n for blob in self.common.offline_server_send:\n blob.prepare_recv()\n\n def online_recv(self):\n for blob in self.common.online_server_send:\n blob.prepare_recv()\n\n def offline(self):\n self.offline_recv()\n\n self.beta_i_c = gen_unirand_int_grain(0, self.q_16 - 1, self.decomp_bit_shape).to(Config.device)\n self.delta_b_c = 
gen_unirand_int_grain(0, self.q_23 - 1, self.num_elem).to(Config.device)\n self.z_work_c = gen_unirand_int_grain(0, self.q_23 - 1, self.num_elem).to(Config.device)\n self.beta_i_zeros = torch.zeros_like(self.beta_i_c)\n self.fast_ones = torch.ones(self.num_elem).to(Config.device)\n self.fast_zeros = torch.zeros(self.num_elem).to(Config.device)\n self.fast_ones_c_i = torch.ones(self.sum_shape).float().to(Config.device)\n self.fast_zeros_c_i = torch.zeros(self.sum_shape).float().to(Config.device)\n\n self.common.beta_i_c.send_from_torch(self.beta_i_c)\n self.common.delta_b_c.send_from_torch(self.delta_b_c)\n self.common.z_work_c.send_from_torch(self.z_work_c)\n\n if self.is_shuffle:\n self.sum_c_refresher = EncRefresherClient(self.sum_shape, self.fhe_builder_16, self.sub_name(\"shuffle_refresher\"))\n\n self.mod_div_offline()\n\n refresher_ab_xor_c = EncRefresherClient(\n self.decomp_bit_shape, self.fhe_builder_16, self.common.sub_name(\"refresher_ab_xor_c\"))\n refresher_ab_xor_c.response()\n self.sum_c_i_offline()\n\n self.c_i_c = self.common.c_i_c.get_recv_decrypt()\n self.delta_xor_c = self.common.delta_xor_c.get_recv_decrypt()\n self.dgk_x_leq_y_c = self.common.dgk_x_leq_y_c.get_recv_decrypt()\n self.dgk_x_leq_y_c = pmod(self.dgk_x_leq_y_c + self.correct_mod_div_work_c, self.q_23)\n\n self.online_recv()\n torch_sync()\n\n def online(self, y_sub_x_c):\n z_s = self.common.z_s.get_recv()\n z = pmod(z_s + y_sub_x_c.to(Config.device), self.q_23)\n self.z = z\n beta = pmod(z, self.work_range)\n beta_i = self.common.decomp_to_bit(beta, res=self.beta_i_zeros).to(Config.device)\n beta_i_s = pmod(beta_i.to(Config.device) - self.beta_i_c.to(Config.device), self.q_16)\n self.common.beta_i_s.send(beta_i_s)\n\n c_i_s = self.common.c_i_s.get_recv()\n c_i = pmod(c_i_s + self.c_i_c, self.q_16)\n check_zeros = torch.where(c_i == 0, torch.tensor(1).to(Config.device), torch.tensor(0).to(Config.device))\n delta_b = torch.where(torch.sum(check_zeros, 0) > 0,\n self.fast_ones, self.fast_zeros)\n delta_b_s = pmod(delta_b - self.delta_b_c, self.q_23)\n self.common.delta_b_s.send(delta_b_s)\n z_work_s = pmod(z // self.work_range - self.z_work_c, self.q_23)\n self.common.z_work_s.send(z_work_s)\n self.mod_div_online(z)\n\n\ndef test_dgk(input_sid, master_address, master_port, num_elem=2**17):\n print(\"\\nTest for Dgk Basic: Start\")\n data_bit = 20\n work_bit = 20\n # q_23 = 786433\n # q_23 = 8273921\n # q_23 = 4079617\n # n_23, q_23 = 8192, 7340033\n n_23, q_23 = Config.n_23, Config.q_23\n # n_16, q_16 = 2048, 12289\n # n_16, q_16 = 8192, 65537\n n_16, q_16 = Config.n_16, Config.q_16\n print(f\"Number of element: {num_elem}\")\n\n data_range = 2 ** data_bit\n work_range = 2 ** work_bit\n\n def check_correctness(x, y, dgk_x_leq_y_s, dgk_x_leq_y_c):\n x = torch.where(x < q_23 // 2, x, x - q_23).to(Config.device)\n y = torch.where(y < q_23 // 2, y, y - q_23).to(Config.device)\n expected_x_leq_y = (x <= y)\n dgk_x_leq_y_recon = pmod(dgk_x_leq_y_s + dgk_x_leq_y_c, q_23)\n compare_expected_actual(expected_x_leq_y, dgk_x_leq_y_recon, name=\"DGK x <= y\", get_relative=True)\n print(torch.sum(expected_x_leq_y != dgk_x_leq_y_recon))\n\n def check_correctness_mod_div(r, z, correct_mod_div_work_s, correct_mod_div_work_c):\n elem_zeros = torch.zeros(num_elem).to(Config.device)\n expected = torch.where(r > z, q_23//work_range + elem_zeros, elem_zeros)\n actual = pmod(correct_mod_div_work_s + correct_mod_div_work_c, q_23)\n compare_expected_actual(expected, actual, get_relative=True, name=\"mod_div_online\")\n\n def 
test_server():\n rank = Config.server_rank\n init_communicate(Config.server_rank, master_address=master_address, master_port=master_port)\n warming_up_cuda()\n traffic_record = TrafficRecord()\n\n fhe_builder_16 = FheBuilder(q_16, Config.n_16)\n fhe_builder_23 = FheBuilder(q_23, Config.n_23)\n comm_fhe_16 = CommFheBuilder(rank, fhe_builder_16, \"fhe_builder_16\")\n comm_fhe_23 = CommFheBuilder(rank, fhe_builder_23, \"fhe_builder_23\")\n torch_sync()\n comm_fhe_16.recv_public_key()\n comm_fhe_23.recv_public_key()\n comm_fhe_16.wait_and_build_public_key()\n comm_fhe_23.wait_and_build_public_key()\n\n dgk = DgkBitServer(num_elem, q_23, q_16, work_bit, data_bit, fhe_builder_16, fhe_builder_23, \"DgkBitTest\")\n\n x_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"x\")\n y_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"y\")\n y_sub_x_s_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"y_sub_x_s\")\n x_blob.prepare_recv()\n y_blob.prepare_recv()\n y_sub_x_s_blob.prepare_recv()\n torch_sync()\n x = x_blob.get_recv()\n y = y_blob.get_recv()\n y_sub_x_s = y_sub_x_s_blob.get_recv()\n\n torch_sync()\n with NamedTimerInstance(\"Server Offline\"):\n dgk.offline()\n # y_sub_x_s = pmod(y_s.to(Config.device) - x_s.to(Config.device), q_23)\n torch_sync()\n traffic_record.reset(\"server-offline\")\n\n with NamedTimerInstance(\"Server Online\"):\n dgk.online(y_sub_x_s)\n traffic_record.reset(\"server-online\")\n\n dgk_x_leq_y_c_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"dgk_x_leq_y_c\")\n correct_mod_div_work_c_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"correct_mod_div_work_c\")\n z_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"z\")\n dgk_x_leq_y_c_blob.prepare_recv()\n correct_mod_div_work_c_blob.prepare_recv()\n z_blob.prepare_recv()\n torch_sync()\n dgk_x_leq_y_c = dgk_x_leq_y_c_blob.get_recv()\n correct_mod_div_work_c = correct_mod_div_work_c_blob.get_recv()\n z = z_blob.get_recv()\n check_correctness(x, y, dgk.dgk_x_leq_y_s, dgk_x_leq_y_c)\n check_correctness_mod_div(dgk.r, z, dgk.correct_mod_div_work_s, correct_mod_div_work_c)\n end_communicate()\n\n def test_client():\n rank = Config.client_rank\n init_communicate(rank, master_address=master_address, master_port=master_port)\n warming_up_cuda()\n traffic_record = TrafficRecord()\n\n fhe_builder_16 = FheBuilder(q_16, n_16)\n fhe_builder_23 = FheBuilder(q_23, n_23)\n fhe_builder_16.generate_keys()\n fhe_builder_23.generate_keys()\n comm_fhe_16 = CommFheBuilder(rank, fhe_builder_16, \"fhe_builder_16\")\n comm_fhe_23 = CommFheBuilder(rank, fhe_builder_23, \"fhe_builder_23\")\n torch_sync()\n comm_fhe_16.send_public_key()\n comm_fhe_23.send_public_key()\n\n dgk = DgkBitClient(num_elem, q_23, q_16, work_bit, data_bit, fhe_builder_16, fhe_builder_23, \"DgkBitTest\")\n\n x = gen_unirand_int_grain(-data_range // 2 + 1, data_range // 2, num_elem)\n y = gen_unirand_int_grain(-data_range // 2 + 1, data_range // 2, num_elem)\n x_c = gen_unirand_int_grain(-data_range // 2 + 1, data_range // 2, num_elem)\n y_c = gen_unirand_int_grain(-data_range // 2 + 1, data_range // 2, num_elem)\n x_s = pmod(x - x_c, q_23)\n y_s = pmod(y - y_c, q_23)\n y_sub_x_s = pmod(y_s - x_s, q_23)\n\n x_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"x\")\n y_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"y\")\n y_sub_x_s_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"y_sub_x_s\")\n torch_sync()\n x_blob.send(x)\n y_blob.send(y)\n y_sub_x_s_blob.send(y_sub_x_s)\n\n torch_sync()\n with 
NamedTimerInstance(\"Client Offline\"):\n dgk.offline()\n y_sub_x_c = pmod(y_c - x_c, q_23)\n traffic_record.reset(\"client-offline\")\n torch_sync()\n\n with NamedTimerInstance(\"Client Online\"):\n dgk.online(y_sub_x_c)\n traffic_record.reset(\"client-online\")\n\n dgk_x_leq_y_c_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"dgk_x_leq_y_c\")\n correct_mod_div_work_c_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"correct_mod_div_work_c\")\n z_blob = BlobTorch(num_elem, torch.float, dgk.comm_base, \"z\")\n torch_sync()\n dgk_x_leq_y_c_blob.send(dgk.dgk_x_leq_y_c)\n correct_mod_div_work_c_blob.send(dgk.correct_mod_div_work_c)\n z_blob.send(dgk.z)\n end_communicate()\n\n if input_sid == Config.both_rank:\n marshal_funcs([test_server, test_client])\n elif input_sid == Config.server_rank:\n marshal_funcs([test_server])\n elif input_sid == Config.client_rank:\n marshal_funcs([test_client])\n\n print(\"\\nTest for Dgk Basic: End\")\n\nif __name__ == \"__main__\":\n input_sid, master_address, master_port, test_to_run = argparser_distributed()\n sys.stdout = Logger()\n\n num_repeat = 10\n num_elem_try = [10000, 40000] + [2 ** i for i in range(10, 19)]\n # num_elem_try = [10000, 40000] + [2 ** i for i in range(10, 19)]\n # num_repeat = 1\n # num_elem_try = [2 ** 14]\n\n for _, num_elem in product(range(num_repeat), num_elem_try):\n test_dgk(input_sid, master_address, master_port, num_elem=num_elem)\n"
] |
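The DGK comparison in dgk_basic.py operates on the low work_bit bits of a masked share: decomp_to_bit peels them off least-significant first with a mod-2 / floor-divide loop. The same step as a minimal sketch over plain Python ints, independent of the torch tensors the source uses:

def decomp_to_bit(x, work_bit):
    # Little-endian bit decomposition over work_bit positions.
    bits = []
    for _ in range(work_bit):
        bits.append(x % 2)  # current least-significant bit
        x //= 2             # shift right by one bit
    return bits

assert decomp_to_bit(11, 5) == [1, 1, 0, 1, 0]  # 11 = 0b01011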
[
[
"torch.device",
"torch.nn.MaxPool2d",
"torch.Size",
"torch.zeros"
],
[
"torch.ones",
"torch.zeros",
"torch.clone",
"torch.sum",
"torch.zeros_like",
"torch.tensor",
"torch.rand",
"torch.where"
]
] |
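The torch calls listed above largely serve additive secret sharing over a prime modulus: a tensor x is split into a random client share x_c and a server share x_s = x - x_c mod q, so either share alone looks like uniform noise. A minimal round-trip sketch, where q is the q_23 value from the source's tests and pmod is a stand-in for the torch_utils helper of the same name:

import torch

q = 7340033  # q_23 in the source's tests

def pmod(x, m):
    # Non-negative remainder, the contract of the torch_utils pmod helper.
    return torch.remainder(x, m)

x = torch.randint(0, q, (4,), dtype=torch.int64)    # secret tensor
x_c = torch.randint(0, q, (4,), dtype=torch.int64)  # random client share
x_s = pmod(x - x_c, q)                              # server share
assert torch.equal(pmod(x_s + x_c, q), x)           # exact reconstruction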
bugchecker/hummingbot
|
[
"a7544f028b6d8a16a363058ea5f9968dd98dc589"
] |
[
"hummingbot/client/command/history_command.py"
] |
[
"from decimal import Decimal\n\nimport pandas as pd\nimport threading\nfrom typing import (\n Any,\n Dict,\n Set,\n Tuple,\n TYPE_CHECKING,\n)\nfrom hummingbot.client.performance_analysis import PerformanceAnalysis\nfrom hummingbot.core.utils.exchange_rate_conversion import ExchangeRateConversion\nfrom hummingbot.market.market_base import MarketBase\nfrom hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple\n\nERC = ExchangeRateConversion.get_instance()\ns_float_0 = float(0)\n\n\nif TYPE_CHECKING:\n from hummingbot.client.hummingbot_application import HummingbotApplication\n\n\nclass HistoryCommand:\n def history(self, # type: HummingbotApplication\n ):\n if threading.current_thread() != threading.main_thread():\n self.ev_loop.call_soon_threadsafe(self.history)\n return\n\n if not all(market.ready for market in self.markets.values()):\n self._notify(\" History stats are not available before Markets are ready.\")\n return\n self.list_trades()\n self.trade_performance_report()\n\n def balance_snapshot(self, # type: HummingbotApplication\n ) -> Dict[str, Dict[str, float]]:\n snapshot: Dict[str, Any] = {}\n for market_name in self.markets:\n balance_dict = self.markets[market_name].get_all_balances()\n balance_dict = {k.upper(): v for k, v in balance_dict.items()}\n\n for asset in self.assets:\n asset = asset.upper()\n if asset not in snapshot:\n snapshot[asset] = {}\n if asset in balance_dict:\n snapshot[asset][market_name] = balance_dict[asset]\n else:\n snapshot[asset][market_name] = 0.0\n return snapshot\n\n def balance_comparison_data_frame(self, # type: HummingbotApplication\n market_trading_pair_stats: Dict[MarketTradingPairTuple, any],\n ) -> pd.DataFrame:\n if len(self.starting_balances) == 0:\n self._notify(\" Balance snapshots are not available before bot starts\")\n return\n rows = []\n for market_trading_pair_tuple in self.market_trading_pair_tuples:\n market: MarketBase = market_trading_pair_tuple.market\n for asset in set(a.upper() for a in self.assets):\n asset_delta: Dict[str, float] = market_trading_pair_stats[market_trading_pair_tuple][\"asset\"].get(\n asset, {\"delta\": s_float_0})\n starting_balance = self.starting_balances.get(asset).get(market.name)\n current_balance = self.balance_snapshot().get(asset).get(market.name)\n rows.append([market.display_name,\n asset,\n float(starting_balance),\n float(current_balance),\n float(current_balance - starting_balance),\n float(asset_delta[\"delta\"]),\n ERC.adjust_token_rate(asset, Decimal(1))])\n df = pd.DataFrame(rows, index=None, columns=[\"Market\", \"Asset\", \"Starting\", \"Current\", \"Net_Delta\",\n \"Trade_Delta\", \"Conversion_Rate\"])\n return df\n\n def get_performance_analysis_with_updated_balance(self, # type: HummingbotApplication\n ) -> PerformanceAnalysis:\n performance_analysis = PerformanceAnalysis()\n dedup_set: Set[Tuple[str, str, bool]] = set()\n\n for market_trading_pair_tuple in self.market_trading_pair_tuples:\n for is_base in [True, False]:\n for is_starting in [True, False]:\n market_name = market_trading_pair_tuple.market.name\n asset_name = market_trading_pair_tuple.base_asset if is_base else market_trading_pair_tuple.quote_asset\n asset_name = asset_name.upper()\n if len(self.assets) == 0 or len(self.markets) == 0:\n # Prevent KeyError '***asset_name***'\n amount = self.starting_balances[asset_name][market_name]\n else:\n amount = self.starting_balances[asset_name][market_name] if is_starting \\\n else self.balance_snapshot()[asset_name][market_name]\n amount = 
float(amount)\n\n # Adding this check to prevent assets in the same market to be added multiple times\n if (market_name, asset_name, is_starting) not in dedup_set:\n dedup_set.add((market_name, asset_name, is_starting))\n performance_analysis.add_balances(asset_name, amount, is_base, is_starting)\n\n return performance_analysis\n\n def get_market_mid_price(self, # type: HummingbotApplication\n ) -> float:\n # Compute the current exchange rate. We use the first market_trading_pair_tuples because\n # if the trading pairs are different, such as WETH-DAI and ETH-USD, the currency\n # pairs above will contain the information in terms of the first trading pair.\n market_pair_info = self.market_trading_pair_tuples[0]\n market = market_pair_info.market\n buy_price = market.get_price(market_pair_info.trading_pair, True)\n sell_price = market.get_price(market_pair_info.trading_pair, False)\n price = float((buy_price + sell_price) / 2)\n return price\n\n def analyze_performance(self, # type: HummingbotApplication\n ):\n \"\"\" Calculate bot profitability and print to output pane \"\"\"\n if len(self.starting_balances) == 0:\n self._notify(\" Performance analysis is not available before bot starts\")\n return\n\n performance_analysis: PerformanceAnalysis = self.get_performance_analysis_with_updated_balance()\n price: float = self.get_market_mid_price()\n\n starting_token, starting_amount = performance_analysis.compute_starting(price)\n current_token, current_amount = performance_analysis.compute_current(price)\n delta_token, delta_amount = performance_analysis.compute_delta(price)\n return_performance = performance_analysis.compute_return(price)\n\n starting_amount = round(starting_amount, 3)\n current_amount = round(current_amount, 3)\n delta_amount = round(delta_amount, 3)\n return_performance = round(return_performance, 3)\n\n print_performance = \"\\n\"\n print_performance += \" Performance:\\n\"\n print_performance += \" - Starting Inventory Value: \" + str(starting_amount) + \" \" + starting_token + \"\\n\"\n print_performance += \" - Current Inventory Value: \" + str(current_amount) + \" \" + current_token + \"\\n\"\n print_performance += \" - Delta: \" + str(delta_amount) + \" \" + delta_token + \"\\n\"\n print_performance += \" - Return: \" + str(return_performance) + \"%\"\n self._notify(print_performance)\n\n def calculate_profitability(self) -> float:\n \"\"\" Determine the profitability of the trading bot. 
\"\"\"\n performance_analysis: PerformanceAnalysis = self.get_performance_analysis_with_updated_balance()\n price: float = self.get_market_mid_price()\n return_performance = performance_analysis.compute_return(price)\n return return_performance\n\n def trade_performance_report(self, # type: HummingbotApplication\n ) -> pd.DataFrame:\n\n if len(self.market_trading_pair_tuples) == 0:\n self._notify(\" Performance analysis is not available before bot starts\")\n return\n\n try:\n raw_queried_trades = self._get_trades_from_session(self.init_time)\n current_strategy_name: str = self.markets_recorder.strategy_name\n primary_quote_asset: str = self.market_trading_pair_tuples[0].quote_asset.upper()\n performance_analysis: PerformanceAnalysis = PerformanceAnalysis()\n trade_performance_stats, market_trading_pair_stats = performance_analysis.calculate_trade_performance(\n current_strategy_name,\n self.market_trading_pair_tuples,\n raw_queried_trades,\n )\n trade_performance_status_line = []\n market_df_data: Set[Tuple[str, str, float, float, str, str]] = set()\n market_df_columns = [\"Market\", \"Trading_Pair\", \"Start_Price\", \"End_Price\",\n \"Total_Value_Delta\", \"Profit\"]\n\n for market_trading_pair_tuple, trading_pair_stats in market_trading_pair_stats.items():\n market_df_data.add((\n market_trading_pair_tuple.market.display_name,\n market_trading_pair_tuple.trading_pair.upper(),\n float(trading_pair_stats[\"starting_quote_rate\"]),\n float(trading_pair_stats[\"end_quote_rate\"]),\n f\"{trading_pair_stats['trading_pair_delta']:.8f} {primary_quote_asset}\",\n f\"{trading_pair_stats['trading_pair_delta_percentage']:.3f} %\"\n ))\n\n inventory_df: pd.DataFrame = self.balance_comparison_data_frame(market_trading_pair_stats)\n market_df: pd.DataFrame = pd.DataFrame(data=list(market_df_data), columns=market_df_columns)\n portfolio_delta: Decimal = trade_performance_stats[\"portfolio_delta\"]\n portfolio_delta_percentage: Decimal = trade_performance_stats[\"portfolio_delta_percentage\"]\n\n trade_performance_status_line.extend([\"\", \" Inventory:\"] +\n [\" \" + line for line in inventory_df.to_string().split(\"\\n\")])\n trade_performance_status_line.extend([\"\", \" Market Trading Pair Performance:\"] +\n [\" \" + line for line in market_df.to_string().split(\"\\n\")])\n\n trade_performance_status_line.extend(\n [\"\", \" Portfolio Performance:\"] +\n [f\" Quote Value Delta: {portfolio_delta:.7g} {primary_quote_asset}\"] +\n [f\" Delta Percentage: {portfolio_delta_percentage:.3f} %\"])\n\n self._notify(\"\\n\".join(trade_performance_status_line))\n\n except Exception:\n self.logger().error(\"Unexpected error running performance analysis.\", exc_info=True)\n self._notify(\"Error running performance analysis\")\n"
] |
[
[
"pandas.DataFrame"
]
] |