repo_name (string) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---
DMALab/TSplit
|
[
"8f86f987163aa06521bfeeb174616eb4a0a81b47"
] |
[
"python/athena/gpu_ops/StreamExecutor.py"
] |
[
"\"\"\" library to take autodiff and execute a computation graph \"\"\"\nfrom __future__ import absolute_import\nimport numpy as np\nfrom .Node import Op\nfrom .. import ndarray\nfrom ..stream import *\n\nimport ctypes\nimport os\nfrom pynvml import *\nFLAG_SHOW_GRAPH = False\nG_NODE_ID = 0\nNAME_RULE = 1\n\n\ndef communicate_init(worker_num, worker_id, source_ip, target_ip):\n global lib_communicate\n # lib_communicate.DL_Connect_Init(2, 0, \"*:4001\", \"localhost:4002\")\n # lib_communicate.DL_Connect_Init(2, 1, \"*:4002\", \"localhost:4001\")\n curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n lib_path = os.path.join(curr_path, '../../build/lib/')\n path_to_so_file = os.path.join(lib_path, \"lib_communication.so\")\n lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file)\n lib_communicate.DL_Connect_Init(\n worker_num, worker_id, source_ip, target_ip)\n\n\ndef communicate_finish():\n lib_communicate.DL_Communicate_Close()\n\n\nclass Distributed_CommunicateOp(Op):\n def __call__(self, nodeA):\n new_node = Op.__call__(self)\n new_node.inputs = [nodeA]\n new_node.name = \"Distributed_Communicate(%s)\" % (nodeA.name)\n # print nodeA.name\n return new_node\n\n def compute(self, node, input_vals, output_val, use_numpy=True):\n after_reduce_gradient_cpu = ndarray.empty(\n shape=output_val.shape, ctx=ndarray.cpu(0))\n if use_numpy:\n gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0))\n else:\n gradient_val_cpu = ndarray.array(\n input_vals[0].asnumpy(), ctx=ndarray.cpu(0))\n # print gradient_val_cpu.asnumpy()\n lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle)\n lib_communicate.DL_Communicate(\n gradient_val_cpu.handle, after_reduce_gradient_cpu.handle)\n # print after_reduce_gradient_cpu.asnumpy()\n if use_numpy:\n output_val[:] = after_reduce_gradient_cpu.asnumpy()\n else:\n after_reduce_gradient_cpu.copyto(output_val)\n\n def gradient(self, node, output_grad):\n raise NotImplementedError\n\n def infer_shape(self, node, input_shapes):\n return input_shapes[0]\n\n\ndistributed_communicate_op = Distributed_CommunicateOp()\n\n\nclass StreamExecutor(object):\n \"\"\"Executor computes values for given set of nodes in computation graph.\"\"\"\n\n def __init__(self, eval_node_list, ctx = None, stream = None, policy = None):\n \"\"\"\n Parameters\n ----------\n eval_node_list: list of nodes whose values need to be computed.\n ctx: runtime DLContext, default is None which means np.ndarray on cpu\n topo_order: list of nodes in topological order\n node_to_shape_map: dict from node to shape of the node\n node_to_arr_map: dict from node to ndarray.NDArray allocated for node\n feed_shapes: shapes of feed_dict from last run(...)\n \"\"\"\n self.eval_node_list = eval_node_list\n self.ctx = ctx\n if stream is None:\n self.stream = create_stream_handle(ctx)\n else:\n self.stream = stream\n self.stream.sync()\n self.topo_order = find_topo_sort(self.eval_node_list)\n self.node_to_shape_map = None\n self.node_to_arr_map = None\n self.feed_shapes = None\n self.policy = policy\n if self.policy == 'swap':\n self.swap_queue = []\n\n def infer_shape(self, feed_shapes):\n \"\"\"Given shapes of feed_dict nodes, infer shape for all nodes in graph.\n\n Implementation note:\n Iteratively calls node.op.infer_shape to infer shapes.\n Node shapes stored in self.node_to_shape_map.\n\n Parameters\n ----------\n feed_shapes: node->shapes mapping for feed_dict nodes.\n \"\"\"\n \"\"\"TODO: Your code here\"\"\"\n self.node_to_shape_map = {}\n\n for node in 
self.topo_order:\n if node in feed_shapes:\n self.node_to_shape_map[node] = feed_shapes[node]\n else:\n # print(node.name)\n input_shapes = [self.node_to_shape_map[n] for n in node.inputs]\n self.node_to_shape_map[node] = node.op.infer_shape(\n node, input_shapes)\n\n def memory_plan(self, feed_shapes):\n \"\"\"Allocates ndarray.NDArray for every node except feed_dict nodes.\n\n Implementation note:\n Option 1: Alloc a ndarray.NDArray per node that persists across run()\n Option 2: Implement a memory pool to reuse memory for nodes of same\n shapes. More details see Lecture 7.\n\n For both options, self.node_to_arr_map stores node->NDArray mapping to\n allow mapping to persist across multiple executor.run().\n\n Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray.\n\n Parameters\n ----------\n feed_shapes: node->shapes mapping for feed_dict nodes.\n \"\"\"\n \"\"\"TODO: Your code here\"\"\"\n assert (self.ctx is not None)\n # self.infer_shape(feed_shapes)\n self.node_to_arr_map = {}\n for node, shape in self.node_to_shape_map.items():\n if self.policy == 'swap':\n if not node.swap:\n self.node_to_arr_map[node] = ndarray.empty(\n shape, ctx=self.ctx)\n elif self.policy == 'vdnn':\n self.node_to_arr_map[node] = np.empty(shape)\n else:\n self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx)\n\n def run(self, feed_dict, convert_to_numpy_ret_vals=False):\n \"\"\"\n Parameters\n ----------\n feed_dict: a dictionary of node->np.ndarray supplied by user.\n convert_to_numpy_ret_vals: whether to convert ret vals to np.array\n\n Returns\n -------\n A list of values for nodes in eval_node_list. NDArray or np.ndarray.\n \"\"\"\n def are_feed_shapes_equal(sa, sb):\n if (not isinstance(sa, dict)) or (not isinstance(sb, dict)):\n return False\n unmatched_item = set(sa.items()) ^ set(sb.items())\n return len(unmatched_item) == 0\n\n # Assume self.ctx is None implies numpy array and numpy ops.\n\n use_numpy = self.ctx is None\n node_to_val_map = {}\n for node, value in feed_dict.items():\n if use_numpy:\n # all values passed in feed_dict must be np.ndarray\n assert isinstance(value, np.ndarray)\n node_to_val_map[node] = value\n else:\n # convert values to ndarray.NDArray if necessary\n if isinstance(value, np.ndarray):\n node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)\n elif isinstance(value, ndarray.NDArray):\n node_to_val_map[node] = value\n else:\n assert False, \"feed_dict value type not supported\"\n # print\"xxxx\"\n # collect shapes for all placeholders\n # infer shape if feed_shapes changed since last run\n # e.g. 
call run() on test data after trainng\n # print feed_shapes\n feed_shapes = {}\n for node in node_to_val_map:\n feed_shapes[node] = node_to_val_map[node].shape\n\n if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)):\n self.infer_shape(feed_shapes)\n self.feed_shapes = feed_shapes\n if (not use_numpy):\n self.memory_plan(self.feed_shapes)\n\n for node in self.topo_order:\n if node in node_to_val_map:\n continue\n input_vals = [node_to_val_map[n] for n in node.inputs]\n if use_numpy:\n node_val = np.empty(shape=self.node_to_shape_map[node])\n else:\n node_val = self.node_to_arr_map[node]\n # print(node.name)\n node.op.compute(node, input_vals, node_val, use_numpy, self.stream)\n node_to_val_map[node] = node_val\n self.stream.sync()\n if not use_numpy and convert_to_numpy_ret_vals:\n return [node_to_val_map[n].asnumpy() for n in self.eval_node_list]\n return [node_to_val_map[n] for n in self.eval_node_list]\n\n # def run(self, feed_dict, convert_to_numpy_ret_vals=False):\n # \"\"\"\n # Parameters\n # ----------\n # feed_dict: a dictionary of node->np.ndarray supplied by user.\n # convert_to_numpy_ret_vals: whether to convert ret vals to np.array\n\n # Returns\n # -------\n # A list of values for nodes in eval_node_list. NDArray or np.ndarray.\n # \"\"\"\n # def are_feed_shapes_equal(sa, sb):\n # if (not isinstance(sa, dict)) or (not isinstance(sb, dict)):\n # return False\n # unmatched_item = set(sa.items()) ^ set(sb.items())\n # return len(unmatched_item) == 0\n\n # # Assume self.ctx is None implies numpy array and numpy ops.\n\n # use_numpy = self.ctx is None\n # node_to_val_map = {}\n # for node, value in feed_dict.items():\n # if self.policy == 'vdnn':\n # assert isinstance(value, np.ndarray)\n # node_to_val_map[node] = value\n # else:\n # if use_numpy:\n # # all values passed in feed_dict must be np.ndarray\n # assert isinstance(value, np.ndarray)\n # node_to_val_map[node] = value\n # else:\n # # convert values to ndarray.NDArray if necessary\n # if isinstance(value, np.ndarray):\n # if self.policy == 'swap':\n # if node.swap == True:\n # node_to_val_map[node] = value\n # else:\n # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)\n # else:\n # node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)\n # elif isinstance(value, ndarray.NDArray):\n # node_to_val_map[node] = value\n # else:\n # assert False, \"feed_dict value type not supported\"\n\n # # collect shapes for all placeholders\n # feed_shapes = {}\n # for node in node_to_val_map:\n # feed_shapes[node] = node_to_val_map[node].shape\n\n # # infer shape if feed_shapes changed since last run\n # # e.g. 
call run() on test data after trainng\n # # print feed_shapes\n # if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)):\n # self.infer_shape(feed_shapes)\n # self.feed_shapes = feed_shapes\n # if not self.policy == 'vdnn':\n # # plan memory if using GPU\n # if (not use_numpy):\n # self.memory_plan(feed_shapes)\n # # Traverse graph in topo order and compute values for all nodes.\n # global FLAG_SHOW_GRAPH\n # if self.policy == 'swap':\n # # generate swap queue\n # if not use_numpy:\n # for node in self.topo_order:\n # if node not in node_to_val_map:\n # # variable in placeholder\n # for input_node in node.inputs:\n # if input_node.swap == True:\n # self.swap_queue.append(input_node)\n # # variable grad\n # if node.swap == True:\n # self.swap_queue.append(node)\n # node_in_GPU = None\n # if FLAG_SHOW_GRAPH:\n # print \"Show swap queue:\"\n # for node in self.swap_queue:\n # print node\n # elif self.policy == 'vdnn':\n # # TODO traverse graph to select in-gpu window\n # window = [0,0]\n # if not use_numpy:\n # nvmlInit()\n # handle = nvmlDeviceGetHandleByIndex(0)\n # info = nvmlDeviceGetMemoryInfo(handle)\n # gpu_mem = info.free\n # nvmlShutdown()\n # loss_node = self.eval_node_list[0]\n # window[1] = self.topo_order.index(loss_node)+1\n # window[0] = self.topo_order.index(loss_node)+1\n # for node in reversed(self.topo_order[:window[1]+1]):\n # node_size = 4 # float32\n # #print node, self.node_to_shape_map[node]\n # for shape in self.node_to_shape_map[node]:\n # node_size = node_size * shape\n # if gpu_mem > node_size:\n # gpu_mem = gpu_mem - node_size\n # window[0] = window[0] - 1\n # #print \"gpu_mem:\",gpu_mem\n # # Traverse graph in topo order and compute values for all nodes.\n # if FLAG_SHOW_GRAPH:\n # print \"run topo_order\"\n # # Show graph dependency\n # if FLAG_SHOW_GRAPH:\n # print \"node:\",node\n # print \"node.desc:\",node.desc\n\n # for node in self.topo_order:\n # if self.policy == 'vdnn':\n # # Skip placeholder nodes\n # if node in node_to_val_map:\n # continue\n # # H2D before compute\n # ## Collect inputs\n # input_vals = []\n # for n in node.inputs:\n # if not use_numpy:\n # if isinstance(node_to_val_map[n], np.ndarray):\n # node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx)\n # input_vals.append(node_to_val_map[n])\n # ## Alloc node space\n # if use_numpy:\n # node_val = np.empty(shape=self.node_to_shape_map[node])\n # else:\n # node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx)\n # # Compute\n # # node_val is modified in-place whether np.ndarray or NDArray\n # node.op.compute(node, input_vals, node_val, use_numpy)\n # # D2H after compute\n # if use_numpy:\n # node_to_val_map[node] = node_val\n # else:\n # node_index = self.topo_order.index(node)\n # if node_index > window[0] and node_index < window[1]:\n # node_to_val_map[node] = node_val\n # continue\n # node_to_val_map[node] = node_val.asnumpy()\n # del node_val\n # for n in node.inputs:\n # if isinstance(node_to_val_map[n], ndarray.NDArray):\n # tmp_val = node_to_val_map[n].asnumpy()\n # del node_to_val_map[n]\n # node_to_val_map[n] = tmp_val\n # elif self.policy == 'swap':\n # # Switch in GPU\n # if not use_numpy:\n # if self.swap_queue and (node_in_GPU==None):\n # swap_node = self.swap_queue[0]\n # if swap_node in node_to_val_map:\n # node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx)\n # else:\n # self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx)\n # node_in_GPU = swap_node.id\n\n # 
if node in node_to_val_map:\n # # Skip placeholder nodes. Values already provided by feed_dict.\n # continue\n # # Compute\n # input_vals = [node_to_val_map[n] for n in node.inputs]\n # if use_numpy:\n # node_val = np.empty(shape=self.node_to_shape_map[node])\n # else:\n # node_val = self.node_to_arr_map[node]\n # # node_val is modified in-place whether np.ndarray or NDArray\n # node.op.compute(node, input_vals, node_val, use_numpy)\n # if node.swap == True:\n # node_to_val_map[node] = node_val.asnumpy()\n # del node_val\n # del self.node_to_arr_map[node]\n # del self.swap_queue[0]\n # node_in_GPU = None\n # else:\n # node_to_val_map[node] = node_val\n # # Switch out GPU\n # if not use_numpy:\n # if self.swap_queue:\n # if self.swap_queue[0] in node.inputs:\n # out_node = self.swap_queue.pop(0)\n # if self.swap_queue:\n # if not self.swap_queue[0].id == node_in_GPU:\n # tmp_array = node_to_val_map[out_node].asnumpy()\n # del node_to_val_map[out_node]\n # node_to_val_map[out_node] = tmp_array\n # node_in_GPU = None\n # else:\n # if node in node_to_val_map:\n # # Skip placeholder nodes. Values already provided by feed_dict.\n # continue\n # input_vals = [node_to_val_map[n] for n in node.inputs]\n # # print self.node_to_shape_map[node]\n\n # if use_numpy:\n # node_val = np.empty(shape=self.node_to_shape_map[node])\n # else:\n # node_val = self.node_to_arr_map[node]\n # # node_val is modified in-place whether np.ndarray or NDArray\n # # if (len(node.inputs) == 1):\n # # print \"computs\",node.inputs[0].name\n # # else:\n # # print \"computs\",node.inputs[0].name,node.inputs[1].name\n # # print node.name\n\n # # print node_val.shape\n # # print \"xxx\"\n # # print node.name\n # node.op.compute(node, input_vals, node_val, use_numpy)\n # # print \"xxx\"\n # node_to_val_map[node] = node_val\n # # print \"xxx\"\n\n # if FLAG_SHOW_GRAPH:\n # FLAG_SHOW_GRAPH = False\n # # Collect node values.\n # if not use_numpy and convert_to_numpy_ret_vals:\n # if self.policy == 'swap':\n # node_values = []\n # for n in self.eval_node_list:\n # if n.swap == True:\n # node_values.append(node_to_val_map[n])\n # else:\n # node_values.append(node_to_val_map[n].asnumpy())\n # return node_values\n # elif self.policy == 'vdnn':\n # return [node_to_val_map[n] for n in self.eval_node_list]\n # else:\n # return [node_to_val_map[n].asnumpy() for n in self.eval_node_list]\n # return [node_to_val_map[n] for n in self.eval_node_list]\n\n\ndef gradients(output_node, node_list, scheduler_policy=None):\n \"\"\"Take gradient of output node with respect to each node in node_list.\n\n Parameters\n ----------\n output_node: output node that we are taking derivative of.\n node_list: list of nodes that we are taking derivative wrt.\n\n Returns\n -------\n A list of gradient values, one for each node in node_list respectively.\n\n \"\"\"\n from . 
import OnesLike\n node_to_output_grads_list = {}\n node_to_output_grads_list[output_node] = [\n OnesLike.oneslike_op(output_node)]\n node_to_output_grad = {}\n # Traverse forward graph in reverse topological order\n reverse_topo_order = reversed(find_topo_sort([output_node]))\n\n for node in reverse_topo_order:\n output_grad = sum_node_list(node_to_output_grads_list[node])\n node_to_output_grad[node] = output_grad\n input_grads_list = node.op.gradient(node, output_grad)\n #print len(node.name)\n #print len(node.inputs)\n #raw_input(\"\\n\\nPress the enter key to exit.\")\n for i in range(len(node.inputs)):\n if node.inputs[i] not in node_to_output_grads_list:\n node_to_output_grads_list[node.inputs[i]] = []\n # Calculate partial adjoint for input nodes.\n # print node.name\n node_to_output_grads_list[node.inputs[i]].append(\n input_grads_list[i])\n if scheduler_policy == 'swap':\n for node in node_list:\n if node.swap:\n node_to_output_grad[node].swap = True\n\n grad_node_list = [node_to_output_grad[node] for node in node_list]\n # grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list]\n return grad_node_list\n\n\ndef distributed_gradients(output_node, node_list, scheduler_policy=None):\n \"\"\"Take gradient of output node with respect to each node in node_list.\n\n Parameters\n ----------\n output_node: output node that we are taking derivative of.\n node_list: list of nodes that we are taking derivative wrt.\n\n Returns\n -------\n A list of gradient values, one for each node in node_list respectively.\n\n \"\"\"\n from .OnesLike import oneslike_op\n\n node_to_output_grads_list = {}\n node_to_output_grads_list[output_node] = [oneslike_op(output_node)]\n node_to_output_grad = {}\n # Traverse forward graph in reverse topological order\n reverse_topo_order = reversed(find_topo_sort([output_node]))\n for node in reverse_topo_order:\n output_grad = sum_node_list(node_to_output_grads_list[node])\n node_to_output_grad[node] = output_grad\n input_grads_list = node.op.gradient(node, output_grad)\n #print len(node.name)\n #print len(node.inputs)\n #raw_input(\"\\n\\nPress the enter key to exit.\")\n for i in range(len(node.inputs)):\n if node.inputs[i] not in node_to_output_grads_list:\n node_to_output_grads_list[node.inputs[i]] = []\n # Calculate partial adjoint for input nodes.\n node_to_output_grads_list[node.inputs[i]].append(\n input_grads_list[i])\n if scheduler_policy == 'swap':\n for node in node_list:\n if node.swap:\n node_to_output_grad[node].swap = True\n # grad_node_list = [node_to_output_grad[node] for node in node_list]\n grad_node_list = [distributed_communicate_op(\n node_to_output_grad[node]) for node in node_list]\n return grad_node_list\n\n##################\n# Helper Methods #\n##################\n\n\ndef find_topo_sort(node_list):\n \"\"\"Given a list of nodes, return a topo ordering of nodes ending in them.\n\n A simple algorithm is to do a post-order DFS traversal on the given nodes,\n going backwards based on input edges. 
Since a node is added to the ordering\n after all its predecessors are traversed due to post-order DFS, we get a\n topological sort.\n\n \"\"\"\n visited = set()\n topo_order = []\n for node in node_list:\n topo_sort_dfs(node, visited, topo_order)\n return topo_order\n\n\ndef topo_sort_dfs(node, visited, topo_order):\n \"\"\"Post-order DFS\"\"\"\n if node in visited:\n return\n visited.add(node)\n for n in node.inputs:\n topo_sort_dfs(n, visited, topo_order)\n topo_order.append(node)\n\n\ndef sum_node_list(node_list):\n \"\"\"Custom sum func to avoid creating redundant nodes in Python sum func.\"\"\"\n from operator import add\n from functools import reduce\n return reduce(add, node_list)\n\n\ndef broadcast_rule(shape_a, shape_b):\n \"\"\"Return output shape of broadcast shape_a, shape_b.\n e.g. broadcast_rule((3,2), (4,3,2))\n returns output_shape = (4,3,2)\n\n Check out explanations and more examples at\n https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html\n http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/\n \"\"\"\n assert(isinstance(shape_a, tuple))\n assert(isinstance(shape_b, tuple))\n if len(shape_a) > len(shape_b):\n longer_shape, shorter_shape = shape_a, shape_b\n else:\n longer_shape, shorter_shape = shape_b, shape_a\n len_diff = len(longer_shape) - len(shorter_shape)\n for i in range(len_diff):\n # pad with leading 1s\n shorter_shape = (1,) + shorter_shape\n assert len(shorter_shape) == len(longer_shape)\n output_shape = list(longer_shape)\n for i in range(len(output_shape)):\n assert (shorter_shape[i] == longer_shape[i]) \\\n or (shorter_shape[i] == 1) \\\n or (longer_shape[i] == 1)\n output_shape[i] = max(shorter_shape[i], longer_shape[i])\n return tuple(output_shape)\n"
] |
[
[
"numpy.empty"
]
] |
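
The `StreamExecutor` file above builds its execution order with a post-order DFS (`find_topo_sort`) and then infers shapes in one forward pass over that order. A minimal standalone sketch of the same pattern, using a hypothetical `Node` class in place of athena's `Op`/`Node` machinery:

```python
# Post-order DFS over input edges yields a topological order: a node is
# appended only after every node it depends on has been appended.

class Node:
    def __init__(self, name, inputs=()):
        self.name = name
        self.inputs = list(inputs)

def find_topo_sort(node_list):
    """Return a topological ordering ending in the given nodes."""
    visited, order = set(), []

    def dfs(node):
        if node in visited:
            return
        visited.add(node)
        for n in node.inputs:
            dfs(n)
        order.append(node)  # appended only after all predecessors

    for node in node_list:
        dfs(node)
    return order

# Toy graph: c consumes a and b, so both must precede it.
a, b = Node("a"), Node("b")
c = Node("c", inputs=[a, b])
print([n.name for n in find_topo_sort([c])])  # ['a', 'b', 'c']
```

This is why `infer_shape` and `run` in the executor can process `self.topo_order` with a single loop: by the time a node is reached, every entry in `node.inputs` already has a shape (or value) in the corresponding map.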
unnir/tensorflow
|
[
"656b2fe018a7940595121ea08d4a1ddf29fa65d0",
"656b2fe018a7940595121ea08d4a1ddf29fa65d0"
] |
[
"tensorflow/contrib/distribute/python/keras_test.py",
"tensorflow/contrib/distribute/python/one_device_strategy_test.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.keras models using DistributionStrategy.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.contrib.distribute.python import combinations\nfrom tensorflow.contrib.distribute.python import mirrored_strategy\nfrom tensorflow.contrib.distribute.python import tpu_strategy\nfrom tensorflow.contrib.distribute.python import values\nfrom tensorflow.python import keras\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.estimator import keras as keras_lib\nfrom tensorflow.python.estimator import run_config as run_config_lib\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.engine import distributed_training_utils\nfrom tensorflow.python.ops.parsing_ops import gen_parsing_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.summary.writer import writer_cache\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import rmsprop\n\n\n_RANDOM_SEED = 1337\n_TRAIN_SIZE = 200\n_INPUT_SIZE = (10,)\n_NUM_CLASS = 2\n\n\n# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as\n# part of the tf.keras unit tests suite.\ndef simple_sequential_model():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))\n model.add(keras.layers.Dropout(0.1))\n model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))\n return model\n\n\ndef simple_functional_model():\n a = keras.layers.Input(shape=_INPUT_SIZE)\n b = keras.layers.Dense(16, activation='relu')(a)\n b = keras.layers.Dropout(0.1)(b)\n b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)\n model = keras.models.Model(inputs=[a], outputs=[b])\n return model\n\n\ndef multi_inputs_multi_outputs_model():\n input_a = keras.layers.Input(shape=(16,), name='input_a')\n input_b = keras.layers.Input(shape=(16,), name='input_b')\n input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')\n dense = keras.layers.Dense(8, name='dense_1')\n\n interm_a = dense(input_a)\n # Read m\n interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)\n interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])\n interm_b = dense(input_b)\n merged = keras.layers.concatenate([interm_s, interm_b], name='merge')\n output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)\n output_d = keras.layers.Dense(2, 
activation='softmax', name='dense_3')(merged)\n model = keras.models.Model(\n inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])\n model.compile(\n loss='categorical_crossentropy',\n optimizer=gradient_descent.GradientDescentOptimizer(0.001),\n metrics={\n 'dense_2': 'categorical_accuracy',\n 'dense_3': 'categorical_accuracy'\n })\n return model\n\n\ndef get_ds_train_input_fn():\n np.random.seed(_RANDOM_SEED)\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=_TRAIN_SIZE,\n test_samples=50,\n input_shape=_INPUT_SIZE,\n num_classes=_NUM_CLASS)\n y_train = keras.utils.to_categorical(y_train)\n\n dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))\n dataset = dataset.batch(32)\n return dataset\n\n\ndef get_ds_test_input_fn():\n np.random.seed(_RANDOM_SEED)\n _, (x_test, y_test) = testing_utils.get_test_data(\n train_samples=_TRAIN_SIZE,\n test_samples=50,\n input_shape=_INPUT_SIZE,\n num_classes=_NUM_CLASS)\n y_test = keras.utils.to_categorical(y_test)\n\n dataset = dataset_ops.Dataset.from_tensor_slices((x_test, y_test))\n dataset = dataset.batch(32)\n return dataset\n\n\ndef get_multi_inputs_multi_outputs_data():\n (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(\n train_samples=_TRAIN_SIZE,\n test_samples=50,\n input_shape=(16,),\n num_classes=3,\n random_seed=_RANDOM_SEED)\n (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(\n train_samples=_TRAIN_SIZE,\n test_samples=50,\n input_shape=(16,),\n num_classes=2,\n random_seed=_RANDOM_SEED)\n (m_train, _), (m_test, _) = testing_utils.get_test_data(\n train_samples=_TRAIN_SIZE,\n test_samples=50,\n input_shape=(8,),\n num_classes=2,\n random_seed=_RANDOM_SEED)\n\n c_train = keras.utils.to_categorical(c_train)\n c_test = keras.utils.to_categorical(c_test)\n d_train = keras.utils.to_categorical(d_train)\n d_test = keras.utils.to_categorical(d_test)\n\n train_data = {\n 'input_a': a_train,\n 'input_b': b_train,\n 'input_m': m_train,\n 'output_c': c_train,\n 'output_d': d_train\n }\n test_data = {\n 'input_a': a_test,\n 'input_b': b_test,\n 'input_m': m_test,\n 'output_c': c_test,\n 'output_d': d_test\n }\n\n return (train_data, test_data)\n\n\ndef batch_wrapper(dataset, batch_size, distribution):\n # TPUs currently require fully defined input shapes, drop_remainder ensures\n # the input will have fully defined shapes.\n if isinstance(distribution, tpu_strategy.TPUStrategy):\n return dataset.batch(batch_size, drop_remainder=True)\n else:\n return dataset.batch(batch_size)\n\n\ndef get_model():\n x = keras.layers.Input(shape=(3,), name='input')\n y = keras.layers.Dense(4, name='dense')(x)\n model = keras.Model(x, y)\n return model\n\n\ndef get_dataset(distribution):\n inputs = np.zeros((10, 3), dtype=np.float32)\n targets = np.zeros((10, 4), dtype=np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat(100)\n dataset = batch_wrapper(dataset, 10, distribution)\n return dataset\n\n\ndef get_predict_dataset(distribution):\n inputs = np.zeros((10, 3), dtype=np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices(inputs)\n dataset = dataset.repeat(100)\n dataset = batch_wrapper(dataset, 10, distribution)\n return dataset\n\n\ndef multi_input_output_model():\n a = keras.layers.Input(shape=(3,), name='input_a')\n b = keras.layers.Input(shape=(5,), name='input_b')\n # TODO(anjalisridhar): Change the output dimension of the second Dense layer\n # once the iterator output validation issue has been fixed.\n 
dense_1 = keras.layers.Dense(7, name='dense_1')\n dense_2 = keras.layers.Dense(7, name='dense_2')\n c = dense_1(a)\n d = dense_2(b)\n e = keras.layers.Dropout(0.5, name='dropout')(c)\n model = keras.models.Model([a, b], [d, e])\n return model\n\n\ndef get_correctness_test_inputs(use_numpy, with_distribution,\n x_train, y_train, x_predict):\n \"\"\"Generates the inputs for correctness check when enable Keras with DS.\"\"\"\n global_batch_size = 64\n batch_size = global_batch_size\n # TODO(b/118776054): Use global batch size for Keras/DS support.\n use_per_core_batch_size = (\n with_distribution and\n with_distribution.__class__.__name__ != 'TPUStrategy')\n if use_per_core_batch_size:\n batch_size //= with_distribution.num_replicas_in_sync\n\n if use_numpy:\n training_inputs = {\n 'batch_size': batch_size,\n 'x': x_train,\n 'y': y_train,\n 'epochs': 1,\n 'shuffle': False,\n }\n eval_inputs = {\n 'batch_size': batch_size,\n 'x': x_train,\n 'y': y_train,\n }\n # TODO(b/119318587): We should not require batch_size when distribution\n # is enabled.\n if with_distribution:\n if use_per_core_batch_size:\n predict_batch_size = (\n len(x_predict) // with_distribution.num_replicas_in_sync)\n else:\n predict_batch_size = len(x_predict)\n else:\n predict_batch_size = None\n\n predict_inputs = {\n 'batch_size': predict_batch_size,\n 'x': np.array(x_predict, dtype=np.float32),\n }\n else:\n # For dataset inputs, we do not pass batch_size to\n # keras.fit/evaluate/predict. The batch size is part of the dataset.\n train_dataset = dataset_ops.Dataset.from_tensor_slices(\n (x_train, y_train))\n x = batch_wrapper(train_dataset, batch_size, with_distribution)\n\n training_inputs = {\n 'batch_size': None,\n 'x': x,\n 'y': None,\n 'epochs': 1,\n 'shuffle': False,\n 'steps_per_epoch': len(x_train) // global_batch_size,\n }\n eval_inputs = {\n 'batch_size': None,\n 'x': x,\n 'y': None,\n 'steps': 20,\n }\n predict_batch_size = len(x_predict)\n if use_per_core_batch_size:\n predict_batch_size //= with_distribution.num_replicas_in_sync\n predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)\n predict_dataset = batch_wrapper(predict_dataset,\n predict_batch_size, with_distribution)\n predict_inputs = {\n 'batch_size': None,\n 'steps': 1,\n 'x': predict_dataset,\n }\n\n return training_inputs, eval_inputs, predict_inputs\n\n\nstrategies = [combinations.default_strategy,\n combinations.one_device_strategy,\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.mirrored_strategy_with_two_gpus,\n combinations.tpu_strategy, # steps_per_run=2\n combinations.tpu_strategy_one_step]\n\n\ndef strategy_minus_tpu_combinations():\n return combinations.combine(\n distribution=[combinations.default_strategy,\n combinations.one_device_strategy,\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.mirrored_strategy_with_two_gpus],\n mode=['graph'])\n\n\ndef strategy_combinations():\n return combinations.combine(\n distribution=strategies,\n mode=['graph'])\n\n\ndef strategy_and_optimizer_combinations():\n return combinations.combine(\n distribution=strategies,\n optimizer=[combinations.adagrad_optimizer_v1_fn,\n combinations.adam_optimizer_v1_fn,\n combinations.gradient_descent_optimizer_v1_fn,\n combinations.rmsprop_optimizer_v1_fn],\n mode=['graph'])\n\n\ndef strategy_and_inputs():\n return combinations.combine(\n distribution=strategies,\n use_numpy=[True, False],\n mode=['graph'])\n\n\nclass TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self._base_dir 
= os.path.join(self.get_temp_dir(),\n 'keras_mirrored_strategy_test')\n gfile.MakeDirs(self._base_dir)\n self._config = run_config_lib.RunConfig(\n tf_random_seed=_RANDOM_SEED, model_dir=self._base_dir)\n self._dist = mirrored_strategy.MirroredStrategy(\n devices=['/device:GPU:0', '/device:GPU:1'])\n\n def tearDown(self):\n writer_cache.FileWriterCache.clear()\n if os.path.isdir(self._base_dir):\n gfile.DeleteRecursively(self._base_dir)\n\n def test_train_functional_with_distribution_strategy(self):\n dist = mirrored_strategy.MirroredStrategy(\n devices=['/device:GPU:0', '/device:GPU:1'])\n keras_model = simple_functional_model()\n keras_model.compile(\n loss='categorical_crossentropy',\n metrics=[keras.metrics.CategoricalAccuracy()],\n optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))\n config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,\n model_dir=self._base_dir,\n train_distribute=dist,\n eval_distribute=dist)\n with self.cached_session():\n est_keras = keras_lib.model_to_estimator(\n keras_model=keras_model, config=config)\n before_eval_results = est_keras.evaluate(\n input_fn=get_ds_test_input_fn, steps=1)\n est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)\n after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,\n steps=1)\n self.assertLess(after_eval_results['loss'], before_eval_results['loss'])\n\n writer_cache.FileWriterCache.clear()\n gfile.DeleteRecursively(self._config.model_dir)\n\n def test_train_sequential_with_distribution_strategy(self):\n dist = mirrored_strategy.MirroredStrategy(\n devices=['/device:GPU:0', '/device:GPU:1'])\n keras_model = simple_sequential_model()\n keras_model.compile(\n loss='categorical_crossentropy',\n metrics=[keras.metrics.CategoricalAccuracy()],\n optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))\n config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,\n model_dir=self._base_dir,\n train_distribute=dist)\n with self.cached_session():\n est_keras = keras_lib.model_to_estimator(\n keras_model=keras_model, config=config)\n before_eval_results = est_keras.evaluate(\n input_fn=get_ds_test_input_fn, steps=1)\n est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)\n after_eval_results = est_keras.evaluate(input_fn=get_ds_test_input_fn,\n steps=1)\n self.assertLess(after_eval_results['loss'], before_eval_results['loss'])\n\n writer_cache.FileWriterCache.clear()\n gfile.DeleteRecursively(self._config.model_dir)\n\n def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self):\n train_data, test_data = get_multi_inputs_multi_outputs_data()\n\n def train_input_fn():\n input_dict = {\n 'input_a': train_data['input_a'],\n 'input_b': train_data['input_b'],\n 'input_m': train_data['input_m'].astype(np.str)\n }\n output_dict = {\n 'dense_2': train_data['output_c'],\n 'dense_3': train_data['output_d']\n }\n return dataset_ops.Dataset.from_tensor_slices((input_dict,\n output_dict)).batch(16)\n\n def eval_input_fn():\n input_dict = {\n 'input_a': test_data['input_a'],\n 'input_b': test_data['input_b'],\n 'input_m': test_data['input_m'].astype(np.str)\n }\n output_dict = {\n 'dense_2': test_data['output_c'],\n 'dense_3': test_data['output_d']\n }\n return dataset_ops.Dataset.from_tensor_slices((input_dict,\n output_dict)).batch(16)\n\n self.do_test_multi_inputs_multi_outputs_with_input_fn(\n train_input_fn, eval_input_fn)\n\n def do_test_multi_inputs_multi_outputs_with_input_fn(self, train_input_fn,\n eval_input_fn):\n config = run_config_lib.RunConfig(\n 
tf_random_seed=_RANDOM_SEED,\n model_dir=self._base_dir,\n train_distribute=self._dist)\n with self.cached_session():\n model = multi_inputs_multi_outputs_model()\n est_keras = keras_lib.model_to_estimator(keras_model=model, config=config)\n baseline_eval_results = est_keras.evaluate(\n input_fn=eval_input_fn, steps=1)\n est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE / 16)\n eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)\n self.assertLess(eval_results['loss'], baseline_eval_results['loss'])\n\n def test_keras_optimizer_with_distribution_strategy(self):\n dist = mirrored_strategy.MirroredStrategy(\n devices=['/device:GPU:0', '/device:GPU:1'])\n keras_model = simple_sequential_model()\n keras_model.compile(\n loss='categorical_crossentropy',\n optimizer=keras.optimizers.rmsprop(lr=0.01))\n\n config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,\n model_dir=self._base_dir,\n train_distribute=dist)\n with self.cached_session():\n est_keras = keras_lib.model_to_estimator(keras_model=keras_model,\n config=config)\n with self.assertRaisesRegexp(ValueError,\n 'Only TensorFlow native optimizers are '\n 'supported with DistributionStrategy.'):\n est_keras.train(input_fn=get_ds_train_input_fn, steps=_TRAIN_SIZE / 16)\n\n writer_cache.FileWriterCache.clear()\n gfile.DeleteRecursively(self._config.model_dir)\n\n\nclass TestDistributionStrategyWithNumpyArrays(test.TestCase,\n parameterized.TestCase):\n\n @combinations.generate(strategy_combinations())\n def test_creating_var_with_numpy_arrays(self, distribution):\n with self.cached_session():\n x = np.asarray(np.random.random((64, 3)), dtype=np.float32)\n var_x = distributed_training_utils.get_var_for_numpy(distribution, x)\n val = self.evaluate(var_x.value())\n # Verify that the numpy value is copied to the variable.\n self.assertAllEqual(x, val)\n\n def test_calculating_batch_params(self):\n # This verifies that we calculate the number of steps when the batch size\n # is specified.\n with self.cached_session():\n # 64 is the number of input samples.\n inputs = np.zeros((64, 3), dtype=np.float32)\n # The number of replicas is equal to 3.\n strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',\n '/device:CPU:0',\n '/device:GPU:1'])\n\n with self.assertRaisesRegexp(ValueError, 'Please specify a batch_size '\n 'that is smaller than'):\n # The batch size(128) is larger than the number of input\n # samples(64).\n distributed_training_utils.get_input_batch_params(inputs,\n 128,\n strategy)\n\n with self.assertRaisesRegexp(ValueError, 'is smaller than the number '\n 'of replicas'):\n # The batch size(32) * num_replicas_in_sync(3) is 96 which is greater\n # than the number of input samples(64).\n distributed_training_utils.get_input_batch_params(inputs,\n 32,\n strategy)\n\n # The number of replicas now is equal to 2.\n strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',\n '/device:CPU:0'])\n # 32 is the batch size per replica.\n steps = distributed_training_utils.get_input_batch_params(inputs,\n 32,\n strategy)\n # The number of batches is the ratio of input samples(64) to\n # batch size(32) which is 2. The number of steps(1) is the ratio of\n # number of batches(2) to the number of replicas(2).\n self.assertEqual(steps, 1)\n\n # 16 is the batch size per replica.\n steps = distributed_training_utils.get_input_batch_params(inputs,\n 16,\n strategy)\n # The number of batches is the ratio of input samples(64) to\n # batch size(16) which is 4. 
The number of steps(2) is the ratio of\n # number of batches(4) to the number of replicas(2).\n self.assertEqual(steps, 2)\n\n def test_calculating_batch_size(self):\n with self.cached_session():\n # 64 is the number of input samples.\n inputs = np.zeros((64, 3), dtype=np.float32)\n targets = np.zeros((64, 4), dtype=np.float32)\n\n model = get_model()\n optimizer = gradient_descent.GradientDescentOptimizer(0.001)\n loss = 'mse'\n strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',\n '/device:CPU:0'])\n strategy._require_static_shapes = True\n\n model.compile(optimizer, loss, distribute=strategy)\n iterator = model._distribution_standardize_user_data(inputs,\n targets,\n batch_size=None,\n check_steps=True,\n steps_name='steps',\n steps=3)\n\n # The global batch size(21) across all replicas is the ratio of the input\n # samples(64) to the steps(3).\n # The batch size(10) per device is the ratio of the global batch size(21)\n # to the number of replicas(2).\n # The global batch size and batch size are rounded integer values.\n self.assertEqual(10, distributed_training_utils.get_batch_dimension(\n iterator._iterator))\n\n @combinations.generate(strategy_combinations())\n def test_calling_model_with_numpy_arrays(self, distribution):\n with self.cached_session():\n model = get_model()\n\n optimizer = gradient_descent.GradientDescentOptimizer(0.001)\n loss = 'mse'\n metrics = ['mae']\n model.compile(optimizer, loss, metrics=metrics, distribute=distribution)\n\n inputs = np.zeros((64, 3), dtype=np.float32)\n targets = np.zeros((64, 4), dtype=np.float32)\n\n # Call fit with validation data\n model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0,\n validation_data=(inputs, targets))\n\n # TODO(anjalisridhar): We need tests for when the batch size and steps are\n # smaller and results in a 0 batch_size and steps value.\n model.evaluate(inputs, targets)\n # with steps\n model.evaluate(inputs, targets, steps=2)\n # with batch_size\n model.evaluate(inputs, targets, batch_size=8)\n\n model.predict(inputs)\n # with steps\n model.predict(inputs, steps=2)\n # with batch_size\n model.predict(inputs, batch_size=8)\n\n @combinations.generate(strategy_combinations())\n def test_calling_model_with_nested_numpy_arrays(self, distribution):\n with self.cached_session():\n model = multi_input_output_model()\n\n optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)\n loss = 'mse'\n model.compile(optimizer, loss, distribute=distribution)\n\n input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)\n input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)\n inputs = [input_a_np, input_b_np]\n\n output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)\n output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)\n targets = [output_d_np, output_e_np]\n\n # Call fit with validation data\n model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)\n\n # TODO(anjalisridhar): We need tests for when the batch size and steps are\n # smaller and results in a 0 batch_size and steps value.\n model.evaluate(inputs, targets)\n # with steps\n model.evaluate(inputs, targets, steps=2)\n # with batch_size\n model.evaluate(inputs, targets, batch_size=8)\n\n model.predict(inputs)\n # with steps\n model.predict(inputs, steps=2)\n # with batch_size\n model.predict(inputs, batch_size=8)\n\n @combinations.generate(strategy_minus_tpu_combinations())\n def test_numpy_with_sample_weights(self, distribution):\n model = get_model()\n optimizer = 
rmsprop.RMSPropOptimizer(learning_rate=0.001)\n loss = 'mse'\n model.compile(optimizer, loss, distribute=distribution)\n\n inputs = np.zeros((10, 3), np.float32)\n targets = np.zeros((10, 4), np.float32)\n sample_weights = np.ones((10), np.float32)\n\n model.fit(inputs, targets, sample_weight=sample_weights, epochs=1,\n steps_per_epoch=2, verbose=1)\n\n @combinations.generate(strategy_combinations())\n def test_flatten_predict_outputs(self, distribution):\n with self.cached_session():\n model = multi_input_output_model()\n\n optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)\n loss = 'mse'\n model.compile(optimizer, loss, distribute=distribution)\n\n # We take 6 input samples with each input having a dimension of 3 or 5.\n input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)\n input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)\n inputs = [input_a_np, input_b_np]\n\n outs = model.predict(inputs, steps=1)\n # `predict` a list that is equal in length to the number of model outputs.\n # In this test our model has two outputs and each element of `outs`\n # corresponds to all the samples of one of the model outputs.\n self.assertEqual(2, len(outs))\n # Each of the output samples have a dimension of 7. We should process all\n # the available input samples(6).\n self.assertAllEqual([6, 7], outs[0].shape)\n self.assertAllEqual([6, 7], outs[1].shape)\n\n\nclass TestDistributionStrategyWithDatasets(test.TestCase,\n parameterized.TestCase):\n\n @combinations.generate(strategy_combinations())\n def test_calling_model_on_same_dataset(self, distribution):\n with self.cached_session():\n model = get_model()\n\n optimizer = gradient_descent.GradientDescentOptimizer(0.001)\n loss = 'mse'\n metrics = ['mae', keras.metrics.CategoricalAccuracy()]\n model.compile(optimizer, loss, metrics=metrics, distribute=distribution)\n\n dataset = get_dataset(distribution)\n\n # Call fit with validation data\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,\n validation_data=dataset, validation_steps=2)\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,\n validation_data=dataset, validation_steps=2)\n model.predict(get_predict_dataset(distribution), steps=2)\n\n @combinations.generate(strategy_combinations())\n def test_model_interleaved_eval_same_as_direct_eval(self, distribution):\n with self.cached_session():\n user_controlled_model = get_model()\n user_controlled_model.compile(\n gradient_descent.GradientDescentOptimizer(0.001),\n loss='mse',\n metrics=['mae', keras.metrics.CategoricalAccuracy()],\n distribute=distribution)\n\n interleaved_model = get_model()\n interleaved_model.set_weights(user_controlled_model.get_weights())\n interleaved_model.compile(\n gradient_descent.GradientDescentOptimizer(0.001),\n loss='mse',\n metrics=['mae', keras.metrics.CategoricalAccuracy()],\n distribute=distribution)\n\n dataset = get_dataset(distribution)\n\n # Call fit with validation interleaved\n interleaved_output = interleaved_model.fit(\n dataset, epochs=2, steps_per_epoch=2, verbose=1,\n validation_data=dataset, validation_steps=2, shuffle=False)\n\n # Manually control the validation running after each epoch.\n user_controlled_output = []\n for _ in range(2):\n user_controlled_model.fit(\n dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)\n user_controlled_output.append(\n user_controlled_model.evaluate(dataset, steps=2))\n\n self.assertEqual(interleaved_output.history['val_loss'],\n [x[0] for x in user_controlled_output])\n 
self.assertEqual(interleaved_output.history['val_mean_absolute_error'],\n [x[1] for x in user_controlled_output])\n self.assertEqual(interleaved_output.history['val_categorical_accuracy'],\n [x[2] for x in user_controlled_output])\n\n # TODO(priyag): Enable this test for TPU. Currently tuples/dict don't work\n # as clone_model's input_tensors argument only seems to accept list and not\n # tuples or dict.\n def test_fit_with_tuple_and_dict_dataset_inputs(self):\n with self.cached_session():\n model = multi_input_output_model()\n\n optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)\n loss = 'mse'\n metrics = ['mae', keras.metrics.CategoricalAccuracy()]\n strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',\n '/device:CPU:0'])\n model.compile(optimizer, loss, metrics=metrics, distribute=strategy)\n\n input_a_np = np.random.random((10, 3))\n input_b_np = np.random.random((10, 5))\n output_d_np = np.random.random((10, 7))\n output_e_np = np.random.random((10, 7))\n\n # Test with tuples\n dataset_tuple = dataset_ops.Dataset.from_tensor_slices((\n (input_a_np, input_b_np), (output_d_np, output_e_np)))\n dataset_tuple = dataset_tuple.repeat(100)\n dataset_tuple = dataset_tuple.batch(10)\n\n model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)\n\n # Test with dict\n dataset_dict = dataset_ops.Dataset.from_tensor_slices((\n {'input_a': input_a_np, 'input_b': input_b_np},\n (output_d_np, output_e_np)))\n dataset_dict = dataset_dict.repeat(100)\n dataset_dict = dataset_dict.batch(10)\n\n model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)\n\n @combinations.generate(strategy_combinations())\n def test_fit_eval_and_predict_methods_on_dataset(self, distribution):\n with self.cached_session():\n model = get_model()\n\n optimizer = gradient_descent.GradientDescentOptimizer(0.001)\n loss = 'mse'\n metrics = ['mae', keras.metrics.CategoricalAccuracy()]\n model.compile(optimizer, loss, metrics=metrics, distribute=distribution)\n\n dataset = get_dataset(distribution)\n\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)\n model.evaluate(dataset, steps=2, verbose=1)\n model.predict(get_predict_dataset(distribution), steps=2)\n\n @combinations.generate(strategy_and_optimizer_combinations())\n def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):\n with self.cached_session():\n model = get_model()\n\n loss = 'mse'\n model.compile(optimizer(), loss, distribute=distribution)\n\n dataset = get_dataset(distribution)\n\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)\n model.evaluate(dataset, steps=2, verbose=1)\n model.predict(get_predict_dataset(distribution), steps=2)\n\n @combinations.generate(strategy_minus_tpu_combinations())\n def test_dataset_with_sample_weights(self, distribution):\n model = get_model()\n optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)\n loss = 'mse'\n model.compile(optimizer, loss, distribute=distribution)\n\n inputs = np.zeros((10, 3), np.float32)\n targets = np.zeros((10, 4), np.float32)\n sample_weights = np.ones((10), np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,\n sample_weights))\n dataset = dataset.repeat()\n dataset = dataset.batch(10)\n\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)\n model.evaluate(dataset, steps=2, verbose=1)\n model.predict(dataset, steps=2)\n\n def test_dataset_input_shape_validation(self):\n with self.cached_session():\n model = get_model()\n\n optimizer = 
rmsprop.RMSPropOptimizer(learning_rate=0.001)\n loss = 'mse'\n strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',\n '/device:GPU:0'])\n\n model.compile(optimizer, loss, distribute=strategy)\n\n # User forgets to batch the dataset\n inputs = np.zeros((10, 3), dtype=np.float32)\n targets = np.zeros((10, 4), dtype=np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat(100)\n\n with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)\n\n # Wrong input shape\n inputs = np.zeros((10, 5), dtype=np.float32)\n targets = np.zeros((10, 4), dtype=np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat(100)\n dataset = dataset.batch(10)\n\n with self.assertRaisesRegexp(ValueError,\n 'expected input to have shape'):\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)\n\n @combinations.generate(combinations.combine(\n distribution=[combinations.tpu_strategy_one_step],\n mode=['graph']))\n def test_dataset_input_shape_fully_defined(self, distribution):\n with self.cached_session():\n model = get_model()\n\n optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)\n loss = 'mse'\n model.compile(optimizer, loss, distribute=distribution)\n\n dataset = get_dataset(distribution)\n # Input shapes are not fully known. Batch dimension is unknown as we are\n # not using the drop_remainder argument.\n dataset = dataset.repeat(100).batch(10)\n\n with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)\n\n def test_learning_phase_value(self):\n # TODO(anjalisridhar): Modify this test to use Lambdas since we can compare\n # meaningful values. 
Currently we don't pass the learning phase if the\n # Lambda layer uses the learning phase.\n with self.cached_session():\n x = keras.layers.Input(shape=(1,), name='input')\n y = keras.layers.Dense(1, kernel_initializer='ones')(x)\n z = keras.layers.Dropout(0.9999)(y)\n model = keras.Model(x, z)\n initial_weights = model.get_weights()\n\n optimizer = gradient_descent.GradientDescentOptimizer(0.005)\n loss = 'mse'\n metrics = ['acc']\n strategy = mirrored_strategy.MirroredStrategy(\n ['/device:GPU:0', '/device:GPU:1'])\n\n model.compile(optimizer, loss, metrics=metrics, distribute=strategy)\n\n inputs = np.ones((10, 1), dtype=np.float32)\n targets = np.ones((10, 1), dtype=np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat().batch(8)\n hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)\n self.assertAlmostEqual(hist.history['acc'][0], 0, 0)\n\n model.set_weights(initial_weights)\n # TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.\n # evaluate_output = model.evaluate(dataset, steps=20)\n # self.assertAlmostEqual(evaluate_output[1], 1, 0)\n\n inputs = np.ones((10, 1), dtype=np.float32)\n predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)\n predict_dataset = predict_dataset.repeat().batch(5)\n output = model.predict(predict_dataset, steps=10)\n # `predict` runs for 10 steps and in each step you process 10 samples.\n ref_output = np.ones((100, 1), dtype=np.float32)\n self.assertArrayNear(output, ref_output, 1e-1)\n\n\nclass TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):\n\n def test_validating_dataset_input_tensors_with_shape_mismatch(self):\n with self.cached_session():\n strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',\n '/device:CPU:0'])\n a = constant_op.constant([1, 2], shape=(1, 2))\n b = constant_op.constant([[1, 2], [1, 2]], shape=(2, 2))\n x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})\n y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})\n with strategy.scope():\n # Removed device and input tensor shape details from the error message\n # since the order of the device and the corresponding input tensor shape\n # is not deterministic over different runs.\n with self.assertRaisesRegexp(ValueError,\n 'Input tensor shapes do not match for '\n 'distributed tensor inputs '\n 'DistributedValues:.+'):\n distributed_training_utils.validate_distributed_dataset_inputs(\n strategy, x, y)\n\n def test_validating_dataset_input_tensors_with_dtype_mismatch(self):\n with self.cached_session():\n strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:0',\n '/device:CPU:0'])\n a = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.int32)\n b = constant_op.constant([1, 2], shape=(1, 2), dtype=dtypes.float64)\n x = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': b})\n y = values.DistributedValues({'/device:CPU:0': a, '/device:GPU:0': a})\n with strategy.scope():\n # Removed device and input tensor dtype details from the error message\n # since the order of the device and the corresponding input tensor dtype\n # is not deterministic over different runs.\n with self.assertRaisesRegexp(ValueError,\n 'Input tensor dtypes do not match for '\n 'distributed tensor inputs '\n 'DistributedValues:.+'):\n distributed_training_utils.validate_distributed_dataset_inputs(\n strategy, x, y)\n\n def test_unsupported_features(self):\n with self.cached_session():\n model = get_model()\n\n optimizer = 
gradient_descent.GradientDescentOptimizer(0.001)\n loss = 'mse'\n metrics = ['mae']\n strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',\n '/device:GPU:0'])\n\n model.compile(optimizer, loss, metrics=metrics, distribute=strategy)\n\n dataset = get_dataset(strategy)\n\n # Test with validation split\n with self.assertRaisesRegexp(\n ValueError, '`validation_split` argument is not '\n 'supported when input `x` is a dataset or a '\n 'dataset iterator.+'):\n model.fit(dataset,\n epochs=1, steps_per_epoch=2, verbose=0,\n validation_split=0.5, validation_steps=2)\n\n # Test with sample weight.\n sample_weight = np.random.random((10,))\n with self.assertRaisesRegexp(\n ValueError, '`sample_weight` argument is not supported when input '\n '`x` is a dataset or a dataset iterator.'):\n model.fit(\n dataset,\n epochs=1,\n steps_per_epoch=2,\n verbose=0,\n sample_weight=sample_weight)\n\n # Test with not specifying the `steps` argument.\n with self.assertRaisesRegexp(\n ValueError, 'you should specify the `steps_per_epoch` argument'):\n model.fit(dataset, epochs=1, verbose=0)\n with self.assertRaisesRegexp(ValueError,\n 'you should specify the `steps` argument'):\n model.evaluate(dataset, verbose=0)\n\n with self.assertRaisesRegexp(ValueError,\n 'you should specify the `steps` argument'):\n model.predict(dataset, verbose=0)\n\n def test_calling_with_unsupported_predefined_callbacks(self):\n with self.cached_session():\n model = get_model()\n\n optimizer = gradient_descent.GradientDescentOptimizer(0.001)\n loss = 'mse'\n metrics = ['mae']\n strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',\n '/device:GPU:0'])\n model.compile(optimizer, loss, metrics=metrics, distribute=strategy)\n\n dataset = get_dataset(strategy)\n\n def schedule(_):\n return 0.001\n with self.assertRaisesRegexp(ValueError,\n 'LearningRateScheduler callback is not '\n 'supported with DistributionStrategy.'):\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,\n callbacks=[keras.callbacks.LearningRateScheduler(schedule)])\n\n with self.assertRaisesRegexp(ValueError,\n 'ReduceLROnPlateau callback is not '\n 'supported with DistributionStrategy.'):\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,\n callbacks=[keras.callbacks.ReduceLROnPlateau()])\n with self.assertRaisesRegexp(ValueError,\n 'histogram_freq in the TensorBoard callback '\n 'is not supported when using '\n 'DistributionStrategy.'):\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,\n callbacks=[keras.callbacks.TensorBoard(histogram_freq=10)])\n\n\nclass TestDistributionStrategyWithLossMasking(test.TestCase):\n\n # TODO(priyag): Enable all strategies for this test. 
Currently it does not\n # work for TPU due to some invalid datatype.\n def test_masking(self):\n with self.cached_session():\n np.random.seed(1337)\n x = np.array([[[1], [1]], [[0], [0]]])\n model = keras.models.Sequential()\n model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))\n model.add(\n keras.layers.TimeDistributed(\n keras.layers.Dense(1, kernel_initializer='one')))\n strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',\n '/device:GPU:0'])\n\n model.compile(loss='mse',\n optimizer=gradient_descent.GradientDescentOptimizer(0.01),\n distribute=strategy)\n y = np.array([[[1], [1]], [[1], [1]]])\n dataset = dataset_ops.Dataset.from_tensor_slices((x, y))\n dataset = dataset.repeat(100)\n dataset = dataset.batch(10)\n hist = model.fit(x=dataset, epochs=1, steps_per_epoch=2)\n self.assertEqual(hist.history['loss'][0], 0)\n\n\nclass TestDistributionStrategyWithNormalizationLayer(\n test.TestCase, parameterized.TestCase):\n\n @combinations.generate(strategy_combinations())\n def test_batchnorm_correctness(self, distribution):\n with self.cached_session():\n model = keras.models.Sequential()\n norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)\n model.add(norm)\n model.compile(loss='mse',\n optimizer=gradient_descent.GradientDescentOptimizer(0.01),\n distribute=distribution)\n\n # centered on 5.0, variance 10.0\n x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))\n x = x.astype('float32')\n dataset = dataset_ops.Dataset.from_tensor_slices((x, x))\n dataset = dataset.repeat(100)\n dataset = batch_wrapper(dataset, 32, distribution)\n\n predict_dataset = dataset_ops.Dataset.from_tensor_slices(x)\n predict_dataset = predict_dataset.repeat(100)\n predict_dataset = batch_wrapper(predict_dataset, 32, distribution)\n\n model.fit(dataset, epochs=4, verbose=0, steps_per_epoch=10)\n out = model.predict(predict_dataset, steps=2)\n out -= keras.backend.eval(norm.beta)\n out /= keras.backend.eval(norm.gamma)\n np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)\n np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)\n\n\nclass TestDistributionStrategyCorrectness(test.TestCase,\n parameterized.TestCase):\n\n @combinations.generate(strategy_combinations())\n def test_metric_correctness(self, distribution):\n with self.cached_session():\n keras.backend.set_image_data_format('channels_last')\n num_samples = 10000\n\n x_train = np.random.randint(0, 2, num_samples)\n x_train = np.reshape(x_train, (num_samples, 1))\n y_train = x_train\n x_train = x_train.astype('float32')\n y_train = y_train.astype('float32')\n\n # Create identity model.\n model = keras.Sequential()\n model.add(\n keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))\n model.compile(\n loss=keras.losses.mean_squared_error,\n optimizer=gradient_descent.GradientDescentOptimizer(0.5),\n metrics=[keras.metrics.BinaryAccuracy()],\n distribute=distribution)\n\n batch_size = 64\n batch_size //= distribution.num_replicas_in_sync\n train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))\n train_dataset = batch_wrapper(train_dataset, batch_size, distribution)\n\n history = model.fit(x=train_dataset, epochs=1, steps_per_epoch=10)\n self.assertEqual(history.history['binary_accuracy'], [1.0])\n\n @combinations.generate(strategy_and_inputs())\n def test_correctness(self, distribution, use_numpy):\n with self.cached_session():\n tolerance = 1e-5\n\n if isinstance(distribution, mirrored_strategy.MirroredStrategy):\n # TODO(b/119257215): use the default one once the 
flakyness is fixed.\n tolerance = 1e-4\n\n keras.backend.set_image_data_format('channels_last')\n np.random.seed(_RANDOM_SEED)\n random_seed.set_random_seed(_RANDOM_SEED)\n\n # Train, eval, and predict datasets are created with the same input numpy\n # arrays.\n # TODO(xiejw): Change this back to 10000, once we support final partial\n # batch.\n num_samples = 9984\n x_train = np.random.rand(num_samples, 1)\n y_train = 3 * x_train\n x_train = x_train.astype('float32')\n y_train = y_train.astype('float32')\n x_predict = [[1.], [2.], [3.], [4.]]\n\n # The model is built once and the initial weights are saved.\n # This is used to initialize the model for both the distribution and\n # non-distribution run. In addition, we add few non-linear layers to make\n # it non-trivial.\n model = keras.Sequential()\n model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))\n model.add(keras.layers.Dense(10, activation='relu'))\n model.add(keras.layers.Dense(10, activation='relu'))\n model.add(keras.layers.Dense(1))\n initial_weights = model.get_weights()\n\n def fit_and_predict(with_distribution=None):\n # We have initialized the model to the same weight for the distribution\n # and non-distribution run.\n model.set_weights(initial_weights)\n model.compile(\n loss=keras.losses.mean_squared_error,\n optimizer=gradient_descent.GradientDescentOptimizer(0.5),\n distribute=with_distribution)\n\n training_inputs, eval_inputs, predict_inputs = (\n get_correctness_test_inputs(use_numpy, with_distribution,\n x_train, y_train, x_predict))\n\n model.fit(**training_inputs)\n eval_result = model.evaluate(**eval_inputs)\n weights = model.get_weights()\n predict_result = model.predict(**predict_inputs)\n\n return weights, eval_result, predict_result\n\n wts_with_ds, eval_with_ds, predict_with_ds = fit_and_predict(\n with_distribution=distribution)\n wts_without_ds, eval_without_ds, predict_without_ds = fit_and_predict(\n with_distribution=None)\n\n # Verify that the weights, eval results, predict outputs are the same\n # within some limits of tolerance.\n self.assertAllClose(\n wts_with_ds, wts_without_ds, atol=tolerance, rtol=tolerance)\n self.assertAllClose(\n eval_with_ds, eval_without_ds, atol=tolerance, rtol=tolerance)\n self.assertAllClose(\n predict_with_ds, predict_without_ds, atol=tolerance, rtol=tolerance)\n\n\n# TODO(priyag): Add a test for TPUStrategy with steps_per_run > 1.\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for class OneDeviceStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.distribute.python import one_device_strategy\nfrom tensorflow.contrib.distribute.python import strategy_test_lib\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import test_util\n\n\nclass OneDeviceStrategyTest(strategy_test_lib.DistributionTestBase):\n\n def _get_distribution_strategy(self):\n return one_device_strategy.OneDeviceStrategy(\"/device:CPU:0\")\n\n def testMinimizeLossEager(self):\n self._test_minimize_loss_eager(self._get_distribution_strategy())\n\n def testMinimizeLossGraph(self):\n self._test_minimize_loss_graph(self._get_distribution_strategy())\n\n def testReplicaId(self):\n self._test_replica_id(self._get_distribution_strategy())\n\n @test_util.run_in_graph_and_eager_modes\n def testCallAndMergeExceptions(self):\n self._test_call_and_merge_exceptions(self._get_distribution_strategy())\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] |
[
[
"numpy.random.rand",
"tensorflow.python.summary.writer.writer_cache.FileWriterCache.clear",
"tensorflow.python.keras.testing_utils.get_test_data",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.layers.Masking",
"numpy.random.random",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.layers.BatchNormalization",
"tensorflow.python.keras.metrics.BinaryAccuracy",
"tensorflow.python.keras.models.Sequential",
"numpy.random.normal",
"tensorflow.contrib.distribute.python.combinations.combine",
"tensorflow.contrib.distribute.python.values.DistributedValues",
"numpy.random.randint",
"tensorflow.python.keras.metrics.CategoricalAccuracy",
"tensorflow.python.keras.backend.eval",
"tensorflow.python.keras.Sequential",
"tensorflow.python.framework.random_seed.set_random_seed",
"tensorflow.python.keras.engine.distributed_training_utils.validate_distributed_dataset_inputs",
"tensorflow.python.keras.models.Model",
"tensorflow.python.keras.callbacks.ReduceLROnPlateau",
"numpy.array",
"numpy.reshape",
"numpy.zeros",
"tensorflow.python.keras.engine.distributed_training_utils.get_batch_dimension",
"tensorflow.contrib.distribute.python.mirrored_strategy.MirroredStrategy",
"tensorflow.python.keras.engine.distributed_training_utils.get_input_batch_params",
"tensorflow.python.keras.layers.Dropout",
"tensorflow.python.keras.callbacks.LearningRateScheduler",
"tensorflow.python.platform.gfile.DeleteRecursively",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.estimator.keras.model_to_estimator",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.python.training.rmsprop.RMSPropOptimizer",
"tensorflow.python.keras.callbacks.TensorBoard",
"tensorflow.python.keras.Model",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.keras.backend.set_image_data_format",
"tensorflow.python.keras.utils.to_categorical",
"numpy.random.seed",
"tensorflow.python.keras.engine.distributed_training_utils.get_var_for_numpy",
"tensorflow.python.keras.optimizers.rmsprop",
"numpy.ones",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.platform.gfile.MakeDirs",
"tensorflow.python.estimator.run_config.RunConfig",
"tensorflow.python.keras.layers.Lambda",
"tensorflow.python.keras.layers.concatenate"
],
[
"tensorflow.python.eager.test.main",
"tensorflow.contrib.distribute.python.one_device_strategy.OneDeviceStrategy"
]
] |
topsun888/tensorflow
|
[
"bad7c50b9dc9789ad7dd0a62daca40b7269841ed",
"bad7c50b9dc9789ad7dd0a62daca40b7269841ed"
] |
[
"tensorflow/contrib/learn/python/learn/monitors.py",
"tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Monitors allow user instrumentation of the training process.\n\nMonitors are useful to track training, report progress, request early\nstopping and more. Monitors use the observer pattern and notify at the following\npoints:\n - when training begins\n - before a training step\n - after a training step\n - when training ends\n\nMonitors are not intended to be reusable.\n\nThere are a few pre-defined monitors:\n - CaptureVariable: saves a variable's values\n - GraphDump: intended for debug only - saves all tensor values\n - PrintTensor: outputs one or more tensor values to log\n - SummarySaver: saves summaries to a summary writer\n - ValidationMonitor: runs model validation, by periodically calculating eval\n metrics on a separate data set; supports optional early stopping\n\nFor more specific needs, you can create custom monitors by extending one of the\nfollowing classes:\n - BaseMonitor: the base class for all monitors\n - EveryN: triggers a callback every N training steps\n\nExample:\n\n class ExampleMonitor(monitors.BaseMonitor):\n def __init__(self):\n print 'Init'\n\n def begin(self, max_steps):\n print 'Starting run. Will train until step %d.' % max_steps\n\n def end(self):\n print 'Completed run.'\n\n def step_begin(self, step):\n print 'About to run step %d...' % step\n return ['loss_1:0']\n\n def step_end(self, step, outputs):\n print 'Done running step %d. 
The value of \"loss\" tensor: %s' % (\n step, outputs['loss_1:0'])\n\n linear_regressor = LinearRegressor()\n example_monitor = ExampleMonitor()\n linear_regressor.fit(\n x, y, steps=2, batch_size=1, monitors=[example_monitor])\n\n@@get_default_monitors\n@@BaseMonitor\n@@CaptureVariable\n@@CheckpointSaver\n@@EveryN\n@@ExportMonitor\n@@GraphDump\n@@LoggingTrainable\n@@NanLoss\n@@PrintTensor\n@@StepCounter\n@@StopAtStep\n@@SummarySaver\n@@ValidationMonitor\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport os\nimport time\n\nimport numpy as np\nimport six\n\nfrom tensorflow.contrib.framework import deprecated_arg_values\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\nfrom tensorflow.contrib.learn.python.learn import session_run_hook\nfrom tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache\nfrom tensorflow.core.framework.summary_pb2 import Summary\nfrom tensorflow.core.util.event_pb2 import SessionLog\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.python.training import summary_io\n\n\n# TODO(ptucker): Split each monitor class into a separate file.\n# TODO(ptucker): Fail if epoch or step does not monotonically increase?\nclass BaseMonitor(object):\n \"\"\"Base class for Monitors.\n\n Defines basic interfaces of Monitors.\n Monitors can either be run on all workers or, more commonly, restricted\n to run exclusively on the elected chief worker.\n \"\"\"\n\n def __init__(self):\n self._begun = False\n self._current_epoch = None\n self._current_step = None\n self._max_steps = None\n self._estimator = None\n self._estimator_locked = False\n\n @property\n def run_on_all_workers(self):\n return False\n\n def set_estimator(self, estimator):\n \"\"\"A setter called automatically by the target estimator.\n\n If the estimator is locked, this method does nothing.\n\n Args:\n estimator: the estimator that this monitor monitors.\n\n Raises:\n ValueError: if the estimator is None.\n \"\"\"\n if self._estimator_locked:\n return\n if estimator is None:\n raise ValueError(\"Missing estimator.\")\n # TODO(mdan): This should fail if called twice with the same estimator.\n self._estimator = estimator\n\n def _lock_estimator(self):\n \"\"\"Locks the estimator until _unlock_estimator is called.\"\"\"\n self._estimator_locked = True\n\n def _unlock_estimator(self):\n \"\"\"Unlocks the estimator.\"\"\"\n self._estimator_locked = False\n\n def begin(self, max_steps=None):\n \"\"\"Called at the beginning of training.\n\n When called, the default graph is the one we are executing.\n\n Args:\n max_steps: `int`, the maximum global step this training will run until.\n\n Raises:\n ValueError: if we've already begun a run.\n \"\"\"\n if self._begun:\n raise ValueError(\"begin called twice without end.\")\n self._max_steps = max_steps\n self._begun = True\n\n def end(self, session=None):\n \"\"\"Callback at the end of training/evaluation.\n\n Args:\n session: A `tf.Session` object that can be used to run ops.\n\n Raises:\n ValueError: if we've not begun a run.\n \"\"\"\n _ = session\n if not self._begun:\n raise ValueError(\"end called without begin.\")\n self._max_steps = None\n self._begun = False\n\n def epoch_begin(self, epoch):\n \"\"\"Begin epoch.\n\n Args:\n epoch: `int`, the epoch number.\n\n Raises:\n ValueError: if 
we've already begun an epoch, or `epoch` < 0.\n \"\"\"\n if self._current_epoch is not None:\n raise ValueError(\"epoch_begin called twice without epoch_end.\")\n if epoch < 0:\n raise ValueError(\"Invalid epoch %s.\" % epoch)\n self._current_epoch = epoch\n\n def epoch_end(self, epoch):\n \"\"\"End epoch.\n\n Args:\n epoch: `int`, the epoch number.\n\n Raises:\n ValueError: if we've not begun an epoch, or `epoch` number does not match.\n \"\"\"\n if self._current_epoch != epoch:\n raise ValueError(\n \"epoch_end expected %s but got %s.\", self._current_epoch, epoch)\n self._current_epoch = None\n\n def step_begin(self, step):\n \"\"\"Callback before training step begins.\n\n You may use this callback to request evaluation of additional tensors\n in the graph.\n\n Args:\n step: `int`, the current value of the global step.\n\n Returns:\n List of `Tensor` objects or string tensor names to be run.\n\n Raises:\n ValueError: if we've already begun a step, or `step` < 0, or\n `step` > `max_steps`.\n \"\"\"\n if (step < 0) or (\n (self._max_steps is not None) and (step > self._max_steps)):\n raise ValueError(\"Invalid step %s.\" % step)\n self._current_step = step\n return []\n\n def step_end(self, step, output): # pylint: disable=unused-argument\n \"\"\"Callback after training step finished.\n\n This callback provides access to the tensors/ops evaluated at this step,\n including the additional tensors for which evaluation was requested in\n `step_begin`.\n\n In addition, the callback has the opportunity to stop training by returning\n `True`. This is useful for early stopping, for example.\n\n Note that this method is not called if the call to `Session.run()` that\n followed the last call to `step_begin()` failed.\n\n Args:\n step: `int`, the current value of the global step.\n output: `dict` mapping `string` values representing tensor names to\n the value resulted from running these tensors. Values may be either\n scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.\n\n Returns:\n `bool`. True if training should stop.\n\n Raises:\n ValueError: if we've not begun a step, or `step` number does not match.\n \"\"\"\n if self._current_step != step:\n raise ValueError(\n \"step_end expected %s but got %s.\", self._current_step, step)\n self._current_step = None\n return False\n\n def post_step(self, step, session): # pylint: disable=unused-argument\n \"\"\"Callback after the step is finished.\n\n Called after step_end and receives session to perform extra session.run\n calls. 
If failure occurred in the process, will be called as well.\n\n Args:\n step: `int`, global step of the model.\n session: `Session` object.\n \"\"\"\n _ = step, session\n\n\ndef _extract_output(outputs, request):\n if request in outputs:\n return outputs[request]\n return outputs[request.name]\n\n\nclass EveryN(BaseMonitor):\n \"\"\"Base class for monitors that execute callbacks every N steps.\n\n This class adds three new callbacks:\n - every_n_step_begin\n - every_n_step_end\n - every_n_post_step\n\n The callbacks are executed every n steps, or optionally every step for the\n first m steps, where m and n can both be user-specified.\n\n When extending this class, note that if you wish to use any of the\n `BaseMonitor` callbacks, you must call their respective super implementation:\n\n def step_begin(self, step):\n super(ExampleMonitor, self).step_begin(step)\n return []\n\n Failing to call the super implementation will cause unpredictible behavior.\n\n The `every_n_post_step()` callback is also called after the last step if it\n was not already called through the regular conditions. Note that\n `every_n_step_begin()` and `every_n_step_end()` do not receive that special\n treatment.\n\n \"\"\"\n # TODO(ipolosukhin): Add also every n seconds.\n\n def __init__(self, every_n_steps=100, first_n_steps=1):\n \"\"\"Initializes an `EveryN` monitor.\n\n Args:\n every_n_steps: `int`, the number of steps to allow between callbacks.\n first_n_steps: `int`, specifying the number of initial steps during\n which the callbacks will always be executed, regardless of the value\n of `every_n_steps`. Note that this value is relative to the global step\n \"\"\"\n super(EveryN, self).__init__()\n self._every_n_steps = every_n_steps\n self._first_n_steps = first_n_steps\n # Last step in the model.\n self._last_successful_step = None\n # Last step at which we called one of the every_n methods\n self._last_active_step = 0\n self._every_n_step_begin_called = False\n\n def every_n_step_begin(self, step): # pylint: disable=unused-argument\n \"\"\"Callback before every n'th step begins.\n\n Args:\n step: `int`, the current value of the global step.\n\n Returns:\n A `list` of tensors that will be evaluated at this step.\n \"\"\"\n return []\n\n def every_n_step_end(self, step, outputs): # pylint: disable=unused-argument\n \"\"\"Callback after every n'th step finished.\n\n This callback provides access to the tensors/ops evaluated at this step,\n including the additional tensors for which evaluation was requested in\n `step_begin`.\n\n In addition, the callback has the opportunity to stop training by returning\n `True`. This is useful for early stopping, for example.\n\n Args:\n step: `int`, the current value of the global step.\n outputs: `dict` mapping `string` values representing tensor names to\n the value resulted from running these tensors. Values may be either\n scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.\n\n Returns:\n `bool`. 
True if training should stop.\n \"\"\"\n return False\n\n def every_n_post_step(self, step, session):\n \"\"\"Callback after a step is finished or `end()` is called.\n\n Args:\n step: `int`, the current value of the global step.\n session: `Session` object.\n \"\"\"\n pass\n\n def step_begin(self, step):\n \"\"\"Overrides `BaseMonitor.step_begin`.\n\n When overriding this method, you must call the super implementation.\n\n Args:\n step: `int`, the current value of the global step.\n Returns:\n A `list`, the result of every_n_step_begin, if that was called this step,\n or an empty list otherwise.\n\n Raises:\n ValueError: if called more than once during a step.\n \"\"\"\n super(EveryN, self).step_begin(step)\n if (step <= self._first_n_steps or\n step >= (self._every_n_steps + self._last_active_step) or\n step == self._max_steps): # Note: max_steps can be None here.\n self._every_n_step_begin_called = True\n return self.every_n_step_begin(step)\n self._every_n_step_begin_called = False\n return []\n\n def step_end(self, step, output):\n \"\"\"Overrides `BaseMonitor.step_end`.\n\n When overriding this method, you must call the super implementation.\n\n Args:\n step: `int`, the current value of the global step.\n output: `dict` mapping `string` values representing tensor names to\n the value resulted from running these tensors. Values may be either\n scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.\n Returns:\n `bool`, the result of every_n_step_end, if that was called this step,\n or `False` otherwise.\n \"\"\"\n super(EveryN, self).step_end(step, output)\n if self._every_n_step_begin_called:\n return self.every_n_step_end(step, output)\n return False\n\n def post_step(self, step, session):\n super(EveryN, self).post_step(step, session)\n if self._every_n_step_begin_called:\n self.every_n_post_step(step, session)\n self._last_active_step = step\n self._last_successful_step = step\n\n def end(self, session=None):\n super(EveryN, self).end(session=session)\n if self._last_successful_step != self._last_active_step:\n self.every_n_post_step(self._last_successful_step, session)\n\n\nclass StopAtStep(BaseMonitor):\n \"\"\"Monitor to request stop at a specified step.\"\"\"\n\n def __init__(self, num_steps=None, last_step=None):\n \"\"\"Create a StopAtStep monitor.\n\n This monitor requests stop after either a number of steps have been\n executed or a last step has been reached. Only of the two options can be\n specified.\n\n if `num_steps` is specified, it indicates the number of steps to execute\n after `begin()` is called. 
If instead `last_step` is specified, it\n indicates the last step we want to execute, as passed to the `step_begin()`\n call.\n\n Args:\n num_steps: Number of steps to execute.\n last_step: Step after which to stop.\n\n Raises:\n ValueError: If one of the arguments is invalid.\n \"\"\"\n super(StopAtStep, self).__init__()\n if num_steps is None and last_step is None:\n raise ValueError(\"One of num_steps or last_step must be specified.\")\n if num_steps is not None and last_step is not None:\n raise ValueError(\"Only one of num_steps or last_step can be specified.\")\n self._num_steps = num_steps\n self._last_step = last_step\n\n @property\n def run_on_all_workers(self):\n return True\n\n def step_begin(self, step):\n super(StopAtStep, self).step_begin(step)\n if self._last_step is None:\n self._last_step = step + self._num_steps - 1\n return []\n\n def step_end(self, step, output):\n super(StopAtStep, self).step_end(step, output)\n return step >= self._last_step\n\n\n# TODO(ptucker): Rename to LoggingTensor since it's not writing to stdout.\nclass PrintTensor(EveryN):\n \"\"\"Prints given tensors every N steps.\n\n This is an `EveryN` monitor and has consistent semantic for `every_n`\n and `first_n`.\n\n The tensors will be printed to the log, with `INFO` severity.\n \"\"\"\n\n def __init__(self, tensor_names, every_n=100, first_n=1):\n \"\"\"Initializes a PrintTensor monitor.\n\n Args:\n tensor_names: `dict` of tag to tensor names or\n `iterable` of tensor names (strings).\n every_n: `int`, print every N steps. See `PrintN.`\n first_n: `int`, also print the first N steps. See `PrintN.`\n \"\"\"\n super(PrintTensor, self).__init__(every_n, first_n)\n if not isinstance(tensor_names, dict):\n tensor_names = {item: item for item in tensor_names}\n self._tensor_names = tensor_names\n\n def every_n_step_begin(self, step):\n super(PrintTensor, self).every_n_step_begin(step)\n return list(self._tensor_names.values())\n\n def every_n_step_end(self, step, outputs):\n super(PrintTensor, self).every_n_step_end(step, outputs)\n stats = []\n for tag, tensor_name in six.iteritems(self._tensor_names):\n if tensor_name in outputs:\n stats.append(\"%s = %s\" % (tag,\n str(_extract_output(outputs, tensor_name))))\n logging.info(\"Step %d: %s\", step, \", \".join(stats))\n\n\nclass LoggingTrainable(EveryN):\n \"\"\"Writes trainable variable values into log every N steps.\n\n Write the tensors in trainable variables `every_n` steps,\n starting with the `first_n`th step.\n\n \"\"\"\n\n def __init__(self, scope=None, every_n=100, first_n=1):\n \"\"\"Initializes LoggingTrainable monitor.\n\n Args:\n scope: An optional string to match variable names using re.match.\n every_n: Print every N steps.\n first_n: Print first N steps.\n \"\"\"\n super(LoggingTrainable, self).__init__(every_n, first_n)\n self._scope = scope\n\n def every_n_step_begin(self, step):\n super(LoggingTrainable, self).every_n_step_begin(step)\n # Get a list of trainable variables at the begining of every N steps.\n # We cannot get this in __init__ because train_op has not been generated.\n trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,\n scope=self._scope)\n self._names = {}\n for var in trainables:\n self._names[var.name] = var.value().name\n return list(self._names.values())\n\n def every_n_step_end(self, step, outputs):\n super(LoggingTrainable, self).every_n_step_end(step, outputs)\n stats = []\n for tag, tensor_name in six.iteritems(self._names):\n if tensor_name in outputs:\n stats.append(\"%s = %s\" % (tag,\n 
str(_extract_output(outputs, tensor_name))))\n logging.info(\"Logging Trainable: Step %d: %s\", step, \", \".join(stats))\n\n\nclass SummarySaver(EveryN):\n \"\"\"Saves summaries every N steps.\"\"\"\n\n def __init__(self,\n summary_op,\n save_steps=100,\n output_dir=None,\n summary_writer=None,\n scaffold=None):\n \"\"\"Initializes a `SummarySaver` monitor.\n\n Args:\n summary_op: `Tensor` of type `string`. A serialized `Summary` protocol\n buffer, as output by TF summary methods like `scalar_summary` or\n `merge_all_summaries`.\n save_steps: `int`, save summaries every N steps. See `EveryN`.\n output_dir: `string`, the directory to save the summaries to. Only used\n if no `summary_writer` is supplied.\n summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,\n one will be created accordingly.\n scaffold: `Scaffold` to get summary_op if it's not provided.\n \"\"\"\n # TODO(ipolosukhin): Implement every N seconds.\n super(SummarySaver, self).__init__(every_n_steps=save_steps)\n self._summary_op = summary_op\n self._summary_writer = summary_writer\n if summary_writer is None and output_dir:\n self._summary_writer = summary_io.SummaryWriter(output_dir)\n self._scaffold = scaffold\n # TODO(mdan): Throw an error if output_dir and summary_writer are None.\n\n def set_estimator(self, estimator):\n super(SummarySaver, self).set_estimator(estimator)\n # TODO(mdan): This line looks redundant.\n if self._summary_writer is None:\n self._summary_writer = summary_io.SummaryWriter(estimator.model_dir)\n\n def every_n_step_begin(self, step):\n super(SummarySaver, self).every_n_step_begin(step)\n if self._summary_op is None and self._scaffold is not None:\n self._summary_op = self._scaffold.summary_op\n if self._summary_op is not None:\n return [self._summary_op]\n return []\n\n def every_n_step_end(self, step, outputs):\n super(SummarySaver, self).every_n_step_end(step, outputs)\n if self._summary_op is not None:\n summary_strs = _extract_output(outputs, self._summary_op)\n if self._summary_writer:\n self._summary_writer.add_summary(summary_strs, step)\n return False\n\n def end(self, session=None):\n super(SummarySaver, self).end(session=session)\n if self._summary_writer:\n self._summary_writer.flush()\n\n\nclass ValidationMonitor(EveryN):\n \"\"\"Runs evaluation of a given estimator, at most every N steps.\n\n Note that the evaluation is done based on the saved checkpoint, which will\n usually be older than the current step.\n\n Can do early stopping on validation metrics if `early_stopping_rounds` is\n provided.\n \"\"\"\n\n def __init__(self, x=None, y=None, input_fn=None, batch_size=None,\n eval_steps=None,\n every_n_steps=100, metrics=None, early_stopping_rounds=None,\n early_stopping_metric=\"loss\",\n early_stopping_metric_minimize=True, name=None):\n \"\"\"Initializes a ValidationMonitor.\n\n Args:\n x: See `BaseEstimator.evaluate`.\n y: See `BaseEstimator.evaluate`.\n input_fn: See `BaseEstimator.evaluate`.\n batch_size: See `BaseEstimator.evaluate`.\n eval_steps: See `BaseEstimator.evaluate`.\n every_n_steps: Check for new checkpoints to evaluate every N steps. If a\n new checkpoint is found, it is evaluated. See `EveryN`.\n metrics: See `BaseEstimator.evaluate`.\n early_stopping_rounds: `int`. 
If the metric indicated by\n `early_stopping_metric` does not change according to\n `early_stopping_metric_minimize` for this many steps, then training\n will be stopped.\n early_stopping_metric: `string`, name of the metric to check for early\n stopping.\n early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is\n expected to decrease (thus early stopping occurs when this metric\n stops decreasing), False if `early_stopping_metric` is expected to\n increase. Typically, `early_stopping_metric_minimize` is True for\n loss metrics like mean squared error, and False for performance\n metrics like accuracy.\n name: See `BaseEstimator.evaluate`.\n\n Raises:\n ValueError: If both x and input_fn are provided.\n \"\"\"\n super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps,\n first_n_steps=-1)\n # TODO(mdan): Checks like this are already done by evaluate.\n if x is None and input_fn is None:\n raise ValueError(\"Either x or input_fn should be provided.\")\n self.x = x\n self.y = y\n self.input_fn = input_fn\n self.batch_size = batch_size\n self.eval_steps = eval_steps\n self.metrics = metrics\n self.early_stopping_rounds = early_stopping_rounds\n self.early_stopping_metric = early_stopping_metric\n self.early_stopping_metric_minimize = early_stopping_metric_minimize\n self.name = name\n self._best_value_step = None\n self._best_value = None\n self._early_stopped = False\n self._latest_path = None\n self._latest_path_step = None\n\n @property\n def early_stopped(self):\n \"\"\"Returns True if this monitor caused an early stop.\"\"\"\n return self._early_stopped\n\n @property\n def best_step(self):\n \"\"\"Returns the step at which the best early stopping metric was found.\"\"\"\n return self._best_value_step\n\n @property\n def best_value(self):\n \"\"\"Returns the best early stopping metric value found so far.\"\"\"\n return self._best_value\n\n def every_n_step_end(self, step, outputs):\n super(ValidationMonitor, self).every_n_step_end(step, outputs)\n # TODO(mdan): The use of step below is probably misleading.\n # The code should probably use the step from the checkpoint, because\n # that's what is being evaluated.\n if self._estimator is None:\n raise ValueError(\"Missing call to set_estimator.\")\n # Check that we are not running evaluation on the same checkpoint.\n latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)\n if latest_path is None:\n logging.debug(\"Skipping evaluation since model has not been saved yet \"\n \"at step %d.\", step)\n return False\n if latest_path is not None and latest_path == self._latest_path:\n logging.debug(\"Skipping evaluation due to same checkpoint %s for step %d \"\n \"as for step %d.\", latest_path, step,\n self._latest_path_step)\n return False\n self._latest_path = latest_path\n self._latest_path_step = step\n\n # Run evaluation and log it.\n validation_outputs = self._estimator.evaluate(\n x=self.x, y=self.y, input_fn=self.input_fn, batch_size=self.batch_size,\n steps=self.eval_steps, metrics=self.metrics, name=self.name)\n stats = []\n for name in validation_outputs:\n stats.append(\"%s = %s\" % (name, str(validation_outputs[name])))\n logging.info(\"Validation (step %d): %s\", step, \", \".join(stats))\n\n # Early stopping logic.\n if self.early_stopping_rounds is not None:\n if self.early_stopping_metric not in validation_outputs:\n raise ValueError(\"Metric %s missing from outputs %s.\" % (\n self.early_stopping_metric, set(validation_outputs.keys())))\n current_value = 
validation_outputs[self.early_stopping_metric]\n if (self._best_value is None or (self.early_stopping_metric_minimize and\n (current_value < self._best_value)) or\n (not self.early_stopping_metric_minimize and\n (current_value > self._best_value))):\n self._best_value = current_value\n self._best_value_step = step\n stop_now = (step - self._best_value_step >= self.early_stopping_rounds)\n if stop_now:\n logging.info(\"Stopping. Best step: {} with {} = {}.\"\n .format(self._best_value_step,\n self.early_stopping_metric, self._best_value))\n self._early_stopped = True\n return True\n return False\n\n\n# TODO(ptucker): This really reads any tensor, not just vars, and requires the\n# ':0' suffix on var_name.\nclass CaptureVariable(EveryN):\n \"\"\"Captures a variable's values into a collection.\n\n This monitor is useful for unit testing. You should exercise caution when\n using this monitor in production, since it never discards values.\n\n This is an `EveryN` monitor and has consistent semantic for `every_n`\n and `first_n`.\n \"\"\"\n\n def __init__(self, var_name, every_n=100, first_n=1):\n \"\"\"Initializes a CaptureVariable monitor.\n\n Args:\n var_name: `string`. The variable name, including suffix (typically \":0\").\n every_n: `int`, print every N steps. See `PrintN.`\n first_n: `int`, also print the first N steps. See `PrintN.`\n \"\"\"\n super(CaptureVariable, self).__init__(every_n, first_n)\n self._var_name = var_name\n self._var_values = {}\n\n @property\n def values(self):\n \"\"\"Returns the values captured so far.\n\n Returns:\n `dict` mapping `int` step numbers to that values of the variable at the\n respective step.\n \"\"\"\n return self._var_values\n\n def every_n_step_begin(self, step):\n super(CaptureVariable, self).every_n_step_begin(step)\n return [self._var_name]\n\n def every_n_step_end(self, step, outputs):\n super(CaptureVariable, self).every_n_step_end(step, outputs)\n self._var_values[step] = _extract_output(outputs, self._var_name)\n\n\ndef get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100,\n output_dir=None, summary_writer=None):\n \"\"\"Returns a default set of typically-used monitors.\n\n Args:\n loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor`\n at the default interval.\n summary_op: See `SummarySaver`.\n save_summary_steps: See `SummarySaver`.\n output_dir: See `SummarySaver`.\n summary_writer: See `SummarySaver`.\n Returns:\n `list` of monitors.\n \"\"\"\n\n monitors = []\n if loss_op is not None:\n monitors.append(PrintTensor(tensor_names={\"loss\": loss_op.name}))\n if summary_op is not None:\n monitors.append(SummarySaver(summary_op, save_steps=save_summary_steps,\n output_dir=output_dir,\n summary_writer=summary_writer))\n return monitors\n\n\nclass GraphDump(BaseMonitor):\n \"\"\"Dumps almost all tensors in the graph at every step.\n\n Note, this is very expensive, prefer `PrintTensor` in production.\n \"\"\"\n\n IGNORE_OPS = [\"Const\", \"Assign\", \"Identity\", \"Placeholder\",\n \"RandomUniform\", \"Cast\", \"RestoreSlice\"]\n\n def __init__(self, ignore_ops=None):\n \"\"\"Initializes GraphDump monitor.\n\n Args:\n ignore_ops: `list` of `string`. 
Names of ops to ignore.\n If None, `GraphDump.IGNORE_OPS` is used.\n \"\"\"\n super(GraphDump, self).__init__()\n self._ignore_ops = ignore_ops or GraphDump.IGNORE_OPS\n self._data = {}\n\n def begin(self, max_steps=None):\n super(GraphDump, self).begin(max_steps=max_steps)\n self._tensors = []\n graph = ops.get_default_graph()\n graph_def = graph.as_graph_def()\n for node in graph_def.node:\n if node.op in self._ignore_ops:\n continue\n logging.info(\"op=%s name=%s.\", node.op, node.name)\n try:\n self._tensors.append(graph.get_tensor_by_name(node.name + \":0\"))\n except KeyError:\n pass\n\n def step_begin(self, step):\n super(GraphDump, self).step_begin(step)\n return self._tensors\n\n def step_end(self, step, output):\n super(GraphDump, self).step_end(step, output)\n self._data[step] = output\n\n @property\n def data(self):\n return self._data\n\n # TODO(ptucker): Handle keys that are in one but not the other.\n def compare(self, other_dump, step, atol=1e-06):\n \"\"\"Compares two `GraphDump` monitors and returns differences.\n\n Args:\n other_dump: Another `GraphDump` monitor.\n step: `int`, step to compare on.\n atol: `float`, absolute tolerance in comparison of floating arrays.\n\n Returns:\n Returns tuple:\n matched: `list` of keys that matched.\n non_matched: `dict` of keys to tuple of 2 mismatched values.\n\n Raises:\n ValueError: if a key in `data` is missing from `other_dump` at `step`.\n \"\"\"\n non_matched = {}\n matched = []\n this_output = self.data[step] if step in self.data else {}\n other_output = other_dump.data[step] if step in other_dump.data else {}\n for key in this_output:\n if not isinstance(key, str) and not isinstance(key, unicode):\n continue\n if key not in other_output:\n raise ValueError(\"%s missing at step %s.\", (key, step))\n value1 = _extract_output(this_output, key)\n value2 = _extract_output(other_output, key)\n if isinstance(value1, str):\n continue\n if isinstance(value1, np.ndarray):\n if not np.allclose(value1, value2, atol=atol):\n non_matched[key] = value1 - value2\n else:\n matched.append(key)\n else:\n if value1 != value2:\n non_matched[key] = (value1, value2)\n else:\n matched.append(key)\n return matched, non_matched\n\n\nclass ExportMonitor(EveryN):\n \"\"\"Monitor that exports Estimator every N steps.\"\"\"\n\n # TODO(philstahlfeld): Investigate switching export.export_estimator\n # configuration values to **kwargs so that updates to the export_estimator\n # function don't have to be reflected here.\n @deprecated_arg_values(\n \"2016-09-23\",\n \"The signature of the input_fn accepted by export is changing to be \"\n \"consistent with what's used by tf.Learn Estimator's train/evaluate. \"\n \"input_fn (and in most cases, input_feature_key) will both become \"\n \"required args.\",\n input_fn=None)\n def __init__(self,\n every_n_steps,\n export_dir,\n input_fn=None,\n input_feature_key=None,\n exports_to_keep=5,\n signature_fn=None,\n default_batch_size=1):\n \"\"\"Initializes ExportMonitor.\n\n Args:\n every_n_steps: Run monitor every N steps.\n export_dir: str, folder to export.\n input_fn: A function that takes no argument and returns a tuple of\n (features, targets), where features is a dict of string key to `Tensor`\n and targets is a `Tensor` that's currently not used (and so can be\n `None`).\n input_feature_key: String key into the features dict returned by\n `input_fn` that corresponds to the raw `Example` strings `Tensor` that\n the exported model will take as input. 
Can only be `None` if you're\n using a custom `signature_fn` that does not use the first arg\n (examples).\n exports_to_keep: int, number of exports to keep.\n signature_fn: Function that returns a default signature and a named\n signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s\n for features and `dict` of `Tensor`s for predictions.\n default_batch_size: Default batch size of the `Example` placeholder.\n\n Raises:\n ValueError: If `input_fn` and `input_feature_key` are not both defined or\n are not both `None`.\n \"\"\"\n super(ExportMonitor, self).__init__(every_n_steps=every_n_steps)\n self._export_dir = export_dir\n self._input_fn = input_fn\n self._input_feature_key = input_feature_key\n self._use_deprecated_input_fn = input_fn is None\n self._exports_to_keep = exports_to_keep\n self._signature_fn = signature_fn\n self._default_batch_size = default_batch_size\n self._last_export_dir = None\n\n @property\n def export_dir(self):\n return self._export_dir\n\n @property\n def exports_to_keep(self):\n return self._exports_to_keep\n\n @property\n def signature_fn(self):\n return self._signature_fn\n\n @property\n def last_export_dir(self):\n \"\"\"Returns the directory containing the last completed export.\n\n Returns:\n The string path to the exported directory. NB: this functionality was\n added on 2016/09/25; clients that depend on the return value may need\n to handle the case where this function returns None because the\n estimator being fitted does not yet return a value during export.\n \"\"\"\n return self._last_export_dir\n\n def every_n_step_end(self, step, outputs):\n super(ExportMonitor, self).every_n_step_end(step, outputs)\n try:\n self._last_export_dir = self._estimator.export(\n self.export_dir,\n exports_to_keep=self.exports_to_keep,\n signature_fn=self.signature_fn,\n input_fn=self._input_fn,\n default_batch_size=self._default_batch_size,\n input_feature_key=self._input_feature_key,\n use_deprecated_input_fn=self._use_deprecated_input_fn)\n except RuntimeError:\n # Currently we are not syncronized with saving checkpoints, which leads to\n # runtime errors when we are calling export on the same global step.\n # Exports depend on saved checkpoints for constructing the graph and\n # getting the global step from the graph instance saved in the checkpoint.\n # If the checkpoint is stale with respect to current step, the global step\n # is taken to be the last saved checkpoint's global step and exporter\n # doesn't export the same checkpoint again with the following error.\n logging.info(\"Skipping exporting because the existing checkpoint has \"\n \"already been exported. 
\"\n \"Consider exporting less frequently.\")\n\n def end(self, session=None):\n super(ExportMonitor, self).end(session=session)\n latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)\n if latest_path is None:\n logging.info(\"Skipping export at the end since model has not been saved \"\n \"yet.\")\n return\n try:\n self._last_export_dir = self._estimator.export(\n self.export_dir,\n exports_to_keep=self.exports_to_keep,\n signature_fn=self.signature_fn,\n input_fn=self._input_fn,\n default_batch_size=self._default_batch_size,\n input_feature_key=self._input_feature_key,\n use_deprecated_input_fn=self._use_deprecated_input_fn)\n except RuntimeError:\n logging.info(\"Skipping exporting for the same step.\")\n\n\nclass CheckpointSaver(BaseMonitor):\n \"\"\"Saves checkpoints every N steps.\"\"\"\n\n def __init__(self,\n checkpoint_dir,\n save_secs=None,\n save_steps=None,\n saver=None,\n checkpoint_basename=\"model.ckpt\",\n scaffold=None):\n \"\"\"Initialize CheckpointSaver monitor.\n\n Args:\n checkpoint_dir: `str`, base directory for the checkpoint files.\n save_secs: `int`, save every N secs.\n save_steps: `int`, save every N steps.\n saver: `Saver` object, used for saving.\n checkpoint_basename: `str`, base name for the checkpoint files.\n scaffold: `Scaffold`, use to get saver object.\n\n Raises:\n ValueError: If both `save_steps` and `save_secs` are not `None`.\n ValueError: If both `save_steps` and `save_secs` are `None`.\n \"\"\"\n logging.info(\"Create CheckpointSaver.\")\n super(CheckpointSaver, self).__init__()\n self._saver = saver\n self._summary_writer = SummaryWriterCache.get(checkpoint_dir)\n self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)\n self._scaffold = scaffold\n self._save_secs = save_secs\n self._save_steps = save_steps\n self._last_saved_time = None\n self._last_begin_step = None\n self._last_saved_step = None\n\n if save_steps is None and save_secs is None:\n raise ValueError(\"Either save_steps or save_secs should be provided\")\n if (save_steps is not None) and (save_secs is not None):\n raise ValueError(\"Can not provide both save_steps and save_secs.\")\n\n def begin(self, max_steps=None):\n super(CheckpointSaver, self).begin(max_steps)\n self._last_saved_time = None\n self._last_begin_step = None\n self._last_saved_step = None\n\n def step_begin(self, step):\n super(CheckpointSaver, self).step_begin(step)\n self._last_begin_step = step\n\n def post_step(self, step, session):\n super(CheckpointSaver, self).post_step(step, session)\n if self._last_saved_time is None:\n self._save(step, session)\n\n if self._save_steps is not None:\n if step >= self._last_saved_step + self._save_steps:\n self._save(step, session)\n\n if self._save_secs is not None:\n if time.time() >= self._last_saved_time + self._save_secs:\n self._save(step, session)\n\n def end(self, session=None):\n super(CheckpointSaver, self).end(session)\n self._save(self._last_begin_step, session)\n\n def _save(self, step, session):\n \"\"\"Saves the latest checkpoint.\"\"\"\n if step == self._last_saved_step:\n return\n logging.info(\"Saving checkpoints for %d into %s.\", step, self._save_path)\n self._last_saved_time = time.time()\n self._last_saved_step = step\n if self._saver is None:\n self._scaffold.saver.save(session, self._save_path, global_step=step)\n else:\n self._saver.save(session, self._save_path, global_step=step)\n self._summary_writer.add_session_log(\n SessionLog(\n status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),\n step)\n\n\nclass 
StepCounter(EveryN):\n \"\"\"Steps per second monitor.\"\"\"\n\n def __init__(self, every_n_steps=100, output_dir=None,\n summary_writer=None):\n super(StepCounter, self).__init__(every_n_steps=every_n_steps)\n self._summary_tag = \"global_step/sec\"\n self._last_reported_step = None\n self._last_reported_time = None\n self._summary_writer = summary_writer\n if summary_writer is None and output_dir:\n self._summary_writer = SummaryWriterCache.get(output_dir)\n\n def set_estimator(self, estimator):\n super(StepCounter, self).set_estimator(estimator)\n if self._summary_writer is None:\n self._summary_writer = SummaryWriterCache.get(estimator.model_dir)\n\n def every_n_step_end(self, current_step, outputs):\n current_time = time.time()\n if self._last_reported_time is not None and self._summary_writer:\n added_steps = current_step - self._last_reported_step\n elapsed_time = current_time - self._last_reported_time\n steps_per_sec = added_steps / elapsed_time\n summary = Summary(value=[Summary.Value(tag=self._summary_tag,\n simple_value=steps_per_sec)])\n self._summary_writer.add_summary(summary, current_step)\n self._last_reported_step = current_step\n self._last_reported_time = current_time\n\n\nclass NanLossDuringTrainingError(RuntimeError):\n\n def __str__(self):\n return \"NaN loss during training.\"\n\n\nclass NanLoss(EveryN):\n \"\"\"NaN Loss monitor.\n\n Monitors loss and stops training if loss is NaN.\n Can either fail with exception or just stop training.\n \"\"\"\n\n def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True):\n \"\"\"Initializes NanLoss monitor.\n\n Args:\n loss_tensor: `Tensor`, the loss tensor.\n every_n_steps: `int`, run check every this many steps.\n fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.\n \"\"\"\n super(NanLoss, self).__init__(every_n_steps=every_n_steps)\n self._loss_tensor = loss_tensor\n self._fail_on_nan_loss = fail_on_nan_loss\n\n def every_n_step_begin(self, step):\n super(NanLoss, self).every_n_step_begin(step)\n return [self._loss_tensor]\n\n def every_n_step_end(self, step, outputs):\n super(NanLoss, self).every_n_step_end(step, outputs)\n if np.isnan(_extract_output(outputs, self._loss_tensor)):\n failure_message = \"Model diverged with loss = NaN.\"\n if self._fail_on_nan_loss:\n logging.error(failure_message)\n raise NanLossDuringTrainingError\n else:\n logging.warning(failure_message)\n # We don't raise an error but we return \"should stop\" so we stop, but\n # without an exception.\n return True\n\n\nclass RunHookAdapterForMonitors(session_run_hook.SessionRunHook):\n \"\"\"Wraps monitors into a SessionRunHook.\"\"\"\n\n def __init__(self, monitors):\n self._monitors = monitors\n\n def begin(self):\n self._last_step = None\n self._global_step_tensor = contrib_variables.get_global_step()\n for m in self._monitors:\n m.begin(max_steps=None)\n\n def before_run(self, run_context):\n if self._last_step is None:\n self._last_step = run_context.session.run(self._global_step_tensor) + 1\n\n request = {self._global_step_tensor: self._global_step_tensor}\n monitor_fetches = []\n for m in self._monitors:\n monitor_requests = m.step_begin(self._last_step)\n if monitor_requests:\n if not isinstance(monitor_requests, list):\n raise ValueError(\"Monitor.step_begin should return a list.\")\n monitor_fetches.extend(monitor_requests)\n if monitor_fetches:\n request[\"monitors\"] = dict(\n zip(monitor_fetches, [_as_graph_element(f) for f in monitor_fetches]))\n\n return session_run_hook.SessionRunArgs(request)\n\n def 
after_run(self, run_context, run_values):\n result = run_values.results[\n \"monitors\"] if \"monitors\" in run_values.results else {}\n for m in self._monitors:\n induce_stop = m.step_end(self._last_step, result)\n if induce_stop:\n run_context.request_stop()\n\n for m in self._monitors:\n m.post_step(self._last_step, run_context.session)\n\n self._last_step = run_values.results[self._global_step_tensor] + 1\n\n def end(self, session):\n self._last_step = None\n for m in self._monitors:\n if \"session\" in inspect.getargspec(m.end).args:\n m.end(session=session)\n else:\n m.end()\n\n\ndef _as_graph_element(obj):\n \"\"\"Retrieves Graph element.\"\"\"\n graph = ops.get_default_graph()\n if not isinstance(obj, six.string_types):\n if not hasattr(obj, \"graph\") or obj.graph != graph:\n raise ValueError(\"Passed %s should have graph attribute that is equal \"\n \"to current graph %s.\" % (obj, graph))\n return obj\n if \":\" in obj:\n element = graph.as_graph_element(obj)\n else:\n element = graph.as_graph_element(obj + \":0\")\n # Check that there is no :1 (e.g. it's single output).\n try:\n graph.as_graph_element(obj + \":1\")\n except (KeyError, ValueError):\n pass\n else:\n raise ValueError(\"Name %s is ambiguous, \"\n \"as this `Operation` has multiple outputs \"\n \"(at least 2).\" % obj)\n return element\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Some common SessionRunHook classes.\n\n@@\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\n\nimport numpy as np\nimport six\n\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\nfrom tensorflow.contrib.learn.python.learn import session_run_hook\nfrom tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs\nfrom tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache\nfrom tensorflow.core.framework.summary_pb2 import Summary\nfrom tensorflow.core.util.event_pb2 import SessionLog\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import training_util\n\n\nclass LoggingTensorHook(session_run_hook.SessionRunHook):\n \"\"\"Prints given tensors every N iteration.\n\n The tensors will be printed to the log, with `INFO` severity.\n \"\"\"\n\n def __init__(self, tensors, every_n_iter=100):\n \"\"\"Initializes a LoggingHook monitor.\n\n Args:\n tensors: `dict` of tag to tensors/names or\n `iterable` of tensors/names.\n every_n_iter: `int`, print every N iteration.\n\n Raises:\n ValueError: if `every_n_iter` is non-positive.\n \"\"\"\n if every_n_iter <= 0:\n raise ValueError(\"Invalid every_n_iter=%s.\" % every_n_iter)\n if not isinstance(tensors, dict):\n tensors = {item: item for item in tensors}\n self._tensors = tensors\n self._every_n_iter = every_n_iter\n\n def begin(self):\n self._iter_count = 0\n # Convert names to tensors if given\n self._current_tensors = {tag: _as_graph_element(tensor)\n for (tag, tensor) in self._tensors.items()}\n\n def before_run(self, run_context): # pylint: disable=unused-argument\n if self._iter_count % self._every_n_iter == 0:\n return SessionRunArgs(self._current_tensors)\n else:\n return None\n\n def after_run(self, run_context, run_values):\n _ = run_context\n if self._iter_count % self._every_n_iter == 0:\n stats = []\n for tag in sorted(self._current_tensors.keys()):\n stats.append(\"%s = %s\" % (tag, run_values.results[tag]))\n logging.info(\"%s\", \", \".join(stats))\n self._iter_count += 1\n\n\nclass StopAtStepHook(session_run_hook.SessionRunHook):\n \"\"\"Monitor to request stop at a specified step.\"\"\"\n\n def __init__(self, num_steps=None, last_step=None):\n \"\"\"Create a StopAtStep Hook.\n\n This hook requests stop after either a number of steps have been\n executed or a last step has been reached. Only of the two options can be\n specified.\n\n if `num_steps` is specified, it indicates the number of steps to execute\n after `begin()` is called. 
If instead `last_step` is specified, it\n indicates the last step we want to execute, as passed to the `after_run()`\n call.\n\n Args:\n num_steps: Number of steps to execute.\n last_step: Step after which to stop.\n\n Raises:\n ValueError: If one of the arguments is invalid.\n \"\"\"\n if num_steps is None and last_step is None:\n raise ValueError(\"One of num_steps or last_step must be specified.\")\n if num_steps is not None and last_step is not None:\n raise ValueError(\"Only one of num_steps or last_step can be specified.\")\n self._num_steps = num_steps\n self._last_step = last_step\n\n def begin(self):\n self._global_step_tensor = contrib_variables.get_global_step()\n if self._global_step_tensor is None:\n raise RuntimeError(\"Global step should be created to use StopAtStepHook.\")\n\n def before_run(self, run_context): # pylint: disable=unused-argument\n return SessionRunArgs(self._global_step_tensor)\n\n def after_run(self, run_context, run_values):\n global_step = run_values.results\n if self._last_step is None:\n self._last_step = global_step + self._num_steps - 1\n if global_step >= self._last_step:\n run_context.request_stop()\n\n\nclass CheckpointSaverHook(session_run_hook.SessionRunHook):\n \"\"\"Saves checkpoints every N steps or seconds.\"\"\"\n\n def __init__(self,\n checkpoint_dir,\n save_secs=None,\n save_steps=None,\n saver=None,\n checkpoint_basename=\"model.ckpt\",\n scaffold=None):\n \"\"\"Initialize CheckpointSaverHook monitor.\n\n Args:\n checkpoint_dir: `str`, base directory for the checkpoint files.\n save_secs: `int`, save every N secs.\n save_steps: `int`, save every N steps.\n saver: `Saver` object, used for saving.\n checkpoint_basename: `str`, base name for the checkpoint files.\n scaffold: `Scaffold`, use to get saver object.\n\n Raises:\n ValueError: One of `save_steps` or `save_secs` should be set.\n \"\"\"\n logging.info(\"Create CheckpointSaverHook.\")\n self._saver = saver\n self._checkpoint_dir = checkpoint_dir\n self._summary_writer = SummaryWriterCache.get(checkpoint_dir)\n self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)\n self._scaffold = scaffold\n self._save_secs = save_secs\n self._save_steps = save_steps\n self._last_saved_time = None\n self._last_saved_step = None\n\n if save_steps is None and save_secs is None:\n raise ValueError(\"Either save_steps or save_secs should be provided\")\n if (save_steps is not None) and (save_secs is not None):\n raise ValueError(\"Can not provide both save_steps and save_secs.\")\n\n def begin(self):\n self._last_saved_time = None\n self._last_saved_step = None\n self._global_step_tensor = contrib_variables.get_global_step()\n if self._global_step_tensor is None:\n raise RuntimeError(\n \"Global step should be created to use CheckpointSaverHook.\")\n\n def before_run(self, run_context): # pylint: disable=unused-argument\n if self._last_saved_time is None:\n # Write graph in the first call.\n training_util.write_graph(\n ops.get_default_graph().as_graph_def(add_shapes=True),\n self._checkpoint_dir,\n \"graph.pbtxt\")\n self._summary_writer.add_graph(ops.get_default_graph())\n\n return SessionRunArgs(self._global_step_tensor)\n\n def after_run(self, run_context, run_values):\n global_step = run_values.results\n if self._last_saved_time is None:\n self._save(global_step, run_context.session)\n\n if self._save_steps is not None:\n if global_step >= self._last_saved_step + self._save_steps:\n self._save(global_step, run_context.session)\n\n if self._save_secs is not None:\n if time.time() >= 
self._last_saved_time + self._save_secs:\n self._save(global_step, run_context.session)\n\n def end(self, session):\n last_step = session.run(contrib_variables.get_global_step())\n self._save(last_step, session)\n\n def _save(self, step, session):\n \"\"\"Saves the latest checkpoint.\"\"\"\n if step == self._last_saved_step:\n return\n logging.info(\"Saving checkpoints for %d into %s.\", step, self._save_path)\n self._last_saved_time = time.time()\n self._last_saved_step = step\n if self._saver is None:\n self._scaffold.saver.save(session, self._save_path, global_step=step)\n else:\n self._saver.save(session, self._save_path, global_step=step)\n self._summary_writer.add_session_log(\n SessionLog(\n status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),\n step)\n\n\nclass StepCounterHook(session_run_hook.SessionRunHook):\n \"\"\"Steps per second monitor.\"\"\"\n\n def __init__(self, every_n_steps=100, output_dir=None, summary_writer=None):\n self._summary_tag = \"global_step/sec\"\n self._every_n_steps = every_n_steps\n self._summary_writer = summary_writer\n if summary_writer is None and output_dir:\n self._summary_writer = SummaryWriterCache.get(output_dir)\n\n def begin(self):\n self._last_reported_time = None\n self._last_reported_step = None\n self._global_step_tensor = contrib_variables.get_global_step()\n if self._global_step_tensor is None:\n raise RuntimeError(\n \"Global step should be created to use StepCounterHook.\")\n\n def before_run(self, run_context): # pylint: disable=unused-argument\n return SessionRunArgs(self._global_step_tensor)\n\n def after_run(self, run_context, run_values):\n _ = run_context\n if not self._summary_writer:\n return\n\n global_step = run_values.results\n current_time = time.time()\n if self._last_reported_time is None:\n self._last_reported_step = global_step\n self._last_reported_time = current_time\n else:\n if global_step >= self._every_n_steps + self._last_reported_step:\n added_steps = global_step - self._last_reported_step\n elapsed_time = current_time - self._last_reported_time\n steps_per_sec = added_steps / elapsed_time\n summary = Summary(value=[Summary.Value(\n tag=self._summary_tag, simple_value=steps_per_sec)])\n self._summary_writer.add_summary(summary, global_step)\n self._last_reported_step = global_step\n self._last_reported_time = current_time\n\n\nclass NanLossDuringTrainingError(RuntimeError):\n\n def __str__(self):\n return \"NaN loss during training.\"\n\n\nclass NanTensorHook(session_run_hook.SessionRunHook):\n \"\"\"NaN Loss monitor.\n\n Monitors loss and stops training if loss is NaN.\n Can either fail with exception or just stop training.\n \"\"\"\n\n def __init__(self, loss_tensor, fail_on_nan_loss=True):\n \"\"\"Initializes NanLoss monitor.\n\n Args:\n loss_tensor: `Tensor`, the loss tensor.\n fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.\n \"\"\"\n self._loss_tensor = loss_tensor\n self._fail_on_nan_loss = fail_on_nan_loss\n\n def before_run(self, run_context): # pylint: disable=unused-argument\n return SessionRunArgs(self._loss_tensor)\n\n def after_run(self, run_context, run_values):\n if np.isnan(run_values.results):\n failure_message = \"Model diverged with loss = NaN.\"\n if self._fail_on_nan_loss:\n logging.error(failure_message)\n raise NanLossDuringTrainingError\n else:\n logging.warning(failure_message)\n # We don't raise an error but we request stop without an exception.\n run_context.request_stop()\n\n\nclass SummarySaverHook(session_run_hook.SessionRunHook):\n \"\"\"Saves 
summaries every N steps.\"\"\"\n\n def __init__(self,\n save_steps=100,\n output_dir=None,\n summary_writer=None,\n scaffold=None,\n summary_op=None):\n \"\"\"Initializes a `SummarySaver` monitor.\n\n Args:\n save_steps: `int`, save summaries every N steps. See `EveryN`.\n output_dir: `string`, the directory to save the summaries to. Only used\n if no `summary_writer` is supplied.\n summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,\n one will be created accordingly.\n scaffold: `Scaffold` to get summary_op if it's not provided.\n summary_op: `Tensor` of type `string`. A serialized `Summary` protocol\n buffer, as output by TF summary methods like `scalar_summary` or\n `merge_all_summaries`.\n \"\"\"\n # TODO(ipolosukhin): Implement every N seconds.\n self._summary_op = summary_op\n self._summary_writer = summary_writer\n if summary_writer is None and output_dir:\n self._summary_writer = SummaryWriterCache.get(output_dir)\n self._scaffold = scaffold\n self._save_steps = save_steps\n # TODO(mdan): Throw an error if output_dir and summary_writer are None.\n\n def begin(self):\n self._last_saved_step = None\n self._request_summary = True\n self._global_step_tensor = contrib_variables.get_global_step()\n if self._global_step_tensor is None:\n raise RuntimeError(\n \"Global step should be created to use SummarySaverHook.\")\n\n def before_run(self, run_context): # pylint: disable=unused-argument\n requests = {\"global_step\": self._global_step_tensor}\n if self._request_summary:\n if self._summary_op is not None:\n requests[\"summary\"] = self._summary_op\n elif self._scaffold.summary_op is not None:\n requests[\"summary\"] = self._scaffold.summary_op\n\n return SessionRunArgs(requests)\n\n def after_run(self, run_context, run_values):\n _ = run_context\n if not self._summary_writer:\n return\n\n global_step = run_values.results[\"global_step\"]\n\n if self._last_saved_step is None:\n self._summary_writer.add_session_log(\n SessionLog(status=SessionLog.START), global_step)\n\n if self._request_summary:\n self._last_saved_step = global_step\n if \"summary\" in run_values.results:\n self._summary_writer.add_summary(run_values.results[\"summary\"],\n global_step)\n\n self._request_summary = (\n global_step >= self._last_saved_step + self._save_steps - 1)\n\n def end(self, session=None):\n if self._summary_writer:\n self._summary_writer.flush()\n\n\ndef _as_graph_element(obj):\n \"\"\"Retrieves Graph element.\"\"\"\n graph = ops.get_default_graph()\n if not isinstance(obj, six.string_types):\n if not hasattr(obj, \"graph\") or obj.graph != graph:\n raise ValueError(\"Passed %s should have graph attribute that is equal \"\n \"to current graph %s.\" % (obj, graph))\n return obj\n if \":\" in obj:\n element = graph.as_graph_element(obj)\n else:\n element = graph.as_graph_element(obj + \":0\")\n # Check that there is no :1 (e.g. it's single output).\n try:\n graph.as_graph_element(obj + \":1\")\n except (KeyError, ValueError):\n pass\n else:\n raise ValueError(\"Name %s is ambiguous, \"\n \"as this `Operation` has multiple outputs \"\n \"(at least 2).\" % obj)\n return element\n"
] |
[
[
"tensorflow.python.training.saver.latest_checkpoint",
"tensorflow.core.util.event_pb2.SessionLog",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.contrib.learn.python.learn.session_run_hook.SessionRunArgs",
"tensorflow.contrib.framework.deprecated_arg_values",
"tensorflow.python.platform.tf_logging.debug",
"tensorflow.python.platform.tf_logging.warning",
"numpy.allclose",
"tensorflow.contrib.framework.python.ops.variables.get_global_step",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.core.framework.summary_pb2.Summary.Value",
"tensorflow.contrib.learn.python.learn.summary_writer_cache.SummaryWriterCache.get",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.training.summary_io.SummaryWriter"
],
[
"tensorflow.core.util.event_pb2.SessionLog",
"numpy.isnan",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.contrib.learn.python.learn.session_run_hook.SessionRunArgs",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.contrib.framework.python.ops.variables.get_global_step",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.core.framework.summary_pb2.Summary.Value",
"tensorflow.contrib.learn.python.learn.summary_writer_cache.SummaryWriterCache.get",
"tensorflow.python.framework.ops.get_default_graph"
]
] |
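The hooks in the file above share one pattern: `before_run()` requests the global step and `after_run()` compares it against a threshold. Below is a minimal stand-alone sketch of the `StopAtStepHook` stopping rule; the `StopRule` class and `should_stop` method are illustrative names, not part of any TensorFlow API.

```python
# Hypothetical stand-alone sketch of StopAtStepHook's stopping rule.
# On the first observed step, `num_steps` is converted into an absolute
# `last_step` (current step + num_steps - 1); afterwards stopping is a
# plain >= comparison, exactly as in after_run() above.
class StopRule:
    def __init__(self, num_steps=None, last_step=None):
        if (num_steps is None) == (last_step is None):
            raise ValueError("Exactly one of num_steps or last_step must be set.")
        self._num_steps = num_steps
        self._last_step = last_step

    def should_stop(self, global_step):
        if self._last_step is None:
            self._last_step = global_step + self._num_steps - 1
        return global_step >= self._last_step


rule = StopRule(num_steps=3)
print([rule.should_stop(step) for step in (10, 11, 12)])  # [False, False, True]
```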
marmus12/CornerView
|
[
"f76cd1cb4c402c59bafbf66b5e038c2d1ab9610b"
] |
[
"epinet_fun/util.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 23 15:54:01 2018\n\n@author: shinyonsei2\n\"\"\"\n\nimport numpy as np\nimport imageio\n\n\n\ndef read_pfm(fpath, expected_identifier=\"Pf\"):\n # PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html\n \n def _get_next_line(f):\n next_line = f.readline().decode('utf-8').rstrip()\n # ignore comments\n while next_line.startswith('#'):\n next_line = f.readline().rstrip()\n return next_line\n \n with open(fpath, 'rb') as f:\n # header\n identifier = _get_next_line(f)\n if identifier != expected_identifier:\n raise Exception('Unknown identifier. Expected: \"%s\", got: \"%s\".' % (expected_identifier, identifier))\n\n try:\n line_dimensions = _get_next_line(f)\n dimensions = line_dimensions.split(' ')\n width = int(dimensions[0].strip())\n height = int(dimensions[1].strip())\n except:\n raise Exception('Could not parse dimensions: \"%s\". '\n 'Expected \"width height\", e.g. \"512 512\".' % line_dimensions)\n\n try:\n line_scale = _get_next_line(f)\n scale = float(line_scale)\n assert scale != 0\n if scale < 0:\n endianness = \"<\"\n else:\n endianness = \">\"\n except:\n raise Exception('Could not parse max value / endianess information: \"%s\". '\n 'Should be a non-zero number.' % line_scale)\n\n try:\n data = np.fromfile(f, \"%sf\" % endianness)\n data = np.reshape(data, (height, width))\n data = np.flipud(data)\n with np.errstate(invalid=\"ignore\"):\n data *= abs(scale)\n except:\n raise Exception('Invalid binary values. Could not create %dx%d array from input.' % (height, width))\n\n return data\n \ndef load_LFdata(dir_LFimages,hci_root): \n traindata_all=np.zeros((len(dir_LFimages), 512, 512, 9, 9, 3),np.uint8)\n traindata_label=np.zeros((len(dir_LFimages), 512, 512),np.float32)\n \n image_id=0\n for dir_LFimage in dir_LFimages:\n print(dir_LFimage)\n for i in range(81):\n try:\n tmp = np.float32(imageio.imread(hci_root + dir_LFimage+'/input_Cam0%.2d.png' % i)) # load LF images(9x9) \n except:\n print(hci_root + dir_LFimage+'/input_Cam0%.2d.png..does not exist' % i )\n traindata_all[image_id,:,:,i//9,i-9*(i//9),:]=tmp \n del tmp\n try: \n tmp = np.float32(read_pfm(hci_root +dir_LFimage+'/gt_disp_lowres.pfm')) # load LF disparity map\n except:\n print(hci_root + dir_LFimage+'/gt_disp_lowres.pfm..does not exist' % i ) \n traindata_label[image_id,:,:]=tmp \n del tmp\n image_id=image_id+1\n return traindata_all, traindata_label\n\ndef load_depth_gts(gt_dir,dir_LFimages): \n w_views = 9\n n_views = w_views**2\n traindata_label=np.zeros((len(dir_LFimages), 512, 512, n_views),np.float32)\n \n image_id=0\n for dir_LFimage in dir_LFimages:\n sample_name = dir_LFimage.split('/')[-1]\n print(\"loading additional gt.. \" + sample_name)\n for i in range(n_views):\n\n# try: 0%.2d.png\n tmp = np.float32(read_pfm(gt_dir +sample_name+'/gt_disp_lowres_Cam0%.2d.pfm' %i)) # load LF disparity map\n# except:\n# print(hci_root + dir_LFimage+'\\gt_disp_lowres.pfm..does not exist' % i ) \n traindata_label[image_id,:,:,i]=tmp \n del tmp\n image_id=image_id+1\n return traindata_label\n\n\n\n\n"
] |
[
[
"numpy.flipud",
"numpy.errstate",
"numpy.reshape",
"numpy.fromfile"
]
] |
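`read_pfm` above follows the netpbm PFM conventions: a `Pf` identifier line, a `width height` line, a scale line whose sign encodes endianness (negative means little-endian), then raw float32 rows stored bottom-up. A self-contained round-trip sketch of those conventions; the `tiny.pfm` file name is arbitrary.

```python
import numpy as np

# Write a 2x3 grayscale PFM by hand, then parse it back the same way
# read_pfm does: negative scale => little-endian "<f"; rows are stored
# bottom-up, hence np.flipud on both the write and the read side.
data = np.arange(6, dtype=np.float32).reshape(2, 3)
with open("tiny.pfm", "wb") as f:
    f.write(b"Pf\n3 2\n-1.0\n")              # identifier, "width height", scale
    np.flipud(data).astype("<f").tofile(f)

with open("tiny.pfm", "rb") as f:
    assert f.readline().strip() == b"Pf"
    width, height = map(int, f.readline().decode().split())
    scale = float(f.readline().decode())
    dtype = "<f" if scale < 0 else ">f"
    loaded = np.flipud(np.fromfile(f, dtype).reshape(height, width)) * abs(scale)

assert np.array_equal(loaded, data)
```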
sgbaird/automatminer
|
[
"9a3996e37672b547f10645b53b816ee670940d56"
] |
[
"automatminer/utils/tests/test_pkg.py"
] |
[
"\"\"\"\nAssorted package utils.\n\"\"\"\nimport os\nimport unittest\n\nimport pandas as pd\nfrom automatminer import __version__\nfrom automatminer.base import DFTransformer\nfrom automatminer.utils.pkg import (\n AMM_SUPPORTED_EXTS,\n check_fitted,\n compare_columns,\n get_version,\n save_dict_to_file,\n set_fitted,\n)\nfrom sklearn.exceptions import NotFittedError\n\n\nclass MyTransformer(DFTransformer):\n def __init__(self):\n super(MyTransformer, self).__init__()\n\n @set_fitted\n def fit(self, df, target):\n return df\n\n @check_fitted\n def transform(self, df, target):\n return df\n\n\nclass TestPackageTools(unittest.TestCase):\n def setUp(self) -> None:\n self.remant_base_path = os.path.dirname(__file__)\n self.remant_file_prefix = \"saved\"\n\n def test_compare_columns(self):\n df1 = pd.DataFrame({\"a\": [1, 2], \"b\": [2, 3]})\n df2 = pd.DataFrame({\"b\": [3, 4], \"c\": [4, 5]})\n comparison = compare_columns(df1, df2)\n self.assertTrue(comparison[\"mismatch\"])\n self.assertListEqual(comparison[\"df1_not_in_df2\"], [\"a\"])\n self.assertListEqual(comparison[\"df2_not_in_df1\"], [\"c\"])\n\n comparison2 = compare_columns(df1, df1)\n self.assertFalse(comparison2[\"mismatch\"])\n\n comparison3 = compare_columns(df1, df2, ignore=[\"c\"])\n self.assertTrue(comparison3[\"mismatch\"])\n self.assertListEqual(comparison3[\"df1_not_in_df2\"], [\"a\"])\n self.assertListEqual(comparison3[\"df2_not_in_df1\"], [])\n\n def test_fitting_decorations(self):\n df = pd.DataFrame({\"a\": [1, 2], \"b\": [2, 3]})\n mt = MyTransformer()\n\n self.assertFalse(mt.is_fit)\n mt.fit(df, \"\")\n self.assertTrue(mt.is_fit)\n df = mt.transform(df, \"\")\n\n mt2 = MyTransformer()\n self.assertRaises(NotFittedError, mt2.transform, [df, \"\"])\n\n def test_save_dict_to_file(self):\n test_dict = {\"a\": \"A\", \"b\": 1, \"c\": [1, \"q\"], \"d\": {\"m\": [3, 4]}}\n for ext in AMM_SUPPORTED_EXTS:\n filename = self._get_remnant_path(ext)\n save_dict_to_file(test_dict, filename=filename)\n self.assertTrue(os.path.isfile(filename))\n\n def test_get_version(self):\n v = get_version()\n self.assertEqual(v, __version__)\n\n def tearDown(self) -> None:\n remnants = [self._get_remnant_path(ext) for ext in AMM_SUPPORTED_EXTS]\n for remnant in remnants:\n if os.path.exists(remnant):\n os.remove(remnant)\n\n def _get_remnant_path(self, ext):\n relative_fname = self.remant_file_prefix + ext\n filename = os.path.join(self.remant_base_path, relative_fname)\n return filename\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"pandas.DataFrame"
]
] |
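The assertions above pin down the contract of `compare_columns`. As a hypothetical stand-alone re-implementation (the real helper lives in `automatminer.utils.pkg`), the following reproduces the behaviour the tests expect:

```python
import pandas as pd

# Hypothetical re-implementation of compare_columns, for illustration only:
# report whether the column sets differ and which columns are one-sided,
# optionally ignoring some columns entirely.
def compare_columns(df1, df2, ignore=()):
    c1 = set(df1.columns) - set(ignore)
    c2 = set(df2.columns) - set(ignore)
    return {
        "mismatch": c1 != c2,
        "df1_not_in_df2": sorted(c1 - c2),
        "df2_not_in_df1": sorted(c2 - c1),
    }

df1 = pd.DataFrame({"a": [1, 2], "b": [2, 3]})
df2 = pd.DataFrame({"b": [3, 4], "c": [4, 5]})
print(compare_columns(df1, df2))
# {'mismatch': True, 'df1_not_in_df2': ['a'], 'df2_not_in_df1': ['c']}
```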
zysite/parser
|
[
"8ed9ccb8e542655fd6fd1b6f7faaf084d13a866e"
] |
[
"supar/structs/fn.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport torch\nfrom supar.utils.common import MIN\nfrom supar.utils.fn import pad\nfrom torch.autograd import Function\n\n\ndef tarjan(sequence):\n r\"\"\"\n Tarjan algorithm for finding Strongly Connected Components (SCCs) of a graph.\n\n Args:\n sequence (list):\n List of head indices.\n\n Yields:\n A list of indices making up a SCC. All self-loops are ignored.\n\n Examples:\n >>> next(tarjan([2, 5, 0, 3, 1])) # (1 -> 5 -> 2 -> 1) is a cycle\n [2, 5, 1]\n \"\"\"\n\n sequence = [-1] + sequence\n # record the search order, i.e., the timestep\n dfn = [-1] * len(sequence)\n # record the the smallest timestep in a SCC\n low = [-1] * len(sequence)\n # push the visited into the stack\n stack, onstack = [], [False] * len(sequence)\n\n def connect(i, timestep):\n dfn[i] = low[i] = timestep[0]\n timestep[0] += 1\n stack.append(i)\n onstack[i] = True\n\n for j, head in enumerate(sequence):\n if head != i:\n continue\n if dfn[j] == -1:\n yield from connect(j, timestep)\n low[i] = min(low[i], low[j])\n elif onstack[j]:\n low[i] = min(low[i], dfn[j])\n\n # a SCC is completed\n if low[i] == dfn[i]:\n cycle = [stack.pop()]\n while cycle[-1] != i:\n onstack[cycle[-1]] = False\n cycle.append(stack.pop())\n onstack[i] = False\n # ignore the self-loop\n if len(cycle) > 1:\n yield cycle\n\n timestep = [0]\n for i in range(len(sequence)):\n if dfn[i] == -1:\n yield from connect(i, timestep)\n\n\ndef chuliu_edmonds(s):\n r\"\"\"\n ChuLiu/Edmonds algorithm for non-projective decoding :cite:`mcdonald-etal-2005-non`.\n\n Some code is borrowed from `tdozat's implementation`_.\n Descriptions of notations and formulas can be found in :cite:`mcdonald-etal-2005-non`.\n\n Notes:\n The algorithm does not guarantee to parse a single-root tree.\n\n Args:\n s (~torch.Tensor): ``[seq_len, seq_len]``.\n Scores of all dependent-head pairs.\n\n Returns:\n ~torch.Tensor:\n A tensor with shape ``[seq_len]`` for the resulting non-projective parse tree.\n\n .. 
_tdozat's implementation:\n https://github.com/tdozat/Parser-v3\n \"\"\"\n\n s[0, 1:] = MIN\n # prevent self-loops\n s.diagonal()[1:].fill_(MIN)\n # select heads with highest scores\n tree = s.argmax(-1)\n # return the cycle finded by tarjan algorithm lazily\n cycle = next(tarjan(tree.tolist()[1:]), None)\n # if the tree has no cycles, then it is a MST\n if not cycle:\n return tree\n # indices of cycle in the original tree\n cycle = torch.tensor(cycle)\n # indices of noncycle in the original tree\n noncycle = torch.ones(len(s)).index_fill_(0, cycle, 0)\n noncycle = torch.where(noncycle.gt(0))[0]\n\n def contract(s):\n # heads of cycle in original tree\n cycle_heads = tree[cycle]\n # scores of cycle in original tree\n s_cycle = s[cycle, cycle_heads]\n\n # calculate the scores of cycle's potential dependents\n # s(c->x) = max(s(x'->x)), x in noncycle and x' in cycle\n s_dep = s[noncycle][:, cycle]\n # find the best cycle head for each noncycle dependent\n deps = s_dep.argmax(1)\n # calculate the scores of cycle's potential heads\n # s(x->c) = max(s(x'->x) - s(a(x')->x') + s(cycle)), x in noncycle and x' in cycle\n # a(v) is the predecessor of v in cycle\n # s(cycle) = sum(s(a(v)->v))\n s_head = s[cycle][:, noncycle] - s_cycle.view(-1, 1) + s_cycle.sum()\n # find the best noncycle head for each cycle dependent\n heads = s_head.argmax(0)\n\n contracted = torch.cat((noncycle, torch.tensor([-1])))\n # calculate the scores of contracted graph\n s = s[contracted][:, contracted]\n # set the contracted graph scores of cycle's potential dependents\n s[:-1, -1] = s_dep[range(len(deps)), deps]\n # set the contracted graph scores of cycle's potential heads\n s[-1, :-1] = s_head[heads, range(len(heads))]\n\n return s, heads, deps\n\n # keep track of the endpoints of the edges into and out of cycle for reconstruction later\n s, heads, deps = contract(s)\n\n # y is the contracted tree\n y = chuliu_edmonds(s)\n # exclude head of cycle from y\n y, cycle_head = y[:-1], y[-1]\n\n # fix the subtree with no heads coming from the cycle\n # len(y) denotes heads coming from the cycle\n subtree = y < len(y)\n # add the nodes to the new tree\n tree[noncycle[subtree]] = noncycle[y[subtree]]\n # fix the subtree with heads coming from the cycle\n subtree = ~subtree\n # add the nodes to the tree\n tree[noncycle[subtree]] = cycle[deps[subtree]]\n # fix the root of the cycle\n cycle_root = heads[cycle_head]\n # break the cycle and add the root of the cycle to the tree\n tree[cycle[cycle_root]] = noncycle[cycle_head]\n\n return tree\n\n\ndef mst(scores, mask, multiroot=False):\n r\"\"\"\n MST algorithm for decoding non-projective trees.\n This is a wrapper for ChuLiu/Edmonds algorithm.\n\n The algorithm first runs ChuLiu/Edmonds to parse a tree and then have a check of multi-roots,\n If ``multiroot=True`` and there indeed exist multi-roots, the algorithm seeks to find\n best single-root trees by iterating all possible single-root trees parsed by ChuLiu/Edmonds.\n Otherwise the resulting trees are directly taken as the final outputs.\n\n Args:\n scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.\n Scores of all dependent-head pairs.\n mask (~torch.BoolTensor): ``[batch_size, seq_len]``.\n The mask to avoid parsing over padding tokens.\n The first column serving as pseudo words for roots should be ``False``.\n multiroot (bool):\n Ensures to parse a single-root tree If ``False``.\n\n Returns:\n ~torch.Tensor:\n A tensor with shape ``[batch_size, seq_len]`` for the resulting non-projective parse trees.\n\n Examples:\n 
>>> scores = torch.tensor([[[-11.9436, -13.1464, -6.4789, -13.8917],\n [-60.6957, -60.2866, -48.6457, -63.8125],\n [-38.1747, -49.9296, -45.2733, -49.5571],\n [-19.7504, -23.9066, -9.9139, -16.2088]]])\n >>> scores[:, 0, 1:] = MIN\n >>> scores.diagonal(0, 1, 2)[1:].fill_(MIN)\n >>> mask = torch.tensor([[False, True, True, True]])\n >>> mst(scores, mask)\n tensor([[0, 2, 0, 2]])\n \"\"\"\n\n batch_size, seq_len, _ = scores.shape\n scores = scores.cpu().unbind()\n\n preds = []\n for i, length in enumerate(mask.sum(1).tolist()):\n s = scores[i][:length+1, :length+1]\n tree = chuliu_edmonds(s)\n roots = torch.where(tree[1:].eq(0))[0] + 1\n if not multiroot and len(roots) > 1:\n s_root = s[:, 0]\n s_best = MIN\n s = s.index_fill(1, torch.tensor(0), MIN)\n for root in roots:\n s[:, 0] = MIN\n s[root, 0] = s_root[root]\n t = chuliu_edmonds(s)\n s_tree = s[1:].gather(1, t[1:].unsqueeze(-1)).sum()\n if s_tree > s_best:\n s_best, tree = s_tree, t\n preds.append(tree)\n\n return pad(preds, total_length=seq_len).to(mask.device)\n\n\nclass SampledLogsumexp(Function):\n\n @staticmethod\n def forward(ctx, x, dim=-1):\n ctx.dim = dim\n ctx.save_for_backward(x)\n return x.logsumexp(dim=dim)\n\n @staticmethod\n def backward(ctx, grad_output):\n from torch.distributions import OneHotCategorical\n x, dim = *ctx.saved_tensors, ctx.dim\n if ctx.needs_input_grad[0]:\n return grad_output.unsqueeze(dim).mul(OneHotCategorical(logits=x.movedim(dim, -1)).sample().movedim(-1, dim)), None\n return None, None\n\n\nclass Sparsemax(Function):\n\n @staticmethod\n def forward(ctx, x, dim=-1):\n ctx.dim = dim\n sorted_x, _ = x.sort(dim, True)\n z = sorted_x.cumsum(dim) - 1\n k = x.new_tensor(range(1, sorted_x.size(dim) + 1)).view(-1, *[1] * (x.dim() - 1)).transpose(0, dim)\n k = (k * sorted_x).gt(z).sum(dim, True)\n tau = z.gather(dim, k - 1) / k\n p = torch.clamp(x - tau, 0)\n ctx.save_for_backward(k, p)\n return p\n\n @staticmethod\n def backward(ctx, grad_output):\n k, p, dim = *ctx.saved_tensors, ctx.dim\n grad = grad_output.masked_fill(p.eq(0), 0)\n grad = torch.where(p.ne(0), grad - grad.sum(dim, True) / k, grad)\n return grad, None\n\n\nsampled_logsumexp = SampledLogsumexp.apply\n\nsparsemax = Sparsemax.apply\n"
] |
[
[
"torch.tensor",
"torch.clamp"
]
] |
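`Sparsemax.forward` above implements the sparsemax of Martins & Astudillo (2016): a projection of the scores onto the probability simplex that clips low scores to exactly zero. A 1-D NumPy sketch of the same threshold computation (the PyTorch version additionally handles arbitrary dims and saves `k`, `p` for the backward pass):

```python
import numpy as np

def sparsemax_1d(x):
    # Mirror Sparsemax.forward in plain NumPy for a single vector:
    # sort descending, find the support size k, derive the threshold tau,
    # and clip everything below tau to zero.
    z = np.sort(x)[::-1]                  # sorted_x in the torch code
    cssv = np.cumsum(z) - 1               # z = sorted_x.cumsum(dim) - 1
    k = np.arange(1, len(x) + 1)
    support = (k * z) > cssv              # (k * sorted_x).gt(z)
    k_max = support.sum()
    tau = cssv[k_max - 1] / k_max
    return np.maximum(x - tau, 0.0)       # torch.clamp(x - tau, 0)

print(sparsemax_1d(np.array([1.0, 2.0, 0.1])))  # [0. 1. 0.] -- truly sparse
print(sparsemax_1d(np.array([1.0, 1.0, 1.0])))  # uniform [1/3 1/3 1/3]
```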
Voda88/mlops
|
[
"412e95b6580e9820d4e57f93bd4c52ec877162eb"
] |
[
"scripts/train_model.py"
] |
[
"\"\"\"\nCopyright (C) Microsoft Corporation. All rights reserved.โ\n โ\nMicrosoft Corporation (โMicrosoftโ) grants you a nonexclusive, perpetual,\nroyalty-free right to use, copy, and modify the software code provided by us\n(\"Software Code\"). You may not sublicense the Software Code or any use of it\n(except to your affiliates and to vendors to perform work on your behalf)\nthrough distribution, network access, service agreement, lease, rental, or\notherwise. This license does not purport to express any claim of ownership over\ndata you may have shared with Microsoft in the creation of the Software Code.\nUnless applicable law gives you more rights, Microsoft reserves all other\nrights not expressly granted herein, whether by implication, estoppel or\notherwise. โ\n โ\nTHE SOFTWARE CODE IS PROVIDED โAS ISโ, WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\nMICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\nPROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\nBUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER\nIN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nfrom azureml.core.run import Run\nimport os\nimport argparse\nfrom sklearn.linear_model import Ridge\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nimport joblib\nimport json\n\n\ndef train_model(run, data, alpha):\n run.log(\"alpha\", alpha)\n run.parent.log(\"alpha\", alpha)\n reg = Ridge(alpha=alpha)\n reg.fit(data[\"train\"][\"X\"], data[\"train\"][\"y\"])\n preds = reg.predict(data[\"test\"][\"X\"])\n run.log(\"mse\", mean_squared_error(\n preds, data[\"test\"][\"y\"]), description=\"Mean squared error metric\")\n run.parent.log(\"mse\", mean_squared_error(\n preds, data[\"test\"][\"y\"]), description=\"Mean squared error metric\")\n return reg\n\n\ndef main():\n print(\"Running train.py\")\n\n parser = argparse.ArgumentParser(\"train\")\n parser.add_argument(\n \"--build_id\",\n type=str,\n help=\"The build ID of the build triggering this pipeline run\",\n )\n parser.add_argument(\n \"--model_name\",\n type=str,\n help=\"Name of the Model\",\n default=\"sklearn_regression_model.pkl\",\n )\n\n parser.add_argument(\n \"--step_output\",\n type=str,\n help=(\"output for passing data to next step\")\n )\n\n args = parser.parse_args()\n\n print(\"Argument [build_id]: %s\" % args.build_id)\n print(\"Argument [model_name]: %s\" % args.model_name)\n print(\"Argument [step_output]: %s\" % args.step_output)\n\n model_name = args.model_name\n build_id = args.build_id\n step_output_path = args.step_output\n\n print(\"Getting training parameters\")\n\n alpha = 0.5\n\n print(\"Parameter alpha: %s\" % alpha)\n\n run = Run.get_context()\n\n # Get the dataset\n dataset = run.input_datasets['training_data']\n if (dataset):\n df = dataset.to_pandas_dataframe()\n X = df.values\n y = df.Y\n else:\n e = (\"No dataset provided\")\n print(e)\n raise Exception(e)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=0)\n data = {\"train\": {\"X\": X_train, \"y\": y_train},\n \"test\": {\"X\": X_test, \"y\": y_test}}\n\n reg = 
train_model(run, data, alpha)\n\n # Pass model file to next step\n os.makedirs(step_output_path, exist_ok=True)\n model_output_path = os.path.join(step_output_path, model_name)\n joblib.dump(value=reg, filename=model_output_path)\n\n # Also upload model file to run outputs for history\n os.makedirs('outputs', exist_ok=True)\n output_path = os.path.join('outputs', model_name)\n joblib.dump(value=reg, filename=output_path)\n\n # Add properties to identify this specific training run\n run.parent.tag(\"BuildId\", value=build_id)\n run.tag(\"BuildId\", value=build_id)\n run.tag(\"run_type\", value=\"train\")\n builduri_base = os.environ.get(\"BUILDURI_BASE\")\n if (builduri_base is not None):\n build_uri = builduri_base + build_id\n run.tag(\"BuildUri\", value=build_uri)\n run.parent.tag(\"BuildUri\", value=build_uri)\n print(f\"tags now present for run: {run.tags}\")\n\n run.complete()\n\n\nif __name__ == '__main__':\n main()"
] |
[
[
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.Ridge"
]
] |
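Stripped of the Azure ML `Run` bookkeeping, the training step in `train_model.py` above is a plain scikit-learn fit. A minimal sketch with synthetic data standing in for the registered dataset:

```python
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

# Synthetic stand-in for the tabular training dataset.
rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
y = X @ np.array([1.5, -2.0, 0.0, 0.5, 3.0]) + rng.normal(scale=0.1, size=200)

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0)

reg = Ridge(alpha=0.5)                   # same alpha the script hard-codes
reg.fit(X_train, y_train)
mse = mean_squared_error(y_test, reg.predict(X_test))
print(f"mse: {mse:.4f}")
```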
jp2011/spatial-poisson-mixtures
|
[
"9e535a636e710a9fa146cbbd4613ece70ec90791"
] |
[
"src/models/block_mixture_gp_softmax.py"
] |
[
"import logging\nimport os\nimport pickle\nimport sys\nfrom pathlib import Path\n\nimport click\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport zsampler\nfrom dotenv import load_dotenv, find_dotenv\nfrom scipy.special import logsumexp, softmax\n\nfrom src.inference.context_geo import GridContextGeo, gp_inflate_duplicate, gp_deflate_sum\nfrom src.inference.hmc import HMCSampler\nfrom src.inference.priors import BetaPriorWithIntercept, GaussianPrior, GPNonGridPriorSqExpFixed\nfrom src.experiment.visualize import plot_traceplots\n\n\nclass BlockMixtureGpSoftmaxAllocation:\n\n def __init__(self, *, uid=None,\n grid_context=None,\n K=1,\n block_type=\"msoa\",\n hmc_all_iterations=100_000,\n hmc_burn_in=25_000,\n hmc_calibration=50_000,\n hmc_info_interval=20_000,\n hmc_thinning=5,\n verbose=False,\n lengthscale=1):\n self.uid = uid\n self.context = grid_context\n self.K = K\n self.NN = self.context.mask.shape[0]\n self.hmc_thinning = hmc_thinning\n self.hmc_info_interval = hmc_info_interval\n self.N = grid_context.counts.shape[0]\n self.J = self.context.J\n\n # do a random assignment to mixtures\n initial_Z = np.zeros((self.N, self.K), dtype=int)\n initial_Z[np.arange(self.N), np.random.choice(self.K, self.N)] = 1\n\n self.Z_samples = []\n\n # Create an (N x 1) vector which gives the corresponding block for each cell.\n if block_type == \"lad\":\n block_assignment = np.asarray(grid_context.lads)\n elif block_type == \"msoa\":\n block_assignment = np.asarray(grid_context.msoas)\n elif block_type == \"ward\":\n block_assignment = np.asarray(grid_context.wards)\n else:\n block_assignment = np.repeat(1, self.N) # a single block\n\n # read in block centroid coordinates\n block_centroid_file_path = Path(os.getcwd()) / \"data\" / \"processed\" / f\"{block_type}-centroids-map.csv\"\n block_centroids = pd.read_csv(block_centroid_file_path)\n self.coord_x = block_centroids[\"x\"].values\n self.coord_x = self.coord_x - np.min(self.coord_x)\n self.coord_y = block_centroids[\"y\"].values\n self.coord_y = self.coord_y - np.min(self.coord_y)\n\n self.block_labels = block_centroids.iloc[:, 1].values\n\n # Create the cell <-> block mapping (mind the ordering of the blocks)\n unique_block_labels = np.unique(self.block_labels)\n self.block_assignment_numeric = np.zeros(block_assignment.shape[0], dtype=np.int)\n for idx_cell, block_label in enumerate(block_assignment):\n self.block_assignment_numeric[idx_cell] = np.where(unique_block_labels == block_label)[0]\n self.block_assignment = block_assignment\n B = np.max(self.block_assignment_numeric) + 1\n self.B = B\n\n self.lengthscale = lengthscale\n\n # Priors\n self.beta_prior = BetaPriorWithIntercept(a=1, b=0.01)\n self.f_prior = GPNonGridPriorSqExpFixed(coord_x=self.coord_x, coord_y=self.coord_y,\n variance=100, lengthscale=self.lengthscale)\n self.log_theta_prior = GaussianPrior(mean=np.asarray([0]), variance=np.asarray([1e2]))\n\n init_beta_estimand = np.random.normal(0, 1, self.context.J * self.K)\n init_beta_mass_matrix = 1e3 * np.ones(self.context.J * self.K)\n self.beta_sampler = HMCSampler(func_lpdf=self.beta_loglik,\n func_nabla_lpdf=self.nabla_beta_loglik,\n func_plot=self.plot_beta if verbose else None,\n init_estimand=init_beta_estimand,\n init_M_diag=init_beta_mass_matrix,\n init_L=20,\n init_epsilon=5.0e-2,\n n_burnin=hmc_burn_in,\n n_calib=hmc_calibration,\n S=hmc_all_iterations,\n n_info_interval=hmc_info_interval,\n thinning=hmc_thinning,\n unique_estimation_id=uid,\n adaptive=True)\n\n init_f_estimand = 
np.random.normal(0, 1, B * self.K)\n init_f_mass_matrix = 1e4 * np.ones(B * self.K)\n self.f_sampler = HMCSampler(func_lpdf=self.f_loglik,\n func_nabla_lpdf=self.nabla_f_loglik,\n func_plot=self.plot_f if verbose else None,\n init_estimand=init_f_estimand,\n init_M_diag=init_f_mass_matrix,\n init_L=100,\n init_epsilon=5.0e-2,\n n_burnin=hmc_burn_in,\n n_calib=hmc_calibration,\n S=hmc_all_iterations,\n n_info_interval=hmc_info_interval,\n thinning=hmc_thinning,\n unique_estimation_id=uid,\n adaptive=False)\n\n self.current_beta = self.beta_sampler.estimand\n self.current_f = self.f_sampler.estimand\n self.current_Z = initial_Z\n\n self.logger = logging.getLogger(__name__)\n\n def beta_loglik(self, beta_estimand):\n\n beta_matrix = np.reshape(beta_estimand, (self.J, self.K), order='F') # build a J x K matrix\n Z = self.current_Z\n\n counts = self.context.counts\n covariates = self.context.covariates\n\n fixed_effects = np.sum(np.multiply(Z, np.dot(covariates, beta_matrix)), axis=1)\n\n poisson_part = np.sum(np.multiply(counts, fixed_effects) - np.exp(fixed_effects))\n beta_part = self.beta_prior.log_pdf(beta_estimand, self.J)\n\n output = poisson_part + beta_part\n return output\n\n def nabla_beta_loglik(self, beta_estimand):\n beta_matrix = np.reshape(beta_estimand, (self.J, self.K), order='F') # build a J x K matrix\n\n counts = self.context.counts\n covariates = self.context.covariates\n Z = self.current_Z\n fixed_effects = np.sum(np.multiply(Z, np.dot(covariates, beta_matrix)), axis=1)\n nabla_beta_matrix = np.zeros(beta_matrix.shape)\n nabla_beta_matrix += np.dot(covariates.T, Z * counts[:, np.newaxis])\n\n temp = np.exp(fixed_effects)\n nabla_beta_matrix += (- np.dot(covariates.T, Z * temp[:, np.newaxis]))\n nabla_beta = nabla_beta_matrix.flatten('F')\n nabla_beta += self.beta_prior.nabla_beta_log_pdf(beta_estimand, self.J)\n\n output = nabla_beta\n return output\n\n\n def plot_beta(self, beta_samples):\n beta_samples_array = np.asarray(beta_samples)\n for k in range(self.K):\n beta_k_samples = beta_samples_array[:, (k * self.J):((k + 1) * self.J)]\n plot_traceplots(beta_k_samples, self.context.covariates_names)\n plt.show()\n\n def sample_Z(self):\n beta_matrix = np.reshape(self.current_beta, (self.J, self.K), order='F') # build a J x K matrix\n f_matrix = np.reshape(self.current_f, (self.B, self.K), order='F')\n Z = self.current_Z\n\n f_full_matrix = gp_inflate_duplicate(f_matrix,\n self.block_assignment_numeric,\n self.N, self.K)\n counts = self.context.counts\n covariates = self.context.covariates\n\n fixed_effects_all = np.dot(covariates, beta_matrix)\n counts_matrix = np.repeat(counts.reshape((-1, 1)), self.K, axis=1)\n\n poi_lik = counts_matrix * fixed_effects_all - np.exp(fixed_effects_all)\n gp_log_softmax = f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis]\n\n prob = softmax(poi_lik + gp_log_softmax, axis=1)\n\n new_Z = zsampler.sample_bulk_categorical(Z.astype(np.int64), prob.astype(np.float64))\n return new_Z\n\n def f_loglik(self, F_estimand):\n\n f_matrix = np.reshape(F_estimand, (self.B, self.K), order='F')\n Z = self.current_Z\n\n f_full_matrix = gp_inflate_duplicate(f_matrix,\n self.block_assignment_numeric,\n self.N, self.K)\n output = 0\n temp = f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis]\n output += np.sum(np.multiply(Z, temp))\n\n for k in range(self.K):\n # GP contribution\n output += self.f_prior.get_logpdf(f=f_matrix[:, k])\n\n return output\n\n def nabla_f_loglik(self, F_estimand):\n f_matrix = np.reshape(F_estimand, (self.B, 
self.K), order='F')\n\n f_full_matrix = gp_inflate_duplicate(f_matrix,\n self.block_assignment_numeric,\n self.N, self.K)\n Z = self.current_Z\n\n f_gradient = np.zeros(f_matrix.shape)\n\n # nabla f poisson mixture\n temp_matrix = 1 - np.exp(f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis])\n inflated_output_matrix = np.multiply(Z, temp_matrix)\n f_gradient += gp_deflate_sum(inflated_output_matrix, self.block_assignment_numeric, self.N, self.B, self.K)\n\n for k in range(self.K):\n f_gradient[:, k] += self.f_prior.get_nabla_f(f=f_matrix[:, k])\n\n return f_gradient.flatten(order='F')\n\n def plot_f(self, F_samples):\n\n f_array = np.asarray(F_samples).reshape((-1, self.B, self.K), order='F')\n S = f_array.shape[0]\n\n # discard irrelevant samples\n self.Z_samples = self.Z_samples[(-S):]\n Z_samples_array = np.asarray(self.Z_samples)\n\n mixture_allocation = np.zeros((S, self.N, self.K))\n mixture_allocation[np.repeat(range(S), self.N), np.tile(range(self.N), S), Z_samples_array.flatten(order='C')] = 1\n average_alloc = np.mean(mixture_allocation, axis=0)\n\n for k in range(self.K):\n plt.figure()\n self.context.plot_realisations(average_alloc[:, k], 111)\n plt.show()\n\n # plot a random traceplot\n idx1 = np.random.choice(self.B)\n plot_traceplots(f_array[:, idx1, :], [f\"IDX: {idx1}: K={k}\" for k in range(self.K)])\n plt.show()\n\n latent_weight_samples = softmax(np.mean(f_array, axis=0), axis=1)\n latent_weight_samples_full = gp_inflate_duplicate(latent_weight_samples,\n self.block_assignment_numeric,\n self.N, self.K)\n plt.figure()\n for k in range(self.K):\n self.context.plot_realisations(latent_weight_samples_full[:, k], 111)\n plt.show()\n\n\n def load_samples_snapshot(self, iteration_no):\n beta_filepath = Path(os.getcwd()) / \"models\" / \"snapshots\" / f\"beta-samples--{self.uid}--{iteration_no}.npy\"\n F_filepath = Path(os.getcwd()) / \"models\" / \"snapshots\" / f\"F-samples--{self.uid}--{iteration_no}.npy\"\n Z_filepath = Path(os.getcwd()) / \"models\" / \"snapshots\" / f\"Z-samples--{self.uid}--{iteration_no}.npy\"\n beta_samples = np.load(beta_filepath)\n F_samples = np.load(F_filepath)\n Z_samples = np.load(Z_filepath)\n return beta_samples, Z_samples, F_samples\n\n def __save_output(self, iteration):\n\n folder_name = Path(os.getcwd()) / \"models\" / \"snapshots\"\n\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n\n F_full_path = folder_name / f\"F-samples--{self.uid}--{iteration}\"\n F_samples_array = np.asarray(self.f_sampler.samples)\n if F_samples_array.shape[0] > 0:\n np.save(F_full_path, F_samples_array[::self.hmc_thinning, :])\n\n beta_full_path = folder_name / f\"beta-samples--{self.uid}--{iteration}\"\n beta_array = np.asarray(self.beta_sampler.samples)\n if beta_array.shape[0] > 0:\n np.save(beta_full_path, beta_array[::self.hmc_thinning, :])\n\n Z_full_path = folder_name / f\"Z-samples--{self.uid}--{iteration}\"\n Z_array = np.asarray(self.Z_samples)\n if Z_array.shape[0] > 0:\n np.save(Z_full_path, Z_array[::self.hmc_thinning, :])\n\n def run_sampling(self, number_of_iterations):\n iteration = 0\n while iteration < number_of_iterations:\n\n ##########################################################################################\n # BOOKKEEPING\n ##########################################################################################\n # The HMC samplers are independently adaptive and therefore will discard samples during the adaptive phase.\n num_current_samples = min(len(self.beta_sampler.samples),\n 
len(self.f_sampler.samples))\n\n self.beta_sampler.samples = self.beta_sampler.samples[(-num_current_samples):]\n self.f_sampler.samples = self.f_sampler.samples[(-num_current_samples):]\n self.Z_samples = self.Z_samples[(-num_current_samples):]\n\n if (iteration + 1) % self.hmc_info_interval == 0:\n self.__save_output(iteration)\n\n ##########################################################################################\n # SAMPLE BETA\n ##########################################################################################\n self.beta_sampler.sample_one()\n self.current_beta = self.beta_sampler.estimand\n\n ##########################################################################################\n # SAMPLE Z\n ##########################################################################################\n new_Z = self.sample_Z()\n self.Z_samples.append(np.where(new_Z > 0)[1])\n self.current_Z = new_Z\n\n ##########################################################################################\n # SAMPLE F\n ##########################################################################################\n self.f_sampler.sample_one()\n self.current_f = self.f_sampler.estimand\n\n iteration += 1\n\n self.logger.info(\"Sampling completed - saving model.\")\n self.__save_output(iteration)\n\n\n@click.command()\n@click.option('--year', '-y', type=str, default='12013-122015')\n@click.option('--type', '-t', default='burglary')\n@click.option('--resolution', '-r', type=int, default=400)\n@click.option('--model_name', '-m', type=str, default='burglary_raw_4')\n@click.option('--interpolation', '-i', type=str, default='weighted')\n@click.option('--num_mixtures', '-K', type=int, default=3)\n@click.option('--uid', type=str, default=None)\n@click.option('--verbose', is_flag=True)\n@click.option('--block_type', type=str, default=\"lad\")\n@click.option('--collection_unit', type=str, default=\"lsoa\")\n@click.option('--lengthscale', type=float, default=1500.0)\ndef main(year, type, resolution, model_name, interpolation, num_mixtures, uid, verbose,\n block_type, collection_unit, lengthscale):\n if uid is None:\n uid = f\"blockmixgp--{block_type}--{type}--{model_name}--{interpolation}--{num_mixtures}--{resolution}-{year}\"\n\n log_fmt = '[%(levelname)s] [%(asctime)s] [%(name)s] %(message)s'\n datefmt = '%H:%M:%S'\n if verbose:\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=log_fmt)\n else:\n logging.basicConfig(filename=Path('models') / f\"log-{uid}.log\",\n filemode='a',\n format=log_fmt,\n datefmt=datefmt,\n level=logging.DEBUG)\n logger = logging.getLogger(__name__)\n\n logger.info(\"Building the context.\")\n grid_context = GridContextGeo(interpolation=interpolation,\n year=year,\n resolution=resolution,\n crime_type=type,\n model_name=model_name,\n cov_collection_unit=collection_unit,\n covariates_type='raw')\n\n logger.info(\"Writing sampling context into a file.\")\n context_filename = Path(os.getcwd()) / \"models\" / f\"context--{uid}.pickle\"\n with open(context_filename, 'wb') as context_file:\n context_info = {\n 'context': grid_context,\n 'K': num_mixtures\n }\n pickle.dump(context_info, context_file)\n\n logger.info(\"Initialising the model with estimand and mass matrix diagonal\")\n\n hmc_all_iterations = 250_000\n hmc_info_interval = 50_000\n hmc_thinning = 10\n hmc_burn_in = 90_000\n hmc_calibration = 150_000\n\n model = BlockMixtureGpSoftmaxAllocation(uid=uid,\n grid_context=grid_context,\n K=num_mixtures,\n hmc_info_interval=hmc_info_interval,\n 
hmc_all_iterations=hmc_all_iterations,\n hmc_thinning=hmc_thinning,\n hmc_burn_in=hmc_burn_in,\n hmc_calibration=hmc_calibration,\n block_type=block_type,\n verbose=verbose,\n lengthscale=lengthscale)\n\n model.run_sampling(number_of_iterations=hmc_all_iterations)\n logger.info(\"Procedure finished.\")\n\n\nif __name__ == \"__main__\":\n load_dotenv(find_dotenv())\n main()\n"
] |
[
[
"numpy.dot",
"numpy.random.choice",
"numpy.load",
"numpy.exp",
"numpy.mean",
"numpy.min",
"numpy.multiply",
"numpy.where",
"pandas.read_csv",
"numpy.max",
"numpy.random.normal",
"scipy.special.softmax",
"scipy.special.logsumexp",
"numpy.save",
"numpy.arange",
"numpy.reshape",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.asarray",
"numpy.ones",
"numpy.repeat",
"numpy.unique"
]
] |
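The Gibbs step in `sample_Z` above combines a per-cell Poisson log-likelihood with GP-derived log-mixture-weights and normalises over the `K` components. A toy sketch of that normalisation with made-up numbers (only the softmax/logsumexp mechanics, none of the model):

```python
import numpy as np
from scipy.special import logsumexp, softmax

# Toy illustration of the allocation probabilities in sample_Z: add the
# Poisson log-likelihood term to the GP log-mixture-weights, then softmax
# across the K components so each cell gets a categorical distribution.
rng = np.random.default_rng(0)
N, K = 5, 3
poi_lik = rng.normal(size=(N, K))        # stand-in for counts*eta - exp(eta)
f = rng.normal(size=(N, K))              # stand-in for the GP field per block
gp_log_softmax = f - logsumexp(f, axis=1)[:, np.newaxis]
prob = softmax(poi_lik + gp_log_softmax, axis=1)

assert np.allclose(prob.sum(axis=1), 1.0)   # each row is a valid categorical
print(prob)
```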
LiBinNLP/HOSDP
|
[
"f0806d1c27c9d5233002836e1825a1567891d928"
] |
[
"supar/parsers/dep.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom supar.models import (BiaffineDependencyModel, CRF2oDependencyModel,\n CRFDependencyModel, VIDependencyModel)\nfrom supar.parsers.parser import Parser\nfrom supar.utils import Config, Dataset, Embedding\nfrom supar.utils.common import BOS, PAD, UNK\nfrom supar.utils.field import ChartField, Field, RawField, SubwordField\nfrom supar.utils.fn import ispunct\nfrom supar.utils.logging import get_logger, progress_bar\nfrom supar.utils.metric import AttachmentMetric\nfrom supar.utils.transform import CoNLL\n\nlogger = get_logger(__name__)\n\n\nclass BiaffineDependencyParser(Parser):\n r\"\"\"\n The implementation of Biaffine Dependency Parser :cite:`dozat-etal-2017-biaffine`.\n \"\"\"\n\n NAME = 'biaffine-dependency'\n MODEL = BiaffineDependencyModel\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.TAG = self.transform.CPOS\n self.ARC, self.REL = self.transform.HEAD, self.transform.DEPREL\n\n def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,\n punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):\n r\"\"\"\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n update_steps (int):\n Gradient accumulation steps. Default: 1.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs.\n \"\"\"\n\n return super().train(**Config().update(locals()))\n\n def evaluate(self, data, buckets=8, batch_size=5000,\n punct=False, tree=True, proj=False, partial=False, verbose=True, **kwargs):\n r\"\"\"\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating evaluation configs.\n\n Returns:\n The loss scalar and evaluation results.\n \"\"\"\n\n return super().evaluate(**Config().update(locals()))\n\n def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,\n tree=True, proj=False, verbose=True, **kwargs):\n r\"\"\"\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. 
Default: ``None``.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating prediction configs.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n \"\"\"\n\n return super().predict(**Config().update(locals()))\n\n @classmethod\n def load(cls, path, reload=False, src=None, **kwargs):\n r\"\"\"\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load from cache or download, e.g., ``'biaffine-dep-en'``.\n - a local path to a pretrained model, e.g., ``./<path>/model``.\n reload (bool):\n Whether to discard the existing cache and force a fresh download. Default: ``False``.\n src (str):\n Specifies where to download the model.\n ``'github'``: github release page.\n ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n Default: None.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n Examples:\n >>> from supar import Parser\n >>> parser = Parser.load('biaffine-dep-en')\n >>> parser = Parser.load('./ptb.biaffine.dep.lstm.char')\n \"\"\"\n\n return super().load(path, reload, src, **kwargs)\n\n def _train(self, loader):\n self.model.train()\n\n bar, metric = progress_bar(loader), AttachmentMetric()\n\n for i, batch in enumerate(bar, 1):\n words, texts, *feats, arcs, rels = batch\n word_mask = words.ne(self.args.pad_index)\n mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n # ignore the first token of each sentence\n mask[:, 0] = 0\n s_arc, s_rel = self.model(words, feats)\n loss = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.partial)\n loss = loss / self.args.update_steps\n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)\n if i % self.args.update_steps == 0:\n self.optimizer.step()\n self.scheduler.step()\n self.optimizer.zero_grad()\n\n arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)\n if self.args.partial:\n mask &= arcs.ge(0)\n # ignore all punctuation if not specified\n if not self.args.punct:\n mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))\n metric(arc_preds, rel_preds, arcs, rels, mask)\n bar.set_postfix_str(f\"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}\")\n logger.info(f\"{bar.postfix}\")\n\n @torch.no_grad()\n def _evaluate(self, loader):\n self.model.eval()\n\n total_loss, metric = 0, AttachmentMetric()\n\n for batch in loader:\n words, texts, *feats, arcs, rels = batch\n word_mask = words.ne(self.args.pad_index)\n mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n # ignore the first token of each sentence\n mask[:, 0] = 0\n s_arc, s_rel = self.model(words, feats)\n loss = self.model.loss(s_arc, s_rel, arcs, rels, mask, 
self.args.partial)\n arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)\n if self.args.partial:\n mask &= arcs.ge(0)\n # ignore all punctuation if not specified\n if not self.args.punct:\n mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))\n total_loss += loss.item()\n metric(arc_preds, rel_preds, arcs, rels, mask)\n total_loss /= len(loader)\n\n return total_loss, metric\n\n @torch.no_grad()\n def _predict(self, loader):\n self.model.eval()\n\n preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}\n for batch in progress_bar(loader):\n words, texts, *feats = batch\n word_mask = words.ne(self.args.pad_index)\n mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n # ignore the first token of each sentence\n mask[:, 0] = 0\n lens = mask.sum(1).tolist()\n s_arc, s_rel = self.model(words, feats)\n arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)\n preds['arcs'].extend(arc_preds[mask].split(lens))\n preds['rels'].extend(rel_preds[mask].split(lens))\n if self.args.prob:\n preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, s_arc.softmax(-1).unbind())])\n preds['arcs'] = [seq.tolist() for seq in preds['arcs']]\n preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]\n\n return preds\n\n @classmethod\n def build(cls, path, min_freq=2, fix_len=20, **kwargs):\n r\"\"\"\n Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n Args:\n path (str):\n The path of the model to be saved.\n min_freq (str):\n The minimum frequency needed to include a token in the vocabulary.\n Required if taking words as encoder input.\n Default: 2.\n fix_len (int):\n The max length of all subword pieces. 
The excess part of each piece will be truncated.\n Required if using CharLSTM/BERT.\n Default: 20.\n kwargs (dict):\n A dict holding the unconsumed arguments.\n \"\"\"\n\n args = Config(**locals())\n args.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n os.makedirs(os.path.dirname(path) or './', exist_ok=True)\n if os.path.exists(path) and not args.build:\n parser = cls.load(**args)\n parser.model = cls.MODEL(**parser.args)\n parser.model.load_pretrained(parser.WORD.embed).to(args.device)\n return parser\n\n logger.info(\"Building the fields\")\n TAG, CHAR, ELMO, BERT = None, None, None, None\n if args.encoder != 'lstm':\n from transformers import (AutoTokenizer, GPT2Tokenizer,\n GPT2TokenizerFast)\n t = AutoTokenizer.from_pretrained(args.bert)\n WORD = SubwordField('words',\n pad=t.pad_token,\n unk=t.unk_token,\n bos=t.bos_token or t.cls_token,\n fix_len=args.fix_len,\n tokenize=t.tokenize,\n fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)\n WORD.vocab = t.get_vocab()\n else:\n WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True)\n if 'tag' in args.feat:\n TAG = Field('tags', bos=BOS)\n if 'char' in args.feat:\n CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len)\n if 'elmo' in args.feat:\n from allennlp.modules.elmo import batch_to_ids\n ELMO = RawField('elmo')\n ELMO.compose = lambda x: batch_to_ids(x).to(WORD.device)\n if 'bert' in args.feat:\n from transformers import (AutoTokenizer, GPT2Tokenizer,\n GPT2TokenizerFast)\n t = AutoTokenizer.from_pretrained(args.bert)\n BERT = SubwordField('bert',\n pad=t.pad_token,\n unk=t.unk_token,\n bos=t.bos_token or t.cls_token,\n fix_len=args.fix_len,\n tokenize=t.tokenize,\n fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)\n BERT.vocab = t.get_vocab()\n TEXT = RawField('texts')\n ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs)\n REL = Field('rels', bos=BOS)\n transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=ARC, DEPREL=REL)\n\n train = Dataset(transform, args.train)\n if args.encoder == 'lstm':\n WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))\n if TAG is not None:\n TAG.build(train)\n if CHAR is not None:\n CHAR.build(train)\n REL.build(train)\n args.update({\n 'n_words': len(WORD.vocab) if args.encoder != 'lstm' else WORD.vocab.n_init,\n 'n_rels': len(REL.vocab),\n 'n_tags': len(TAG.vocab) if TAG is not None else None,\n 'n_chars': len(CHAR.vocab) if CHAR is not None else None,\n 'char_pad_index': CHAR.pad_index if CHAR is not None else None,\n 'bert_pad_index': BERT.pad_index if BERT is not None else None,\n 'pad_index': WORD.pad_index,\n 'unk_index': WORD.unk_index,\n 'bos_index': WORD.bos_index\n })\n logger.info(f\"{transform}\")\n\n logger.info(\"Building the model\")\n model = cls.MODEL(**args).load_pretrained(WORD.embed if hasattr(WORD, 'embed') else None).to(args.device)\n logger.info(f\"{model}\\n\")\n\n return cls(args, model, transform)\n\n\nclass CRFDependencyParser(BiaffineDependencyParser):\n r\"\"\"\n The implementation of first-order CRF Dependency Parser :cite:`zhang-etal-2020-efficient`.\n \"\"\"\n\n NAME = 'crf-dependency'\n MODEL = CRFDependencyModel\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,\n punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):\n r\"\"\"\n Args:\n 
train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n update_steps (int):\n Gradient accumulation steps. Default: 1.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs.\n \"\"\"\n\n return super().train(**Config().update(locals()))\n\n def evaluate(self, data, buckets=8, batch_size=5000, punct=False,\n mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):\n r\"\"\"\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n punct (bool):\n If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n partial (bool):\n ``True`` denotes the trees are partially annotated. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating evaluation configs.\n\n Returns:\n The loss scalar and evaluation results.\n \"\"\"\n\n return super().evaluate(**Config().update(locals()))\n\n def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,\n mbr=True, tree=True, proj=True, verbose=True, **kwargs):\n r\"\"\"\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n If specified, the predicted results will be saved to the file. Default: ``None``.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n buckets (int):\n The number of buckets that sentences are assigned to. Default: 32.\n batch_size (int):\n The number of tokens in each batch. Default: 5000.\n prob (bool):\n If ``True``, outputs the probabilities. Default: ``False``.\n mbr (bool):\n If ``True``, performs MBR decoding. Default: ``True``.\n tree (bool):\n If ``True``, ensures to output well-formed trees. Default: ``False``.\n proj (bool):\n If ``True``, ensures to output projective trees. Default: ``False``.\n verbose (bool):\n If ``True``, increases the output verbosity. 
Default: ``True``.\n kwargs (dict):\n A dict holding unconsumed arguments for updating prediction configs.\n\n Returns:\n A :class:`~supar.utils.Dataset` object that stores the predicted results.\n \"\"\"\n\n return super().predict(**Config().update(locals()))\n\n @classmethod\n def load(cls, path, reload=False, src=None, **kwargs):\n r\"\"\"\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load from cache or download, e.g., ``'crf-dep-en'``.\n - a local path to a pretrained model, e.g., ``./<path>/model``.\n reload (bool):\n Whether to discard the existing cache and force a fresh download. Default: ``False``.\n src (str):\n Specifies where to download the model.\n ``'github'``: github release page.\n ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n Default: None.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n Examples:\n >>> from supar import Parser\n >>> parser = Parser.load('crf-dep-en')\n >>> parser = Parser.load('./ptb.crf.dep.lstm.char')\n \"\"\"\n\n return super().load(path, reload, src, **kwargs)\n\n def _train(self, loader):\n self.model.train()\n\n bar, metric = progress_bar(loader), AttachmentMetric()\n\n for i, batch in enumerate(bar, 1):\n words, texts, *feats, arcs, rels = batch\n word_mask = words.ne(self.args.pad_index)\n mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n # ignore the first token of each sentence\n mask[:, 0] = 0\n s_arc, s_rel = self.model(words, feats)\n loss, s_arc = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.mbr, self.args.partial)\n loss = loss / self.args.update_steps\n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)\n if i % self.args.update_steps == 0:\n self.optimizer.step()\n self.scheduler.step()\n self.optimizer.zero_grad()\n\n arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)\n if self.args.partial:\n mask &= arcs.ge(0)\n # ignore all punctuation if not specified\n if not self.args.punct:\n mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))\n metric(arc_preds, rel_preds, arcs, rels, mask)\n bar.set_postfix_str(f\"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}\")\n logger.info(f\"{bar.postfix}\")\n\n @torch.no_grad()\n def _evaluate(self, loader):\n self.model.eval()\n\n total_loss, metric = 0, AttachmentMetric()\n\n for batch in loader:\n words, texts, *feats, arcs, rels = batch\n word_mask = words.ne(self.args.pad_index)\n mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n # ignore the first token of each sentence\n mask[:, 0] = 0\n s_arc, s_rel = self.model(words, feats)\n loss, s_arc = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.mbr, self.args.partial)\n arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)\n if self.args.partial:\n mask &= arcs.ge(0)\n # ignore all punctuation if not specified\n if not self.args.punct:\n mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))\n total_loss += loss.item()\n metric(arc_preds, rel_preds, arcs, rels, mask)\n total_loss /= len(loader)\n\n return total_loss, metric\n\n @torch.no_grad()\n def _predict(self, loader):\n self.model.eval()\n\n preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}\n for batch in progress_bar(loader):\n 
words, texts, *feats = batch\n            word_mask = words.ne(self.args.pad_index)\n            mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n            # ignore the first token of each sentence\n            mask[:, 0] = 0\n            lens = mask.sum(1).tolist()\n            s_arc, s_rel = self.model(words, feats)\n            if self.args.mbr:\n                s_arc = self.model.crf(s_arc, mask, mbr=True)\n            arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)\n            preds['arcs'].extend(arc_preds[mask].split(lens))\n            preds['rels'].extend(rel_preds[mask].split(lens))\n            if self.args.prob:\n                arc_probs = s_arc if self.args.mbr else s_arc.softmax(-1)\n                preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, arc_probs.unbind())])\n        preds['arcs'] = [seq.tolist() for seq in preds['arcs']]\n        preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]\n\n        return preds\n\n\nclass CRF2oDependencyParser(BiaffineDependencyParser):\n    r\"\"\"\n    The implementation of second-order CRF Dependency Parser :cite:`zhang-etal-2020-efficient`.\n    \"\"\"\n\n    NAME = 'crf2o-dependency'\n    MODEL = CRF2oDependencyModel\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,\n              punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):\n        r\"\"\"\n        Args:\n            train/dev/test (list[list] or str):\n                Filenames of the train/dev/test datasets.\n            buckets (int):\n                The number of buckets that sentences are assigned to. Default: 32.\n            batch_size (int):\n                The number of tokens in each batch. Default: 5000.\n            update_steps (int):\n                Gradient accumulation steps. Default: 1.\n            punct (bool):\n                If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n            mbr (bool):\n                If ``True``, performs MBR decoding. Default: ``True``.\n            tree (bool):\n                If ``True``, ensures to output well-formed trees. Default: ``False``.\n            proj (bool):\n                If ``True``, ensures to output projective trees. Default: ``False``.\n            partial (bool):\n                ``True`` denotes the trees are partially annotated. Default: ``False``.\n            verbose (bool):\n                If ``True``, increases the output verbosity. Default: ``True``.\n            kwargs (dict):\n                A dict holding unconsumed arguments for updating training configs.\n        \"\"\"\n\n        return super().train(**Config().update(locals()))\n\n    def evaluate(self, data, buckets=8, batch_size=5000, punct=False,\n                 mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):\n        r\"\"\"\n        Args:\n            data (str):\n                The data for evaluation, both list of instances and filename are allowed.\n            buckets (int):\n                The number of buckets that sentences are assigned to. Default: 8.\n            batch_size (int):\n                The number of tokens in each batch. Default: 5000.\n            punct (bool):\n                If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n            mbr (bool):\n                If ``True``, performs MBR decoding. Default: ``True``.\n            tree (bool):\n                If ``True``, ensures to output well-formed trees. Default: ``True``.\n            proj (bool):\n                If ``True``, ensures to output projective trees. Default: ``True``.\n            partial (bool):\n                ``True`` denotes the trees are partially annotated. Default: ``False``.\n            verbose (bool):\n                If ``True``, increases the output verbosity. Default: ``True``.\n            kwargs (dict):\n                A dict holding unconsumed arguments for updating evaluation configs.\n\n        Returns:\n            The loss scalar and evaluation results.\n        \"\"\"\n\n        return super().evaluate(**Config().update(locals()))\n\n    def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,\n                mbr=True, tree=True, proj=True, verbose=True, **kwargs):\n        r\"\"\"\n        Args:\n            data (list[list] or str):\n                The data for prediction, both a list of instances and filename are allowed.\n            pred (str):\n                If specified, the predicted results will be saved to the file. Default: ``None``.\n            lang (str):\n                Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n                ``None`` if tokenization is not required.\n                Default: ``None``.\n            buckets (int):\n                The number of buckets that sentences are assigned to. Default: 8.\n            batch_size (int):\n                The number of tokens in each batch. Default: 5000.\n            prob (bool):\n                If ``True``, outputs the probabilities. Default: ``False``.\n            mbr (bool):\n                If ``True``, performs MBR decoding. Default: ``True``.\n            tree (bool):\n                If ``True``, ensures to output well-formed trees. Default: ``True``.\n            proj (bool):\n                If ``True``, ensures to output projective trees. Default: ``True``.\n            verbose (bool):\n                If ``True``, increases the output verbosity. Default: ``True``.\n            kwargs (dict):\n                A dict holding unconsumed arguments for updating prediction configs.\n\n        Returns:\n            A :class:`~supar.utils.Dataset` object that stores the predicted results.\n        \"\"\"\n\n        return super().predict(**Config().update(locals()))\n\n    @classmethod\n    def load(cls, path, reload=False, src=None, **kwargs):\n        r\"\"\"\n        Loads a parser with data fields and pretrained model parameters.\n\n        Args:\n            path (str):\n                - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n                  to load from cache or download, e.g., ``'crf2o-dep-en'``.\n                - a local path to a pretrained model, e.g., ``./<path>/model``.\n            reload (bool):\n                Whether to discard the existing cache and force a fresh download. 
Default: ``False``.\n src (str):\n Specifies where to download the model.\n ``'github'``: github release page.\n ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n Default: None.\n kwargs (dict):\n A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n Examples:\n >>> from supar import Parser\n >>> parser = Parser.load('crf2o-dep-en')\n >>> parser = Parser.load('./ptb.crf2o.dep.lstm.char')\n \"\"\"\n\n return super().load(path, reload, src, **kwargs)\n\n def _train(self, loader):\n self.model.train()\n\n bar, metric = progress_bar(loader), AttachmentMetric()\n\n for i, batch in enumerate(bar, 1):\n words, texts, *feats, arcs, sibs, rels = batch\n word_mask = words.ne(self.args.pad_index)\n mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n # ignore the first token of each sentence\n mask[:, 0] = 0\n s_arc, s_sib, s_rel = self.model(words, feats)\n loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.args.mbr, self.args.partial)\n loss = loss / self.args.update_steps\n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)\n if i % self.args.update_steps == 0:\n self.optimizer.step()\n self.scheduler.step()\n self.optimizer.zero_grad()\n\n arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask)\n if self.args.partial:\n mask &= arcs.ge(0)\n # ignore all punctuation if not specified\n if not self.args.punct:\n mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))\n metric(arc_preds, rel_preds, arcs, rels, mask)\n bar.set_postfix_str(f\"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}\")\n logger.info(f\"{bar.postfix}\")\n\n @torch.no_grad()\n def _evaluate(self, loader):\n self.model.eval()\n\n total_loss, metric = 0, AttachmentMetric()\n\n for batch in loader:\n words, texts, *feats, arcs, sibs, rels = batch\n word_mask = words.ne(self.args.pad_index)\n mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n # ignore the first token of each sentence\n mask[:, 0] = 0\n s_arc, s_sib, s_rel = self.model(words, feats)\n loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.args.mbr, self.args.partial)\n arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask, self.args.tree, self.args.mbr, self.args.proj)\n if self.args.partial:\n mask &= arcs.ge(0)\n # ignore all punctuation if not specified\n if not self.args.punct:\n mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))\n total_loss += loss.item()\n metric(arc_preds, rel_preds, arcs, rels, mask)\n total_loss /= len(loader)\n\n return total_loss, metric\n\n @torch.no_grad()\n def _predict(self, loader):\n self.model.eval()\n\n preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}\n for batch in progress_bar(loader):\n words, texts, *feats = batch\n word_mask = words.ne(self.args.pad_index)\n mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n # ignore the first token of each sentence\n mask[:, 0] = 0\n lens = mask.sum(1).tolist()\n s_arc, s_sib, s_rel = self.model(words, feats)\n if self.args.mbr:\n s_arc = self.model.crf((s_arc, s_sib), mask, mbr=True)\n arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask, self.args.tree, self.args.mbr, self.args.proj)\n preds['arcs'].extend(arc_preds[mask].split(lens))\n preds['rels'].extend(rel_preds[mask].split(lens))\n if self.args.prob:\n arc_probs = s_arc if self.args.mbr else 
s_arc.softmax(-1)\n                preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, arc_probs.unbind())])\n        preds['arcs'] = [seq.tolist() for seq in preds['arcs']]\n        preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]\n\n        return preds\n\n    @classmethod\n    def build(cls, path, min_freq=2, fix_len=20, **kwargs):\n        r\"\"\"\n        Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n        Args:\n            path (str):\n                The path of the model to be saved.\n            min_freq (int):\n                The minimum frequency needed to include a token in the vocabulary. Default: 2.\n            fix_len (int):\n                The max length of all subword pieces. The excess part of each piece will be truncated.\n                Required if using CharLSTM/BERT.\n                Default: 20.\n            kwargs (dict):\n                A dict holding the unconsumed arguments.\n        \"\"\"\n\n        args = Config(**locals())\n        args.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n        os.makedirs(os.path.dirname(path) or './', exist_ok=True)\n        if os.path.exists(path) and not args.build:\n            parser = cls.load(**args)\n            parser.model = cls.MODEL(**parser.args)\n            parser.model.load_pretrained(parser.WORD.embed).to(args.device)\n            return parser\n\n        logger.info(\"Building the fields\")\n        TAG, CHAR, ELMO, BERT = None, None, None, None\n        if args.encoder != 'lstm':\n            from transformers import (AutoTokenizer, GPT2Tokenizer,\n                                      GPT2TokenizerFast)\n            t = AutoTokenizer.from_pretrained(args.bert)\n            WORD = SubwordField('words',\n                                pad=t.pad_token,\n                                unk=t.unk_token,\n                                bos=t.bos_token or t.cls_token,\n                                fix_len=args.fix_len,\n                                tokenize=t.tokenize,\n                                fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)\n            WORD.vocab = t.get_vocab()\n        else:\n            WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True)\n            if 'tag' in args.feat:\n                TAG = Field('tags', bos=BOS)\n            if 'char' in args.feat:\n                CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len)\n            if 'elmo' in args.feat:\n                from allennlp.modules.elmo import batch_to_ids\n                ELMO = RawField('elmo')\n                ELMO.compose = lambda x: batch_to_ids(x).to(WORD.device)\n            if 'bert' in args.feat:\n                from transformers import (AutoTokenizer, GPT2Tokenizer,\n                                          GPT2TokenizerFast)\n                t = AutoTokenizer.from_pretrained(args.bert)\n                BERT = SubwordField('bert',\n                                    pad=t.pad_token,\n                                    unk=t.unk_token,\n                                    bos=t.bos_token or t.cls_token,\n                                    fix_len=args.fix_len,\n                                    tokenize=t.tokenize,\n                                    fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)\n                BERT.vocab = t.get_vocab()\n        TEXT = RawField('texts')\n        ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs)\n        SIB = ChartField('sibs', bos=BOS, use_vocab=False, fn=CoNLL.get_sibs)\n        REL = Field('rels', bos=BOS)\n        transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=(ARC, SIB), DEPREL=REL)\n\n        train = Dataset(transform, args.train)\n        if args.encoder == 'lstm':\n            WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))\n            if TAG is not None:\n                TAG.build(train)\n            if CHAR is not None:\n                CHAR.build(train)\n        REL.build(train)\n        args.update({\n            'n_words': len(WORD.vocab) if args.encoder != 'lstm' else WORD.vocab.n_init,\n            'n_rels': len(REL.vocab),\n            'n_tags': len(TAG.vocab) if TAG is not None else None,\n            'n_chars': len(CHAR.vocab) if CHAR is not None else None,\n            'char_pad_index': CHAR.pad_index if CHAR is not None else None,\n            'bert_pad_index': BERT.pad_index if BERT is not None else None,\n            'pad_index': WORD.pad_index,\n            'unk_index': WORD.unk_index,\n            'bos_index': WORD.bos_index\n        })\n        logger.info(f\"{transform}\")\n\n        logger.info(\"Building the model\")\n        model = cls.MODEL(**args).load_pretrained(WORD.embed if hasattr(WORD, 'embed') else None).to(args.device)\n        logger.info(f\"{model}\\n\")\n\n        return cls(args, model, transform)\n\n\nclass VIDependencyParser(BiaffineDependencyParser):\n    r\"\"\"\n    The implementation of Dependency Parser using Variational Inference (:cite:`wang-tu-2020-second`).\n    \"\"\"\n\n    NAME = 'vi-dependency'\n    MODEL = VIDependencyModel\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,\n              punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):\n        r\"\"\"\n        Args:\n            train/dev/test (list[list] or str):\n                Filenames of the train/dev/test datasets.\n            buckets (int):\n                The number of buckets that sentences are assigned to. Default: 32.\n            batch_size (int):\n                The number of tokens in each batch. Default: 5000.\n            update_steps (int):\n                Gradient accumulation steps. Default: 1.\n            punct (bool):\n                If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n            tree (bool):\n                If ``True``, ensures to output well-formed trees. Default: ``False``.\n            proj (bool):\n                If ``True``, ensures to output projective trees. Default: ``False``.\n            partial (bool):\n                ``True`` denotes the trees are partially annotated. Default: ``False``.\n            verbose (bool):\n                If ``True``, increases the output verbosity. Default: ``True``.\n            kwargs (dict):\n                A dict holding unconsumed arguments for updating training configs.\n        \"\"\"\n\n        return super().train(**Config().update(locals()))\n\n    def evaluate(self, data, buckets=8, batch_size=5000, punct=False,\n                 tree=True, proj=True, partial=False, verbose=True, **kwargs):\n        r\"\"\"\n        Args:\n            data (str):\n                The data for evaluation, both list of instances and filename are allowed.\n            buckets (int):\n                The number of buckets that sentences are assigned to. Default: 8.\n            batch_size (int):\n                The number of tokens in each batch. Default: 5000.\n            punct (bool):\n                If ``False``, ignores the punctuation during evaluation. Default: ``False``.\n            tree (bool):\n                If ``True``, ensures to output well-formed trees. Default: ``True``.\n            proj (bool):\n                If ``True``, ensures to output projective trees. Default: ``True``.\n            partial (bool):\n                ``True`` denotes the trees are partially annotated. Default: ``False``.\n            verbose (bool):\n                If ``True``, increases the output verbosity. Default: ``True``.\n            kwargs (dict):\n                A dict holding unconsumed arguments for updating evaluation configs.\n\n        Returns:\n            The loss scalar and evaluation results.\n        \"\"\"\n\n        return super().evaluate(**Config().update(locals()))\n\n    def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,\n                tree=True, proj=True, verbose=True, **kwargs):\n        r\"\"\"\n        Args:\n            data (list[list] or str):\n                The data for prediction, both a list of instances and filename are allowed.\n            pred (str):\n                If specified, the predicted results will be saved to the file. Default: ``None``.\n            lang (str):\n                Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n                ``None`` if tokenization is not required.\n                Default: ``None``.\n            buckets (int):\n                The number of buckets that sentences are assigned to. Default: 8.\n            batch_size (int):\n                The number of tokens in each batch. Default: 5000.\n            prob (bool):\n                If ``True``, outputs the probabilities. Default: ``False``.\n            tree (bool):\n                If ``True``, ensures to output well-formed trees. Default: ``True``.\n            proj (bool):\n                If ``True``, ensures to output projective trees. Default: ``True``.\n            verbose (bool):\n                If ``True``, increases the output verbosity. Default: ``True``.\n            kwargs (dict):\n                A dict holding unconsumed arguments for updating prediction configs.\n\n        Returns:\n            A :class:`~supar.utils.Dataset` object that stores the predicted results.\n        \"\"\"\n\n        return super().predict(**Config().update(locals()))\n\n    @classmethod\n    def load(cls, path, reload=False, src=None, **kwargs):\n        r\"\"\"\n        Loads a parser with data fields and pretrained model parameters.\n\n        Args:\n            path (str):\n                - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n                  to load from cache or download, e.g., ``'vi-dep-en'``.\n                - a local path to a pretrained model, e.g., ``./<path>/model``.\n            reload (bool):\n                Whether to discard the existing cache and force a fresh download. Default: ``False``.\n            src (str):\n                Specifies where to download the model.\n                ``'github'``: github release page.\n                ``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).\n                Default: None.\n            kwargs (dict):\n                A dict holding unconsumed arguments for updating training configs and initializing the model.\n\n        Examples:\n            >>> from supar import Parser\n            >>> parser = Parser.load('vi-dep-en')\n            >>> parser = Parser.load('./ptb.vi.dep.lstm.char')\n        \"\"\"\n\n        return super().load(path, reload, src, **kwargs)\n\n    def _train(self, loader):\n        self.model.train()\n\n        bar, metric = progress_bar(loader), AttachmentMetric()\n\n        for i, batch in enumerate(bar, 1):\n            words, texts, *feats, arcs, rels = batch\n            word_mask = words.ne(self.args.pad_index)\n            mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n            # ignore the first token of each sentence\n            mask[:, 0] = 0\n            s_arc, s_sib, s_rel = self.model(words, feats)\n            loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, rels, mask)\n            loss = loss / self.args.update_steps\n            loss.backward()\n            nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)\n            if i % self.args.update_steps == 0:\n                self.optimizer.step()\n                self.scheduler.step()\n                self.optimizer.zero_grad()\n\n            arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)\n            if self.args.partial:\n                mask &= arcs.ge(0)\n            # ignore all punctuation if not specified\n            if not self.args.punct:\n                mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))\n            metric(arc_preds, rel_preds, arcs, rels, mask)\n            bar.set_postfix_str(f\"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}\")\n        logger.info(f\"{bar.postfix}\")\n\n    @torch.no_grad()\n    def _evaluate(self, loader):\n        self.model.eval()\n\n        total_loss, metric = 0, AttachmentMetric()\n\n        for batch in loader:\n            words, texts, *feats, arcs, rels = batch\n            word_mask = words.ne(self.args.pad_index)\n            mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n            # ignore the first token of each sentence\n            mask[:, 0] = 0\n            s_arc, s_sib, s_rel = self.model(words, feats)\n            loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, rels, mask)\n            arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)\n            if self.args.partial:\n                mask &= arcs.ge(0)\n            # ignore all punctuation if not specified\n            if not self.args.punct:\n                mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))\n            total_loss += loss.item()\n            metric(arc_preds, rel_preds, arcs, rels, mask)\n        total_loss /= len(loader)\n\n        return total_loss, metric\n\n    @torch.no_grad()\n    def _predict(self, loader):\n        
self.model.eval()\n\n preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}\n for batch in progress_bar(loader):\n words, texts, *feats = batch\n word_mask = words.ne(self.args.pad_index)\n mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)\n # ignore the first token of each sentence\n mask[:, 0] = 0\n lens = mask.sum(1).tolist()\n s_arc, s_sib, s_rel = self.model(words, feats)\n s_arc = self.model.inference((s_arc, s_sib), mask)\n arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)\n preds['arcs'].extend(arc_preds[mask].split(lens))\n preds['rels'].extend(rel_preds[mask].split(lens))\n if self.args.prob:\n preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, s_arc.unbind())])\n preds['arcs'] = [seq.tolist() for seq in preds['arcs']]\n preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]\n\n return preds\n"
] |
[
[
"torch.no_grad",
"torch.cuda.is_available"
]
] |
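A pattern worth isolating from the supar row above: every _train method scales the loss by update_steps, clips gradients each batch, and only steps the optimizer and scheduler every update_steps batches (gradient accumulation). A minimal sketch of that pattern; model, loader, optimizer, scheduler, update_steps and clip are hypothetical stand-ins, not supar API:

    import torch.nn as nn

    def train_with_accumulation(model, loader, optimizer, scheduler, update_steps=1, clip=5.0):
        # Mirrors the loop above: scale the loss so accumulated gradients match a
        # single large batch, clip, and step only every `update_steps` batches.
        model.train()
        for i, (inputs, targets) in enumerate(loader, 1):
            loss = model(inputs, targets) / update_steps  # assume the model returns a scalar loss
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), clip)
            if i % update_steps == 0:
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()

Dividing by update_steps keeps the effective learning rate independent of how many batches are accumulated.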
chrwm/otoole
|
[
"f527eb1fdf75cc6872457a6e5145f678f5d34693"
] |
[
"src/otoole/preprocess/narrow_to_datafile.py"
] |
[
"import logging\nimport sys\nfrom abc import abstractmethod\nfrom typing import TextIO\n\nimport pandas as pd\nfrom datapackage import Package\nfrom pandas_datapackage_reader import read_datapackage\nfrom sqlalchemy import create_engine\n\nfrom otoole import read_packaged_file\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataPackageTo(object):\n \"\"\"Convert a data package to another format\n\n Arguments\n ---------\n datapackage: str\n The path to the databackage\n datafilepath: str\n The path to the destination file or folder\n sql: bool, default=False\n Flag to set whether the source datapackage is in sqlite format\n \"\"\"\n\n def __init__(self, datapackage: str, datafilepath: str, sql: bool = False):\n\n self.datapackage = datapackage\n self.datafilepath = datafilepath\n self.sql = sql\n self.package = self._get_package()\n self.default_values = self._get_default_values()\n self.config = read_packaged_file(\"config.yaml\", \"otoole.preprocess\")\n\n def _get_package(self):\n\n if self.sql:\n engine = create_engine(\"sqlite:///{}\".format(self.datapackage))\n package = Package(storage=\"sql\", engine=engine)\n else:\n package = read_datapackage(self.datapackage) # typing: datapackage.Package\n\n return package\n\n def _get_default_values(self):\n default_resource = (\n self.package.pop(\"default_values\").set_index(\"name\").to_dict()\n )\n return default_resource[\"default_value\"]\n\n def convert(self):\n \"\"\"Perform the conversion from datapackage to destination format\n \"\"\"\n\n handle = self._header()\n logger.debug(self.default_values)\n\n for name, df in self.package.items():\n logger.debug(name)\n\n if df.empty:\n columns = [x[\"name\"] for x in df._metadata[\"schema\"][\"fields\"]]\n df = pd.DataFrame(columns=columns)\n\n df = df.reset_index()\n if \"index\" in df.columns:\n df = df.drop(columns=\"index\")\n\n logger.debug(\"Number of columns: %s, %s\", len(df.columns), df.columns)\n if len(df.columns) > 1:\n default_value = self.default_values[name]\n self._write_parameter(df, name, handle, default=default_value)\n\n else:\n self._write_set(df, name, handle)\n\n self._footer(handle)\n\n handle.close()\n\n @abstractmethod\n def _header(self) -> TextIO:\n raise NotImplementedError()\n\n @abstractmethod\n def _write_parameter(\n self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float\n ) -> pd.DataFrame:\n \"\"\"Write parameter data\"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO) -> pd.DataFrame:\n \"\"\"Write set data\"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def _footer(self, handle: TextIO):\n raise NotImplementedError()\n\n\nclass DataPackageToCsv(DataPackageTo):\n def _header(self):\n filepath = open(self.datafilepath, \"w\")\n msg = \"# Model file written by *otoole*\\n\"\n filepath.write(msg)\n return filepath\n\n def _form_parameter(self, df: pd.DataFrame, default: float):\n\n df = df[df.VALUE != default]\n return df\n\n def _write_parameter(\n self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float\n ):\n \"\"\"Write parameter data to a csv file, omitting data which matches the default value\n\n Arguments\n ---------\n filepath : StreamIO\n df : pandas.DataFrame\n parameter_name : str\n handle: TextIO\n default : int\n \"\"\"\n df = self._form_parameter(df, default)\n handle.write(\"param default {} : {} :=\\n\".format(default, parameter_name))\n df.to_csv(path_or_buf=handle, sep=\" \", header=False, index=False)\n 
handle.write(\";\\n\")\n\n def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO):\n \"\"\"\n\n Arguments\n ---------\n df : pandas.DataFrame\n set_name : str\n handle: TextIO\n \"\"\"\n handle.write(\"set {} :=\\n\".format(set_name))\n df.to_csv(path_or_buf=handle, sep=\" \", header=False, index=False)\n handle.write(\";\\n\")\n\n def _footer(self, handle: TextIO):\n handle.write(\"end;\\n\")\n handle.close()\n\n\nclass DataPackageToExcel(DataPackageTo):\n def _header(self):\n return pd.ExcelWriter(self.datafilepath, mode=\"w\")\n\n def _form_parameter(\n self, df: pd.DataFrame, parameter_name: str, default: float\n ) -> pd.DataFrame:\n \"\"\"Converts data into wide format\n\n Arguments\n ---------\n df: pd.DataFrame\n parameter_name: str\n default: float\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n\n if not df.empty:\n\n names = df.columns.to_list()\n if len(names) > 2:\n logger.debug(\n \"More than 2 columns for {}: {}\".format(parameter_name, names)\n )\n rows = names[0:-2]\n columns = names[-2]\n values = names[-1]\n logger.debug(\"Rows: {}; columns: {}; values: {}\", rows, columns, values)\n logger.debug(\"dtypes: {}\".format(df.dtypes))\n pivot = pd.pivot_table(\n df, index=rows, columns=columns, values=values, fill_value=default\n )\n elif len(names) == 2:\n logger.debug(\"Two columns for {}: {}\".format(parameter_name, names))\n values = names[-1]\n rows = names[0:-2]\n logger.debug(\"Rows: {}; values: {}\", rows, values)\n pivot = pd.pivot_table(\n df, index=rows, values=values, fill_value=default\n )\n else:\n logger.debug(\"One column for {}: {}\".format(parameter_name, names))\n pivot = df.copy()\n pivot = pivot.reset_index(drop=True)\n\n else:\n logger.debug(\"Dataframe {} is empty\".format(parameter_name))\n pivot = df.copy()\n\n return pivot\n\n def _write_parameter(\n self,\n df: pd.DataFrame,\n parameter_name: str,\n handle: pd.ExcelWriter,\n default: float,\n ):\n df = self._form_parameter(df, parameter_name, default)\n df.to_excel(handle, sheet_name=parameter_name, merge_cells=False)\n\n def _write_set(self, df: pd.DataFrame, set_name, handle: pd.ExcelWriter):\n df.to_excel(handle, sheet_name=set_name, merge_cells=False, index=False)\n\n def _footer(self, handle=pd.ExcelWriter):\n handle.close()\n\n\ndef convert_datapackage_to_datafile(path_to_datapackage, path_to_datafile):\n dp = DataPackageToCsv(path_to_datapackage, path_to_datafile)\n dp.convert()\n\n\ndef convert_datapackage_to_excel(path_to_datapackage, path_to_excel):\n dp = DataPackageToExcel(path_to_datapackage, path_to_excel)\n dp.convert()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n path_to_datapackage = sys.argv[1]\n path_to_datafile = sys.argv[2]\n\n DataPackageToCsv(path_to_datapackage, path_to_datafile)\n"
] |
[
[
"pandas.DataFrame",
"pandas.pivot_table",
"pandas.ExcelWriter"
]
] |
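The DataPackageToExcel._form_parameter method in the row above reshapes each narrow parameter table into wide format: all but the last two columns become the index, the second-to-last column is spread across the sheet, and gaps take the parameter's default value. A small self-contained sketch of that reshaping; the column names and data are illustrative, not part of otoole:

    import pandas as pd

    # Hypothetical narrow-format parameter data: index columns, then a value column.
    df = pd.DataFrame({
        "REGION": ["R1", "R1", "R2", "R2"],
        "YEAR": [2020, 2021, 2020, 2021],
        "VALUE": [1.0, 1.1, 2.0, 2.1],
    })

    names = df.columns.to_list()
    rows, columns, values = names[0:-2], names[-2], names[-1]
    # One row per REGION, one column per YEAR; absent combinations take the default.
    wide = pd.pivot_table(df, index=rows, columns=columns, values=values, fill_value=0)
    print(wide)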
andrewellis55/OpenMDAO
|
[
"d01fd526e71add4a203b7d32c534e1eab07dafaf"
] |
[
"openmdao/utils/general_utils.py"
] |
[
"\"\"\"Some miscellaneous utility functions.\"\"\"\nfrom contextlib import contextmanager\nimport os\nimport re\nimport sys\nimport warnings\nimport unittest\nfrom fnmatch import fnmatchcase\nfrom io import StringIO\nfrom numbers import Number\n\n# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x\ntry:\n from collections.abc import Iterable\nexcept ImportError:\n from collections import Iterable\n\nimport numbers\n\nimport numpy as np\n\nfrom openmdao.core.constants import INT_DTYPE, INF_BOUND\nfrom openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation\n\n# Certain command line tools can make use of this to allow visualization of models when errors\n# are present that would normally cause setup to abort.\n_ignore_errors = False\n\n\ndef _convert_auto_ivc_to_conn_name(conns_dict, name):\n \"\"\"\n Convert name of auto_ivc val to promoted input name.\n\n Parameters\n ----------\n conns_dict : dict\n Dictionary of global connections.\n name : str\n Name of auto_ivc to be found.\n\n Returns\n -------\n str\n Promoted input name.\n \"\"\"\n for key, val in conns_dict.items():\n if val == name:\n return key\n\n\ndef ignore_errors(flag=None):\n \"\"\"\n Disable certain errors that will prevent setup from completing.\n\n Parameters\n ----------\n flag : bool or None\n If not None, set the value of _ignore_errors to this value.\n\n Returns\n -------\n bool\n The current value of _ignore_errors.\n \"\"\"\n global _ignore_errors\n if flag is not None:\n _ignore_errors = flag\n return _ignore_errors\n\n\ndef conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):\n \"\"\"\n Raise an exception or issue a warning, depending on the value of _ignore_errors.\n\n Parameters\n ----------\n msg : str\n The error/warning message.\n exc : Exception class\n This exception class is used to create the exception to be raised.\n category : warning class\n This category is the class of warning to be issued.\n err : bool\n If None, use ignore_errors(), otherwise use value of err to determine whether to\n raise an exception (err=True) or issue a warning (err=False).\n \"\"\"\n if (err is None and ignore_errors()) or err is False:\n issue_warning(msg, category=category)\n else:\n raise exc(msg)\n\n\n@contextmanager\ndef ignore_errors_context(flag=True):\n \"\"\"\n Set ignore_errors to the given flag in this context.\n\n Parameters\n ----------\n flag : bool\n If not None, set ignore_errors to this value.\n\n Yields\n ------\n None\n \"\"\"\n save = ignore_errors()\n ignore_errors(flag)\n try:\n yield\n finally:\n ignore_errors(save)\n\n\ndef simple_warning(msg, category=UserWarning, stacklevel=2):\n \"\"\"\n Display a simple warning message without the annoying extra line showing the warning call.\n\n Parameters\n ----------\n msg : str\n The warning message.\n category : class\n The warning class.\n stacklevel : int\n Number of levels up the stack to identify as the warning location.\n \"\"\"\n warn_deprecation('simple_warning is deprecated. 
'\n 'Use openmdao.utils.om_warnings.issue_warning instead.')\n old_format = warnings.formatwarning\n warnings.formatwarning = _warn_simple_format\n try:\n warnings.warn(msg, category, stacklevel)\n finally:\n warnings.formatwarning = old_format\n\n\ndef ensure_compatible(name, value, shape=None, indices=None):\n \"\"\"\n Make value compatible with the specified shape or the shape of indices.\n\n Parameters\n ----------\n name : str\n The name of the value.\n value : float or list or tuple or ndarray or Iterable\n The value of a variable.\n shape : int or tuple or list or None\n The expected or desired shape of the value.\n indices : Indexer or None\n The indices into a source variable.\n\n Returns\n -------\n ndarray\n The value in a shape compatible with the specified shape and/or indices.\n tuple\n The resulting shape of the value.\n\n Raises\n ------\n ValueError\n If value cannot be made to conform to shape or if shape and indices\n are incompatible.\n \"\"\"\n if isinstance(value, Iterable):\n value = np.asarray(value)\n\n # if shape is not given, infer from value (if not scalar) or indices\n if shape is not None:\n if isinstance(shape, numbers.Integral):\n shape = (shape,)\n elif isinstance(shape, list):\n shape = tuple(shape)\n elif not np.isscalar(value):\n shape = np.atleast_1d(value).shape\n\n if indices is not None:\n if not indices._flat_src and shape is None:\n raise RuntimeError(\"src_indices for '%s' is not flat, so its input \"\n \"shape must be provided.\" % name)\n try:\n indshape = indices.indexed_src_shape\n except (RuntimeError, ValueError, TypeError):\n pass # use shape provided or shape of value and check vs. shape of indices later\n else:\n if shape is not None and np.product(indshape) != np.product(shape):\n raise ValueError(\"Shape of indices %s does not match shape of %s for '%s'.\" %\n (indshape, shape, name))\n if shape is None:\n shape = indshape\n\n if shape is None:\n # shape is not determined, assume the shape of value was intended\n value = np.atleast_1d(value)\n shape = value.shape\n else:\n # shape is determined, if value is scalar assign it to array of shape\n # otherwise make sure value is an array of the determined shape\n if np.isscalar(value) or value.shape == (1,):\n value = np.ones(shape) * value\n else:\n value = np.atleast_1d(value).astype(np.float64)\n if value.shape != shape:\n raise ValueError(\"Incompatible shape for '%s': Expected %s but got %s.\" %\n (name, shape, value.shape))\n\n return value, shape\n\n\ndef determine_adder_scaler(ref0, ref, adder, scaler):\n r\"\"\"\n Determine proper values of adder and scaler based on user arguments.\n\n Adder and Scaler are used internally because the transformation is\n slightly more efficient.\n\n Parameters\n ----------\n ref0 : float or ndarray, optional\n Value of response variable that scales to 0.0 in the driver.\n ref : float or ndarray, optional\n Value of response variable that scales to 1.0 in the driver.\n adder : float or ndarray, optional\n Value to add to the model value to get the scaled value. Adder\n is first in precedence.\n scaler : float or ndarray, optional\n Value to multiply the model value to get the scaled value. 
Scaler\n is second in precedence.\n\n Returns\n -------\n tuple\n Adder and scaler, properly formatted and based on ref/ref0 if provided.\n\n Raises\n ------\n ValueError\n If both ref/ref0 and adder/scaler were provided.\n\n Notes\n -----\n The response can be scaled using ref and ref0.\n The argument :code:`ref0` represents the physical value when the scaled value is 0.\n The argument :code:`ref` represents the physical value when the scaled value is 1.\n \"\"\"\n # Affine scaling cannot be used with scalers/adders\n if ref0 is not None or ref is not None:\n if scaler is not None or adder is not None:\n raise ValueError('Inputs ref/ref0 are mutually exclusive '\n 'with scaler/adder')\n if ref is None:\n ref = 1.0\n if ref0 is None:\n ref0 = 0.0\n\n # Convert ref/ref0 to scaler/adder so we can scale the bounds\n adder = -ref0\n scaler = 1.0 / (ref + adder)\n\n else:\n if scaler is None:\n scaler = 1.0\n if adder is None:\n adder = 0.0\n\n adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)\n scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)\n\n return adder, scaler\n\n\ndef set_pyoptsparse_opt(optname, fallback=True):\n \"\"\"\n For testing, sets the pyoptsparse optimizer using the given optimizer name.\n\n This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.\n This can be used on systems that have SNOPT installed to force them to use\n SLSQP in order to mimic our test machines on travis and appveyor.\n\n Parameters\n ----------\n optname : str\n Name of pyoptsparse optimizer that is requested by the test.\n fallback : bool\n If True, fall back to SLSQP if optname can't be found.\n\n Returns\n -------\n object\n Pyoptsparse optimizer instance.\n str\n Pyoptsparse optimizer string.\n \"\"\"\n OPT = None\n opt = None\n OPTIMIZER = None\n force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')\n if force:\n optname = force\n\n from unittest.mock import Mock\n\n try:\n from pyoptsparse import OPT\n\n try:\n opt = OPT(optname)\n OPTIMIZER = optname\n except Exception:\n if fallback and optname != 'SLSQP':\n try:\n opt = OPT('SLSQP')\n OPTIMIZER = 'SLSQP'\n except Exception:\n pass\n else:\n if fallback and isinstance(opt, Mock):\n try:\n opt = OPT('SLSQP')\n OPTIMIZER = 'SLSQP'\n except Exception:\n pass\n except Exception:\n pass\n\n if isinstance(opt, Mock):\n OPT = OPTIMIZER = None\n\n if not fallback and OPTIMIZER != optname:\n raise unittest.SkipTest(\"pyoptsparse is not providing %s\" % optname)\n\n return OPT, OPTIMIZER\n\n\ndef format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):\n \"\"\"\n Format array option values.\n\n Checks that the given array values are either None, float, or an iterable\n of numeric values. On output all iterables of numeric values are\n converted to a flat np.ndarray. 
If values is scalar, it is converted\n to float.\n\n Parameters\n ----------\n name : str\n The path of the variable relative to the current system.\n values : float or numpy ndarray or Iterable\n Values of the array option to be formatted to the expected form.\n val_if_none : float or numpy ndarray\n The default value for the option if values is None.\n flatten : bool\n Set to True to flatten any ndarray return.\n\n Returns\n -------\n float or np.ndarray\n Values transformed to the expected form.\n\n Raises\n ------\n ValueError\n If values is Iterable but cannot be converted to a numpy ndarray\n TypeError\n If values is scalar, not None, and not a Number.\n \"\"\"\n # Convert adder to ndarray/float as necessary\n if isinstance(values, np.ndarray):\n if flatten:\n values = values.flatten()\n elif not isinstance(values, str) \\\n and isinstance(values, Iterable):\n values = np.asarray(values, dtype=float)\n if flatten:\n values = values.flatten()\n elif values is None:\n values = val_if_none\n elif values == float('inf'):\n values = INF_BOUND\n elif values == -float('inf'):\n values = -INF_BOUND\n elif isinstance(values, numbers.Number):\n values = float(values)\n else:\n raise TypeError('Expected values of {0} to be an Iterable of '\n 'numeric values, or a scalar numeric value. '\n 'Got {1} instead.'.format(name, values))\n return values\n\n\nclass ContainsAll(object):\n \"\"\"\n A fake dictionary that always reports __contains__(name) to be True.\n \"\"\"\n\n def __contains__(self, name):\n \"\"\"\n Return if the named object is contained.\n\n Parameters\n ----------\n name : str\n Name of the object being looked up.\n\n Returns\n -------\n bool\n Always returns True.\n \"\"\"\n return True\n\n\ndef all_ancestors(pathname, delim='.'):\n \"\"\"\n Return a generator of pathnames of the starting object and all of its parents.\n\n Pathnames are ordered from longest to shortest.\n\n Parameters\n ----------\n pathname : str\n Pathname of starting object.\n delim : str\n Delimiter used to split the name.\n\n Yields\n ------\n str\n \"\"\"\n parts = pathname.split(delim)\n for i in range(len(parts), 0, -1):\n yield delim.join(parts[:i])\n\n\ndef find_matches(pattern, var_list):\n \"\"\"\n Return list of variable names that match given pattern.\n\n Parameters\n ----------\n pattern : str\n Glob pattern or variable name.\n var_list : list of str\n List of variable names to search for pattern.\n\n Returns\n -------\n list\n Variable names that match pattern.\n \"\"\"\n if pattern == '*':\n return var_list\n elif pattern in var_list:\n return [pattern]\n return [name for name in var_list if fnmatchcase(name, pattern)]\n\n\ndef pad_name(name, pad_num=10, quotes=False):\n \"\"\"\n Pad a string so that they all line up when stacked.\n\n Parameters\n ----------\n name : str\n The string to pad.\n pad_num : int\n The number of total spaces the string should take up.\n quotes : bool\n If name should be quoted.\n\n Returns\n -------\n str\n Padded string.\n \"\"\"\n l_name = len(name)\n quotes_len = 2 if quotes else 0\n if l_name + quotes_len < pad_num:\n pad = pad_num - (l_name + quotes_len)\n if quotes:\n pad_str = \"'{name}'{sep:<{pad}}\"\n else:\n pad_str = \"{name}{sep:<{pad}}\"\n pad_name = pad_str.format(name=name, sep='', pad=pad)\n return pad_name\n else:\n if quotes:\n return \"'{0}'\".format(name)\n else:\n return '{0}'.format(name)\n\n\ndef run_model(prob, ignore_exception=False):\n \"\"\"\n Call `run_model` on problem and capture output.\n\n Parameters\n ----------\n prob : Problem\n An 
instance of Problem.\n    ignore_exception : bool\n        Set to True to ignore an exception of any kind.\n\n    Returns\n    -------\n    string\n        Output from calling `run_model` on the Problem, captured from stdout.\n    \"\"\"\n    stdout = sys.stdout\n    strout = StringIO()\n\n    sys.stdout = strout\n    try:\n        prob.run_model()\n    except Exception as err:\n        if not ignore_exception:\n            raise err\n    finally:\n        sys.stdout = stdout\n\n    return strout.getvalue()\n\n\ndef run_driver(prob):\n    \"\"\"\n    Call `run_driver` on problem and capture output.\n\n    Parameters\n    ----------\n    prob : Problem\n        An instance of Problem.\n\n    Returns\n    -------\n    bool\n        Failure flag; True if failed to converge, False if successful.\n    string\n        Output from calling `run_driver` on the Problem, captured from stdout.\n    \"\"\"\n    stdout = sys.stdout\n    strout = StringIO()\n\n    sys.stdout = strout\n    try:\n        failed = prob.run_driver()\n    finally:\n        sys.stdout = stdout\n\n    return failed, strout.getvalue()\n\n\n@contextmanager\ndef printoptions(*args, **kwds):\n    \"\"\"\n    Context manager for setting numpy print options.\n\n    Set print options for the scope of the `with` block, and restore the old\n    options at the end. See `numpy.set_printoptions` for the full description of\n    available options. If any invalid options are specified, they will be ignored.\n\n    >>> with printoptions(precision=2):\n    ...     print(np.array([2.0]) / 3)\n    [0.67]\n    The `as`-clause of the `with`-statement gives the current print options:\n    >>> with printoptions(precision=2) as opts:\n    ...      assert_equal(opts, np.get_printoptions())\n\n    Parameters\n    ----------\n    *args : list\n        Variable-length argument list.\n    **kwds : dict\n        Arbitrary keyword arguments.\n\n    Yields\n    ------\n    str or int\n\n    See Also\n    --------\n    set_printoptions, get_printoptions\n    \"\"\"\n    opts = np.get_printoptions()\n\n    # ignore any keyword args that are not valid in this version of numpy\n    # e.g. numpy <=1.13 does not have the 'floatmode' option\n    kw_opts = dict((key, val) for key, val in kwds.items() if key in opts)\n\n    try:\n        np.set_printoptions(*args, **kw_opts)\n        yield np.get_printoptions()\n    finally:\n        np.set_printoptions(**opts)\n\n\ndef _nothing():\n    yield None\n\n\ndef do_nothing_context():\n    \"\"\"\n    Do nothing.\n\n    Useful when you have a block of code that only requires a context manager sometimes,\n    and you don't want to repeat the context managed block.\n\n    Returns\n    -------\n    contextmanager\n        A do nothing context manager.\n    \"\"\"\n    return contextmanager(_nothing)()\n\n\ndef remove_whitespace(s, right=False, left=False):\n    \"\"\"\n    Remove white-space characters from the given string.\n\n    If neither right nor left is specified (the default),\n    then all white-space is removed.\n\n    Parameters\n    ----------\n    s : str\n        The string to be modified.\n    right : bool\n        If True, remove white-space from the end of the string.\n    left : bool\n        If True, remove white-space from the beginning of the string.\n\n    Returns\n    -------\n    str\n        The string with white-space removed.\n    \"\"\"\n    if not left and not right:\n        return re.sub(r\"\\s+\", \"\", s, flags=re.UNICODE)\n    elif right and left:\n        return re.sub(r\"^\\s+|\\s+$\", \"\", s, flags=re.UNICODE)\n    elif right:\n        return re.sub(r\"\\s+$\", \"\", s, flags=re.UNICODE)\n    else:  # left\n        return re.sub(r\"^\\s+\", \"\", s, flags=re.UNICODE)\n\n\n_badtab = r'`~@#$%^&*()[]{}-+=|\\/?<>,.:;'\n_transtab = str.maketrans(_badtab, '_' * len(_badtab))\n\n\ndef str2valid_python_name(s):\n    \"\"\"\n    Translate a given string into a valid python variable name.\n\n    Parameters\n    ----------\n    s : str\n        The string to be translated.\n\n    Returns\n    -------\n    str\n        The valid python name string.\n    \"\"\"\n    return s.translate(_transtab)\n\n\n_container_classes = (list, tuple, set)\n\n\ndef make_serializable(o):\n    \"\"\"\n    Recursively convert numpy types to native types for JSON serialization.\n\n    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.\n\n    Parameters\n    ----------\n    o : object\n        The object to be converted.\n\n    Returns\n    -------\n    object\n        The converted object.\n    \"\"\"\n    if isinstance(o, _container_classes):\n        return [make_serializable(item) for item in o]\n    elif isinstance(o, dict):\n        s_key = [make_serializable_key(item) for item in o.keys()]\n        s_val = [make_serializable(item) for item in o.values()]\n        return dict(zip(s_key, s_val))\n    elif isinstance(o, np.ndarray):\n        return o.tolist()\n    elif isinstance(o, np.number):\n        return o.item()\n    elif isinstance(o, (str, float, int)):\n        return o\n    elif isinstance(o, bool) or isinstance(o, complex):\n        return str(o)\n    elif hasattr(o, '__dict__'):\n        try:\n            return o.to_json()\n        except AttributeError:\n            return o.__class__.__name__\n    else:\n        return o\n\n\ndef make_serializable_key(o):\n    \"\"\"\n    Recursively convert numpy types to native types for JSON serialization.\n\n    This function is for making serializable dictionary keys, so no containers.\n    This function should NOT be passed into json.dump or json.dumps as the 'default' arg.\n\n    Parameters\n    ----------\n    o : object\n        The object to be converted.\n\n    Returns\n    -------\n    object\n        The converted object.\n    \"\"\"\n    if isinstance(o, str):\n        return o\n    elif isinstance(o, np.number):\n        return o.item()\n    elif hasattr(o, '__dict__'):\n        return o.__class__.__name__\n    else:\n        return str(o)\n\n\ndef default_noraise(o):\n    \"\"\"\n    Try to convert some extra types during JSON serialization.\n\n    This is intended to be passed to json.dump or 
json.dumps as the 'default' arg. It will\n attempt to convert values if possible, but if no conversion works, will return\n 'unserializable object (<type>)' instead of raising a TypeError.\n\n Parameters\n ----------\n o : object\n The object to be converted.\n\n Returns\n -------\n object\n The converted object.\n \"\"\"\n if isinstance(o, _container_classes):\n return [default_noraise(item) for item in o]\n elif isinstance(o, dict):\n s_key = [make_serializable_key(item) for item in o.keys()]\n s_val = [default_noraise(item) for item in o.values()]\n return dict(zip(s_key, s_val))\n elif isinstance(o, np.ndarray):\n return o.tolist()\n elif isinstance(o, np.number):\n return o.item()\n elif isinstance(o, (str, float, int)):\n return o\n elif isinstance(o, bool) or isinstance(o, complex):\n return str(o)\n elif hasattr(o, '__dict__'):\n return o.__class__.__name__\n elif o is None:\n return None\n else:\n return f\"unserializable object ({type(o).__name__})\"\n\n\ndef make_set(str_data, name=None):\n \"\"\"\n Construct a set containing the specified character strings.\n\n Parameters\n ----------\n str_data : None, str, or list of strs\n Character string(s) to be included in the set.\n\n name : str, optional\n A name to be used in error messages.\n\n Returns\n -------\n set\n A set of character strings.\n \"\"\"\n if not str_data:\n return set()\n elif isinstance(str_data, str):\n return {str_data}\n elif isinstance(str_data, (set, list)):\n\n for item in str_data:\n if not isinstance(item, str):\n typ = type(item).__name__\n msg = f\"Items in tags should be of type string, but type '{typ}' was found.\"\n raise TypeError(msg)\n\n if isinstance(str_data, set):\n return str_data\n elif isinstance(str_data, list):\n return set(str_data)\n\n elif name:\n raise TypeError(\"The {} argument should be str, set, or list: {}\".format(name, str_data))\n else:\n raise TypeError(\"The argument should be str, set, or list: {}\".format(str_data))\n\n\ndef match_includes_excludes(name, includes=None, excludes=None):\n \"\"\"\n Check to see if the variable names pass through the includes and excludes filter.\n\n Parameters\n ----------\n name : str\n Name to be checked for match.\n includes : iter of str or None\n Glob patterns for name to include in the filtering. None, the default, means\n include all.\n excludes : iter of str or None\n Glob patterns for name to exclude in the filtering.\n\n Returns\n -------\n bool\n Return True if the name passes through the filtering of includes and excludes.\n \"\"\"\n # Process excludes\n if excludes is not None:\n for pattern in excludes:\n if fnmatchcase(name, pattern):\n return False\n\n # Process includes\n if includes is None:\n return True\n else:\n for pattern in includes:\n if fnmatchcase(name, pattern):\n return True\n\n return False\n\n\ndef match_prom_or_abs(name, prom_name, includes=None, excludes=None):\n \"\"\"\n Check to see if the variable names pass through the includes and excludes filter.\n\n Parameters\n ----------\n name : str\n Unpromoted variable name to be checked for match.\n prom_name : str\n Promoted variable name to be checked for match.\n includes : iter of str or None\n Glob patterns for name to include in the filtering. 
None, the default, means\n to include all.\n excludes : iter of str or None\n Glob patterns for name to exclude in the filtering.\n\n Returns\n -------\n bool\n Return True if the name passes through the filtering of includes and excludes.\n \"\"\"\n diff = name != prom_name\n\n # Process excludes\n if excludes is not None:\n for pattern in excludes:\n if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):\n return False\n\n # Process includes\n if includes is None:\n return True\n else:\n for pattern in includes:\n if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):\n return True\n\n return False\n\n\n_falsey = {'0', 'false', 'no', ''}\n\n\ndef env_truthy(env_var):\n \"\"\"\n Return True if the given environment variable is 'truthy'.\n\n Parameters\n ----------\n env_var : str\n The name of the environment variable.\n\n Returns\n -------\n bool\n True if the specified environment variable is 'truthy'.\n \"\"\"\n return os.environ.get(env_var, '0').lower() not in _falsey\n\n\ndef common_subpath(pathnames):\n \"\"\"\n Return the common dotted subpath found in all of the given dotted pathnames.\n\n Parameters\n ----------\n pathnames : iter of str\n Dotted pathnames of systems.\n\n Returns\n -------\n str\n Common dotted subpath. Returns '' if no common subpath is found.\n \"\"\"\n if len(pathnames) == 1:\n return pathnames[0]\n\n if pathnames:\n npaths = len(pathnames)\n splits = [p.split('.') for p in pathnames]\n minlen = np.min([len(s) for s in splits])\n for common_loc in range(minlen):\n p0 = splits[0][common_loc]\n for i in range(1, npaths):\n if p0 != splits[i][common_loc]:\n break\n else:\n continue\n break\n else:\n common_loc += 1\n\n return '.'.join(splits[0][:common_loc])\n\n return ''\n\n\ndef _is_slicer_op(indices):\n \"\"\"\n Check if an indexer contains a slice or ellipsis operator.\n\n Parameters\n ----------\n indices : ndarray\n Indices to check.\n\n Returns\n -------\n bool\n Returns True if indices contains a colon or ellipsis operator.\n \"\"\"\n if isinstance(indices, tuple):\n return any(isinstance(i, slice) or i is ... 
for i in indices)\n\n return isinstance(indices, slice)\n\n\ndef _slice_indices(slicer, arr_size, arr_shape):\n \"\"\"\n Return an index array based on a slice or slice tuple and the array size and shape.\n\n Parameters\n ----------\n slicer : slice or tuple containing slices\n Slice object to slice array\n arr_size : int\n Size of output array\n arr_shape : tuple\n Tuple of output array shape\n\n Returns\n -------\n array\n Returns the sliced indices.\n \"\"\"\n if isinstance(slicer, slice):\n # for a simple slice we can use less memory\n start, stop, step = slicer.start, slicer.stop, slicer.step\n if start is None:\n start = 0\n if stop is None:\n stop = arr_size\n if step is None:\n step = 1\n return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)\n else:\n return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]\n\n\ndef _prom2ivc_src_name_iter(prom_dict):\n \"\"\"\n Yield keys from prom_dict with promoted input names converted to ivc source names.\n\n Parameters\n ----------\n prom_dict : dict\n Original dict with some promoted paths.\n\n Yields\n ------\n str\n name\n \"\"\"\n for name, meta in prom_dict.items():\n if meta['ivc_source'] is not None:\n yield meta['ivc_source']\n else:\n yield name\n\n\ndef _prom2ivc_src_item_iter(prom_dict):\n \"\"\"\n Yield items from prom_dict with promoted input names converted to ivc source names.\n\n The result is that all names are absolute.\n\n Parameters\n ----------\n prom_dict : dict\n Original dict with some promoted paths.\n\n Yields\n ------\n tuple\n name, metadata\n \"\"\"\n for name, meta in prom_dict.items():\n if meta['ivc_source'] is not None:\n yield meta['ivc_source'], meta\n else:\n yield name, meta\n\n\ndef _prom2ivc_src_dict(prom_dict):\n \"\"\"\n Convert a dictionary with promoted input names into one with ivc source names.\n\n Parameters\n ----------\n prom_dict : dict\n Original dict with some promoted paths.\n\n Returns\n -------\n dict\n New dict with ivc source pathnames.\n \"\"\"\n return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}\n\n\ndef convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):\n \"\"\"\n Compute lower level src_indices based on parent src_indices.\n\n Parameters\n ----------\n parent_src_inds : ndarray\n Parent src_indices.\n parent_src_shape : tuple\n Shape of source expected by parent.\n my_src_inds : ndarray or fancy index\n Src_indices at the current system level, before conversion.\n my_src_shape : tuple\n Expected source shape at the current system level.\n\n Returns\n -------\n ndarray\n Final src_indices based on those of the parent.\n \"\"\"\n if parent_src_inds is None:\n return my_src_inds\n elif my_src_inds is None:\n return parent_src_inds\n\n if my_src_inds._flat_src:\n return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]\n else:\n return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]\n\n\ndef shape2tuple(shape):\n \"\"\"\n Return shape as a tuple.\n\n Parameters\n ----------\n shape : int or tuple\n The given shape.\n\n Returns\n -------\n tuple\n The shape as a tuple.\n \"\"\"\n if isinstance(shape, Number):\n return (shape,)\n elif shape is None:\n return shape\n return tuple(shape)\n\n\ndef get_connection_owner(system, tgt):\n \"\"\"\n Return (owner, promoted_src, promoted_tgt) for the given connected target.\n\n Note : this is not speedy. It's intended for use only in error messages.\n\n Parameters\n ----------\n system : System\n Any System. 
The search always goes from the model level down.\n    tgt : str\n        Absolute pathname of the target variable.\n\n    Returns\n    -------\n    tuple\n        (owning group, promoted source name, promoted target name).\n    \"\"\"\n    from openmdao.core.group import Group\n\n    model = system._problem_meta['model_ref']()\n    src = model._conn_global_abs_in2out[tgt]\n    abs2prom = model._var_allprocs_abs2prom\n\n    if src in abs2prom['output'] and tgt in abs2prom['input']:\n        if abs2prom['input'][tgt] != abs2prom['output'][src]:\n            # connection is explicit\n            for g in model.system_iter(include_self=True, recurse=True, typ=Group):\n                if g._manual_connections:\n                    tprom = g._var_allprocs_abs2prom['input'][tgt]\n                    if tprom in g._manual_connections:\n                        return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom\n\n    return None, None, None\n\n\ndef wing_dbg():\n    \"\"\"\n    Make import of wingdbstub contingent on value of WING_DBG environment variable.\n\n    Also will import wingdbstub from the WINGHOME directory.\n    \"\"\"\n    if env_truthy('WING_DBG'):\n        import sys\n        import os\n        save = sys.path\n        new = sys.path[:] + [os.environ['WINGHOME']]\n        sys.path = new\n        try:\n            import wingdbstub\n        finally:\n            sys.path = save\n\n\nclass LocalRangeIterable(object):\n    \"\"\"\n    Iterable object yielding local indices while iterating over local or distributed vars.\n\n    The number of iterations for a distributed variable will be the full distributed size of the\n    variable but None will be returned for any indices that are not local to the given rank.\n\n    Parameters\n    ----------\n    system : System\n        Containing System.\n    vname : str\n        Name of the variable.\n    use_vec_offset : bool\n        If True, return indices for the given variable within its vector, else just return\n        indices within the variable itself, i.e. range(var_size).\n\n    Attributes\n    ----------\n    _inds : ndarray\n        Variable indices (unused for distributed variables).\n    _dist_size : int\n        Full size of distributed variable.\n    _start : int\n        Starting index of distributed variable on this rank.\n    _end : int\n        Last index + 1 of distributed variable on this rank.\n    _offset : int\n        Offset of this variable into the local vector.\n    _iter : method\n        The iteration method used.\n    \"\"\"\n\n    def __init__(self, system, vname, use_vec_offset=True):\n        \"\"\"\n        Initialize the iterator.\n        \"\"\"\n        self._dist_size = 0\n\n        abs2meta = system._var_allprocs_abs2meta['output']\n        if vname in abs2meta:\n            sizes = system._var_sizes['output']\n            slices = system._outputs.get_slice_dict()\n        else:\n            abs2meta = system._var_allprocs_abs2meta['input']\n            sizes = system._var_sizes['input']\n            slices = system._inputs.get_slice_dict()\n\n        if abs2meta[vname]['distributed']:\n            var_idx = system._var_allprocs_abs2idx[vname]\n            rank = system.comm.rank\n            self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0\n\n            self._iter = self._dist_iter\n            self._start = np.sum(sizes[:rank, var_idx])\n            self._end = self._start + sizes[rank, var_idx]\n            self._dist_size = np.sum(sizes[:, var_idx])\n        else:\n            self._iter = self._serial_iter\n            if use_vec_offset:\n                self._inds = range(slices[vname].start, slices[vname].stop)\n            else:\n                self._inds = range(slices[vname].stop - slices[vname].start)\n\n    def _serial_iter(self):\n        \"\"\"\n        Iterate over a local non-distributed variable.\n\n        Yields\n        ------\n        int\n            Variable index.\n        \"\"\"\n        yield from self._inds\n\n    def _dist_iter(self):\n        \"\"\"\n        Iterate over a distributed variable.\n\n        Yields\n        ------\n        int or None\n            Variable index or None if index is not local to this rank.\n        \"\"\"\n        start = self._start\n        end = self._end\n\n        for i in range(self._dist_size):\n            if i >= start and i < end:\n                yield i - start + self._offset\n            else:\n                yield None\n\n    def __iter__(self):\n        \"\"\"\n        Return an iterator.\n\n        Returns\n        -------\n        iterator\n            An iterator over our indices.\n        \"\"\"\n        return self._iter()\n"
] |
[
[
"numpy.product",
"numpy.asarray",
"numpy.set_printoptions",
"numpy.sum",
"numpy.ones",
"numpy.get_printoptions",
"numpy.atleast_1d",
"numpy.isscalar",
"numpy.arange"
]
] |
Payal197bhadra/ComputerVision
|
[
"d66b5037ece99b6189dd4306b2c9be67cffd14af"
] |
[
"OpenCV-Computer-Vision-Examples-with-Python-A-Complete-Guide-for-Dummies-master/Source Code/opencv/Affine Transformation/shearing.py"
] |
[
"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimg= cv2.imread(\"img.png\")\nimg=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nplt.axis('off')\n# show the image\nplt.imshow(img)\nplt.show()\n\n# get the image shape\nrows, cols, dim = img.shape\nrows, cols, dim = img.shape\n# transformation matrix for Shearing\n# shearing applied to x-axis\nM1 = np.float32([[1, 0.5, 0],\n \t[0, 1 , 0],\n \t[0, 0 , 1]])\n# shearing applied to y-axis\nM2 = np.float32([[1, 0, 0],\n \t [0.5, 1, 0],\n \t [0, 0, 1]])\n# apply a perspective transformation to the image\nsheared_img_in_x = cv2.warpPerspective(img,M1,(int(cols*1.5),int(rows*1.5)))\nsheared_img_in_y = cv2.warpPerspective(img,M2,(int(cols*1.5),int(rows*1.5)))\n# disable x & y axis\nplt.axis('off')\n# show the resulting image\nplt.subplot(121)\nplt.imshow(sheared_img_in_x)\nplt.subplot(122)\nplt.imshow(sheared_img_in_y)\nplt.show()"
] |
[
[
"numpy.float32",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot"
]
] |
Droliven/MSRGCN
|
[
"5d8d8e3365d3b23ca2ac734ace7e84135a6e3a9e"
] |
[
"run/cmu_runner.py"
] |
[
"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@project : MSRGCN\n@file : cmu_runner.py\n@author : Droliven\n@contact : droliven@163.com\n@ide : PyCharm\n@time : 2021-07-28 13:29\n'''\n\n\nfrom datas import CMUMotionDataset, get_dct_matrix, reverse_dct_torch, define_actions_cmu, draw_pic_gt_pred\nfrom nets import MSRGCN, MSRGCNShortTerm\nfrom configs.config import Config\n\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\nimport torch\nimport os\nfrom tensorboardX import SummaryWriter\nimport numpy as np\nfrom tqdm import tqdm\nfrom pprint import pprint\n\ndef L2NormLoss_test(gt, out, frame_ids): # (batch size,feature dim, seq len)\n '''\n gt: B, 66, 25\n '''\n t_3d = np.zeros(len(frame_ids))\n\n batch_size, features, seq_len = gt.shape\n gt = gt.permute(0, 2, 1).contiguous().view(batch_size, seq_len, -1, 3) # B, 25, 22, 3\n out = out.permute(0, 2, 1).contiguous().view(batch_size, seq_len, -1, 3) # B, 25, 22, 3\n for k in np.arange(0, len(frame_ids)):\n j = frame_ids[k]\n t_3d[k] = torch.mean(torch.norm(gt[:, j, :, :].contiguous().view(-1, 3) - out[:, j, :, :].contiguous().view(-1, 3), 2, 1)).cpu().data.numpy() * batch_size\n return t_3d\n\ndef L2NormLoss_train(gt, out):\n '''\n # (batch size,feature dim, seq len)\n ็ญๅไบ mpjpe_error_p3d()\n '''\n batch_size, _, seq_len = gt.shape\n gt = gt.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()\n out = out.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()\n loss = torch.mean(torch.norm(gt - out, 2, dim=-1))\n return loss\n\ndef lr_decay(optimizer, lr_now, gamma):\n lr = lr_now * gamma\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\nclass CMURunner():\n def __init__(self, exp_name=\"cmu\", input_n=10, output_n=10, dct_n=15, device=\"cuda:0\", num_works=0, test_manner=\"all\", debug_step=1):\n super(CMURunner, self).__init__()\n\n # ๅๆฐ\n self.start_epoch = 1\n self.best_accuracy = 1e15\n\n self.cfg = Config(exp_name=exp_name, input_n=input_n, output_n=output_n, dct_n=dct_n, device=device, num_works=num_works, test_manner=test_manner)\n print(\"\\n================== Configs =================\")\n pprint(vars(self.cfg), indent=4)\n print(\"==========================================\\n\")\n with open(os.path.join(self.cfg.ckpt_dir, \"config.txt\"), 'w', encoding='utf-8') as f:\n f.write(str(self.cfg.__dict__))\n # ๆจกๅ\n if self.cfg.output_n == 25:\n self.model = MSRGCN(self.cfg.p_dropout, self.cfg.leaky_c, self.cfg.final_out_noden, input_feature=self.cfg.dct_n)\n elif self.cfg.output_n == 10:\n self.model = MSRGCNShortTerm(self.cfg.p_dropout, self.cfg.leaky_c, self.cfg.final_out_noden, input_feature=self.cfg.dct_n)\n\n if self.cfg.device != \"cpu\":\n self.model.cuda(self.cfg.device)\n\n print(\">>> total params: {:.2f}M\\n\".format(\n sum(p.numel() for p in self.model.parameters()) / 1000000.0))\n self.lr = self.cfg.lr\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)\n\n # ๆฐๆฎ\n dct_m, i_dct_m = get_dct_matrix(self.cfg.seq_len)\n self.dct_m = torch.from_numpy(dct_m).float()\n self.i_dct_m = torch.from_numpy(i_dct_m).float()\n if self.cfg.device != \"cpu\":\n self.dct_m = self.dct_m.cuda(self.cfg.device, non_blocking=True)\n self.i_dct_m = self.i_dct_m.cuda(self.cfg.device, non_blocking=True)\n\n train_dataset = CMUMotionDataset(self.cfg.base_data_dir, actions=\"all\", mode_name=\"train\", input_n=self.cfg.input_n, output_n=self.cfg.output_n,\n dct_used=self.cfg.dct_n, split=0, sample_rate=2,\n down_key=[('p22', 'p12', 
self.cfg.Index2212),\n ('p12', 'p7', self.cfg.Index127),\n ('p7', 'p4', self.cfg.Index74)], test_manner=self.cfg.test_manner, global_max=0, global_min=0, device=self.cfg.device, debug_step=debug_step)\n\n\n print(\"train data shape {}\".format(train_dataset.gt_all_scales['p32'].shape[0]))\n\n self.train_loader = DataLoader(\n dataset=train_dataset,\n batch_size=self.cfg.train_batch_size,\n shuffle=True,\n num_workers=self.cfg.num_works,\n pin_memory=True)\n\n self.global_max = train_dataset.global_max\n self.global_min = train_dataset.global_min\n\n self.test_loader = dict()\n for act in define_actions_cmu(\"all\"):\n test_dataset = CMUMotionDataset(self.cfg.base_data_dir, actions=act, mode_name=\"test\", input_n=self.cfg.input_n, output_n=self.cfg.output_n,\n dct_used=self.cfg.dct_n, split=1, sample_rate=2,\n down_key=[('p22', 'p12', self.cfg.Index2212),\n ('p12', 'p7', self.cfg.Index127),\n ('p7', 'p4', self.cfg.Index74)], test_manner=self.cfg.test_manner, global_max=self.global_max, global_min=self.global_min, device=self.cfg.device, debug_step=debug_step)\n\n self.test_loader[act] = DataLoader(\n dataset=test_dataset,\n batch_size=self.cfg.test_batch_size,\n shuffle=False,\n num_workers=self.cfg.num_works,\n pin_memory=True)\n print(\">>> test {} data {}\".format(act, test_dataset.gt_all_scales['p32'].shape[0]))\n\n self.summary = SummaryWriter(self.cfg.ckpt_dir)\n\n def save(self, checkpoint_path, best_err, curr_err):\n state = {\n \"lr\": self.lr,\n \"best_err\": best_err,\n \"curr_err\": curr_err,\n \"model\": self.model.state_dict(),\n \"optimizer\": self.optimizer.state_dict(),\n }\n torch.save(state, checkpoint_path)\n\n def restore(self, checkpoint_path):\n state = torch.load(checkpoint_path, map_location=self.cfg.device)\n self.model.load_state_dict(state[\"model\"])\n self.optimizer.load_state_dict(state[\"optimizer\"])\n self.lr = state[\"lr\"]\n best_err = state['best_err']\n curr_err = state[\"curr_err\"]\n print(\"load from lr {}, curr_avg {}, best_avg {}.\".format(state[\"lr\"], curr_err, best_err))\n\n\n def train(self, epoch):\n self.model.train()\n average_loss = 0\n\n for i, (inputs, gts) in tqdm(enumerate(self.train_loader), total=len(self.train_loader)):\n b, cv, t_len = inputs[list(inputs.keys())[0]].shape\n # skip the last batch if only have one sample for batch_norm layers\n if b == 1:\n continue\n\n self.global_step = (epoch - 1) * len(self.train_loader) + i + 1\n\n for k in inputs:\n inputs[k] = inputs[k].float().cuda(non_blocking=True, device=self.cfg.device)\n gts[k] = gts[k].float().cuda(non_blocking=True, device=self.cfg.device)\n\n outputs = self.model(inputs)\n\n losses = None\n for k in outputs:\n # ๅ Norm\n outputs[k] = (outputs[k] + 1) / 2\n outputs[k] = outputs[k] * (self.global_max - self.global_min) + self.global_min\n\n # ๅ่ฝฌ็ฉบ้ด\n outputs[k] = reverse_dct_torch(outputs[k], self.i_dct_m, self.cfg.seq_len)\n\n # loss\n loss_curr = L2NormLoss_train(gts[k], outputs[k])\n if losses is None:\n losses = loss_curr\n else:\n losses = losses + loss_curr\n self.summary.add_scalar(f\"Loss/{k}\", loss_curr, self.global_step)\n\n self.optimizer.zero_grad()\n losses.backward()\n self.optimizer.step()\n average_loss += losses.cpu().data.numpy()\n\n average_loss /= (i + 1)\n return average_loss\n\n def test(self, epoch=0):\n self.model.eval()\n\n frame_ids = self.cfg.frame_ids\n total_loss = np.zeros((len(define_actions_cmu(\"all\")), len(frame_ids)))\n\n for act_idx, act in enumerate(define_actions_cmu(\"all\")):\n count = 0\n\n for i, (inputs, gts) in 
enumerate(self.test_loader[act]):\n b, cv, t_len = inputs[list(inputs.keys())[0]].shape\n for k in inputs:\n inputs[k] = inputs[k].float().cuda(non_blocking=True, device=self.cfg.device)\n gts[k] = gts[k].float().cuda(non_blocking=True, device=self.cfg.device)\n with torch.no_grad():\n outputs = self.model(inputs)\n # ๅ Norm\n for k in outputs:\n outputs[k] = (outputs[k] + 1) / 2\n outputs[k] = outputs[k] * (self.global_max - self.global_min) + self.global_min\n\n # ๅ่ฝฌ็ฉบ้ด\n outputs[k] = reverse_dct_torch(outputs[k], self.i_dct_m, self.cfg.seq_len)\n\n # ๅผๅง่ฎก็ฎ\n mygt = gts['p32'].view(-1, self.cfg.origin_noden, 3, self.cfg.seq_len).clone()\n myout = outputs['p22'].view(-1, self.cfg.final_out_noden, 3, self.cfg.seq_len)\n mygt[:, self.cfg.dim_used_3d, :, :] = myout\n mygt[:, self.cfg.dim_repeat_32, :, :] = myout[:, self.cfg.dim_repeat_22, :, :]\n mygt = mygt.view(-1, self.cfg.origin_noden*3, self.cfg.seq_len)\n\n loss = L2NormLoss_test(gts['p32'][:, :, self.cfg.input_n:], mygt[:, :, self.cfg.input_n:], self.cfg.frame_ids)\n total_loss[act_idx] += loss\n # count += 1\n count += mygt.shape[0]\n\n # ************ ็ปๅพ\n if act_idx == 0 and i == 0:\n pred_seq = outputs['p22'].cpu().data.numpy()[0].reshape(self.cfg.final_out_noden, 3, self.cfg.seq_len)\n gt_seq = gts['p22'].cpu().data.numpy()[0].reshape(self.cfg.final_out_noden, 3, self.cfg.seq_len)\n for t in range(self.cfg.seq_len):\n draw_pic_gt_pred(gt_seq[:, :, t], pred_seq[:, :, t], self.cfg.I22_plot, self.cfg.J22_plot, self.cfg.LR22_plot, os.path.join(self.cfg.ckpt_dir, \"images\", f\"{epoch}_{act}_{t}.png\"))\n\n total_loss[act_idx] /= count\n for fidx, frame in enumerate(frame_ids):\n self.summary.add_scalar(f\"Test/{act}/{frame}\", total_loss[act_idx][fidx], epoch)\n\n self.summary.add_scalar(\"Test/average\", np.mean(total_loss), epoch)\n for fidx, frame in enumerate(frame_ids):\n self.summary.add_scalar(f\"Test/avg{frame}\", np.mean(total_loss[:, fidx]), epoch)\n return total_loss\n\n\n def run(self):\n for epoch in range(self.start_epoch, self.cfg.n_epoch + 1):\n\n if epoch % 2 == 0:\n self.lr = lr_decay(self.optimizer, self.lr, self.cfg.lr_decay)\n self.summary.add_scalar(\"LR\", self.lr, epoch)\n\n average_train_loss = self.train(epoch)\n if average_train_loss < self.best_accuracy:\n self.best_accuracy = average_train_loss\n self.save(\n os.path.join(self.cfg.ckpt_dir, \"models\",\n '{}_in{}out{}dctn{}_best_epoch{}_err{:.4f}.pth'.format(self.cfg.exp_name,\n self.cfg.input_n,\n self.cfg.output_n,\n self.cfg.dct_n, epoch,\n average_train_loss)), self.best_accuracy, average_train_loss)\n\n self.save(os.path.join(self.cfg.ckpt_dir, \"models\",\n '{}_in{}out{}dctn{}_last.pth'.format(self.cfg.exp_name, self.cfg.input_n,\n self.cfg.output_n, self.cfg.dct_n)),\n self.best_accuracy, average_train_loss)\n if epoch % 1 == 0:\n loss_l2_test = self.test(epoch)\n\n print('Epoch: {}, LR: {}, Current err test avg: {}'.format(epoch, self.lr, np.mean(loss_l2_test)))\n\n\nif __name__ == '__main__':\n pass"
] |
[
[
"torch.norm",
"torch.save",
"torch.no_grad",
"numpy.mean",
"torch.from_numpy",
"torch.utils.data.DataLoader",
"torch.load"
]
] |
SVJayanthi/DroneSimulation
|
[
"8fe52609cb367360729f16f4f6402faeadaf6b06"
] |
[
"drone_2.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 10 22:59:51 2019\n\n@author: Sravan\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 14 22:36:21 2019\n\n@author: Sravan\n\"\"\"\nimport csv\nimport numpy as np\nfrom scipy.spatial.distance import pdist, squareform, euclidean, cdist\n\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d.axes3d as p3\nimport scipy.integrate as integrate\nimport matplotlib.animation as animation\n\n\n\"\"\"\nVariables: Wind speed, Air traffic (# of drones), Obstacles (Trees, Buildings) \nFixed: Distance, Air Resistance, Gravity, Battery level\nRules: Drone Speed (Air traffic, Wind speed, Battery level), Collisions (Drone position)\nStudy: Time, Speed\nMovement: v_air = sqrt(mg/(nAฯ)), p = 1.22 kg m^-3, A = 1 m^2\nยฝcฯAv2 = mgtanฮธ, c = drag coefficient\nP = ยฝฯnAv_air(v_air2 โ v2sin2ฮธ)\nCollisions: Drone - Increase/Decrease Speed, 2) Change path- increasing elevation\n\nhttps://www.research-drone.com/en/extreme_climb_rate.html\nhttps://en.wikipedia.org/wiki/Amazon_Prime_Air\nhttps://homepages.abdn.ac.uk/nph120/meteo/DroneFlight.pdf\n\"\"\"\nclass ParticleBox:\n \"\"\"Orbits class\n \n init_state is an [N x 6] array, where N is the number of particles:\n [[xi1, yi1, zi1, xf1, yf1, zf1, vx1, vy1, vz1, t1],\n [xi2, yi2, zi2, xf2, yf2, zf2, vx2, vy2, vz2, t2],\n ... ]\n\n bounds is the size of the box: [xmin, xmax, ymin, ymax, zmin, zmax]\n \"\"\"\n def __init__(self,\n drones = 1,\n wind = [0, 0, 0],\n obstacles = 0,\n bounds = [-32000, 32000, -32000, 32000, 0, 150],\n size = 1.5,\n max_height = 122,\n max_speed = 22.34,\n acc = 7,\n M = 25.0,\n G = 9.81):\n self.drones = drones\n self.wind = wind\n self.size = size\n self.G = G\n self.max_height = max_height\n self.max_speed = max_speed\n self.acc_vert = acc\n self.acc_vert_eff = acc + G\n self.acc_hor = acc\n self.obstacles = 0\n self.obstacles_size = 40\n self.time_elapsed = 0\n self.bounds = bounds\n \n np.random.seed(0)\n init_state = np.random.random((drones, 10))\n init_state[:, :2] -= 0.5\n init_state[:, :2] *= bounds[1]*2\n init_state[:, 2:] = 0.0\n for i in range(len(init_state)):\n vecs = [64000.0, 64000.0]\n while vecs[0] > bounds[1] or vecs[0] < bounds[0] or vecs[1] > bounds[3] or vecs[1] < bounds[2]:\n vecs = np.random.standard_normal(2)\n mags = np.linalg.norm(vecs)\n vecs /= mags\n vecs *= 16000\n vecs += init_state[i, :2]\n init_state[i, 3:5] =vecs\n \n if obstacles > 0:\n np.random.seed(1)\n obs_state = np.random.random((obstacles, 3))\n obs_state[:, :3] -= 0.5\n obs_state[:, :2] *= bounds[1]*2\n obs_state[:, 2] *= bounds[5]*2\n \n self.init_state = np.asarray(init_state, dtype=float)\n #self.obs_state = np.asarray(obs_state, dtype=float)\n self.M = M * np.ones(self.init_state.shape[0])\n self.state = self.init_state.copy()\n \n #update velocity\n self.state[:, 6] = self.wind[0]\n self.state[:, 7] = self.wind[1]\n self.state[:, 8] = self.wind[2]\n\n def step(self, dt):\n \"\"\"step once by dt seconds\"\"\"\n self.time_elapsed += dt\n \n # find distance to goal\n D = cdist(self.state[:, :3], self.state[:, 3:6], 'euclidean')\n ind, din = np.where(D > 122)\n uniqua = (ind == din)\n ind = ind[uniqua]\n \n # update velocities of individual drones\n for i in zip(ind):\n #velocity vector\n v = self.state[i, 8]\n v_avg = v\n a_ver = self.acc_vert\n a_ver_eff = self.acc_vert_eff\n height = self.max_height - self.state[i, 2]\n print(height)\n if height > 0:\n n = 1\n if v > 0:\n n = v / abs(v)\n stop = n * v**2/(2 * a_ver)\n t_end = abs(v / a_ver)\n \n b1 = (v**2 + 
t_end**2)**(0.5)\n b2 = ((v + n * a_ver * dt)**2 + (t_end + dt)**2)**(0.5)\n s1 = ((a_ver * dt)**2 + dt**2)**(0.5)\n s2 = dt * 2\n P = (b2 - b1) + s1 + s2\n t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)\n h = 2 * t / (b2 - b1)\n area = n * (t + (b2 - b1) * h)\n \n if (t_end <= dt and stop > (height - area)):\n v_avg = 0\n self.state[i, 8] = 0\n self.state[i, 2] = self.max_height\n elif (stop > (height - area)):\n t_max = 0\n if stop < height:\n a = 2 * (a_ver)**2\n b = 4 * (a_ver) * v\n c = v**2 - 2 * a_ver * height\n t_max = (-b + (b**2 - 4 * a * c)**(0.5)) / (2 * a)\n v_max = v + a_ver * (t_max / dt)\n v_end = 2 * v_max - v - a_ver * dt\n v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)\n self.state[i, 8] = v_end\n else:\n v_avg = v + a_ver * dt / 2\n self.state[i, 8] += a_ver * dt\n elif height < 0:\n n = v / abs(v)\n stop = n * v**2/(2 * a_ver_eff)\n t_end = abs(v / a_ver_eff)\n \n b1 = (v**2 + t_end**2)**(0.5)\n b2 = ((v + n * a_ver_eff * dt)**2 + (t_end + dt)**2)**(0.5)\n s1 = ((a_ver_eff * dt)**2 + dt**2)**(0.5)\n s2 = dt * 2\n P = (b2 - b1) + s1 + s2\n t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)\n h = 2 * t / (b2 - b1)\n area = n * (t + (b2 - b1) * h)\n \n if (t_end <= dt and abs(stop) <= abs(height)):\n v_avg = (v / 2) * (t_end / dt)\n self.state[i, 8] = v + a_ver_eff * t_end\n elif (stop < (height - area)):\n v_max = (height * (2 * a_ver_eff))**(0.5)\n t_max = (v_max - v)/a_ver_eff\n v_end = 2 * v_max - v - a_ver_eff * dt\n v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)\n self.state[i, 8] = v_end\n else:\n v_avg = v - a_ver_eff * dt / 2\n self.state[i, 8] = v - a_ver_eff * dt\n else:\n self.state[i, 8] += 0 * dt\n \n \n self.state[i, 2] += v_avg * dt\n \n # unit vector\n r = self.state[i, 3:5] - self.state[i, :2]\n m = np.linalg.norm(r)\n u = r / m\n \n #accelearting horizontal\n a_hor = self.acc_hor\n v_hor = self.state[i, 6:8]\n h = np.linalg.norm(v_hor)\n \n stop = h**2/(2 * a_hor)\n t_end = h / a_hor\n \n b1 = (h**2 + t_end**2)**(0.5)\n b2 = ((h + a_hor * dt)**2 + (t_end + dt)**2)**(0.5)\n s1 = ((a_hor * dt)**2 + dt**2)**(0.5)\n s2 = dt*2\n P = (b2 - b1) + s1 + s2\n t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)\n s = 2 * t / (b2 - b1)\n area = (t + (b2 - b1) * s)\n \n if (t_end <= dt and stop < area):\n v_hor = (h / 2) * (t_end / dt)\n self.state[i, 6:8] = (h - (a_hor * t_end)) * u\n elif (stop > (m - area)):\n v_max = (m * (2 * a_hor))**(0.5)\n t_max = (v_max - h)/a_hor\n v_end = 2 * v_max - h - a_hor * dt\n v_hor = ((v_max + h) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)\n self.state[i, 6:8] = v_end * u\n else:\n v_hor = h + a_hor * dt / 2\n self.state[i, 6:8] = (h + a_hor * dt) * u\n \n self.state[i, :2] += (v_hor * dt) * u\n\n #find drones hovering\n done, fund = np.where(D <= 122)\n uniquo = (done == fund)\n done = done[uniquo]\n for d in zip(done):\n print(\"here\")\n #velocity vector\n v = self.state[i, 8]\n v_avg = v\n a_ver_eff = self.acc_vert_eff\n \n #accelerating negative z\n n = -1\n if v < 0:\n n = v / abs(v)\n stop = n * v**2/(2 * a_ver_eff)\n t_end = abs(v / a_ver_eff)\n \n b1 = (v**2 + t_end**2)**(0.5)\n b2 = ((v + n * a_ver_eff * dt)**2 + (t_end + dt)**2)**(0.5)\n s1 = ((a_ver_eff * dt)**2 + dt**2)**(0.5)\n s2 = dt * 2\n P = (b2 - b1) + s1 + s2\n t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)\n h = 2 * t / (b2 - b1)\n area = n * (t + (b2 - b1) * h)\n \n if (t_end <= dt and stop > area):\n 
v_avg = (v / 2) * (t_end / dt)\n self.state[i, 8] = v + a_ver_eff * t_end\n self.state[i, 9] = self.time_elapsed\n elif (stop < (-self.state[i, 2] - area)):\n v_max = ((-self.state[i, 2]) * (2 * a_ver_eff))**(0.5)\n t_max = (v_max - v)/a_ver_eff\n v_end = 2 * v_max - v - a_ver_eff * dt\n v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)\n self.state[i, 8] = v_end\n else:\n v_avg = v - a_ver_eff * dt / 2\n self.state[i, 8] = v - a_ver_eff * dt\n \n self.state[i, 2] += v_avg * dt\n\n\n E = squareform(pdist(self.state[:, :3], 'euclidean'))\n ind1, ind2 = np.where(E < (2 * self.size))\n unique = (ind1 < ind2)\n ind1 = ind1[unique]\n ind2 = ind2[unique]\n \n for i1, i2 in zip(ind1, ind2):\n if (self.state[i1, 2] > self.state[i2, 2]):\n self.state[i1, 8] += (self.acc_vert) * dt\n self.state[i2, 8] -= (self.acc_vert_eff) * dt\n else:\n self.state[i1, 8] -= (self.acc_vert) * dt\n self.state[i2, 8] += (self.acc_vert_eff) * dt\n \n if self.obstacles > 0:\n DO = np.vstack([self.state[:, :3].copy(), self.obs_state.copy()])\n F = squareform(pdist(DO, 'euclidean'))\n d_rone, obs = np.where(F < (2 * self.obstacles_size))\n unique = (d_rone < obs and obs >= self.drones)\n d_rone = d_rone[unique]\n obs = obs[unique]\n \n for d, o in zip(d_rone, obs):\n if (self.obs_state[o-self.drones, 2] < 110 and self.state[d, 2] < self.obs_state[o-self.drones, 2]):\n self.state[d, 8] += self.acc_vert * dt\n else:\n r = self.state[d, 3:5] - self.state[d, :2]\n ro = self.obs_state[o-self.drones, :2] - self.state[d, :2]\n \n r_rel = np.cross(r, ro)\n if (r_rel[2] > 0):\n self.state[d, 6] += self.acc_hor * dt\n self.state[d, 7] += self.acc_hor * dt\n else:\n self.state[d, 6] -= self.acc_hor * dt\n self.state[d, 7] -= self.acc_hor * dt\n \n #restrict velocity\n np.clip(self.state[:, 6], -self.max_speed + self.wind[0], self.max_speed + self.wind[0])\n np.clip(self.state[:, 7], -self.max_speed + self.wind[1], self.max_speed + self.wind[1])\n\n\n#------------------------------------------------------------\n# set up initial state\n\nbox = ParticleBox()\ndt = 1. # 1 fps\n \n#ani = animation.FuncAnimation(fig, animate, frames=600, interval=10, init_func=init)\nfor i in range(10):\n box.step(dt)\n\n\n#final = np.hstack([box.init_state[:, :3], box.state[:, 3:]])\n\n#with open('people.csv', 'w') as writeFile:\n# writer = csv.writer(writeFile)\n# writer.writerows(final) #2d list\n\n\"\"\"with open('initial.csv', 'w') as writeInit:\n writer = csv.writer(writeInit)\n writer.writerows(box.init_state)\n \nwriteInit.close()\n \"\"\"\n \nwith open('final_2.csv', 'w') as writeFin:\n writer = csv.writer(writeFin)\n writer.writerows(box.init_state)\n writer.writerows(box.state)\n\nwriteFin.close()\n\nprint(box.state)"
] |
[
[
"numpy.linalg.norm",
"scipy.spatial.distance.pdist",
"numpy.asarray",
"numpy.random.standard_normal",
"numpy.random.seed",
"numpy.ones",
"numpy.where",
"numpy.clip",
"numpy.random.random",
"scipy.spatial.distance.cdist",
"numpy.cross"
]
] |
goktug97/DACBench
|
[
"953bc8efacdb993889b223110e25f7e453c86b2d"
] |
[
"dacbench/envs/sgd.py"
] |
[
"import math\nimport warnings\nfrom functools import reduce\n\nimport numpy as np\nimport torch\nfrom backpack import backpack, extend\nfrom backpack.extensions import BatchGrad\nfrom gym.utils import seeding\nfrom torchvision import datasets, transforms\n\nfrom dacbench import AbstractEnv\n\nwarnings.filterwarnings(\"ignore\")\n\n\nclass SGDEnv(AbstractEnv):\n \"\"\"\n Environment to control the learning rate of adam\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n Initialize SGD Env\n\n Parameters\n -------\n config : objdict\n Environment configuration\n \"\"\"\n super(SGDEnv, self).__init__(config)\n\n self.batch_size = config.training_batch_size\n self.validation_batch_size = config.validation_batch_size\n self.no_cuda = config.no_cuda\n self.current_batch_size = config.training_batch_size\n\n self.env_seed = config.seed\n self.seed(self.env_seed)\n\n self.use_cuda = not self.no_cuda and torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if self.use_cuda else \"cpu\")\n\n self.training_validation_ratio = 0.8\n # self.test_dataset = None\n self.train_dataset = None\n self.validation_dataset = None\n self.train_loader = None\n # self.test_loader = None\n self.validation_loader = None\n self.train_loader_it = None\n self.validation_loader_it = None\n\n self.train_batch_index = 0\n self.epoch_index = 0\n\n self.current_training_loss = None\n self.loss_batch = None\n\n self.model = None\n\n self.parameter_count = 0\n self.layer_sizes = []\n\n self.loss_function = torch.nn.NLLLoss(reduction=\"none\")\n self.loss_function = extend(self.loss_function)\n\n self.initial_lr = config.lr * torch.ones(\n 1, device=self.device, requires_grad=False\n )\n self.current_lr = config.lr * torch.ones(\n 1, device=self.device, requires_grad=False\n )\n\n # Adam parameters\n self.beta1 = config.beta1\n self.beta2 = config.beta2\n self.m = 0\n self.v = 0\n self.epsilon = 1.0e-08\n self.t = 0\n self.step_count = torch.zeros(1, device=self.device, requires_grad=False)\n\n self.prev_descent = None\n\n self.learning_rate = 0.001\n self.predictiveChangeVarDiscountedAverage = torch.zeros(\n 1, device=self.device, requires_grad=False\n )\n self.predictiveChangeVarUncertainty = torch.zeros(\n 1, device=self.device, requires_grad=False\n )\n self.lossVarDiscountedAverage = torch.zeros(\n 1, device=self.device, requires_grad=False\n )\n self.lossVarUncertainty = torch.zeros(\n 1, device=self.device, requires_grad=False\n )\n self.discount_factor = 0.9\n self.firstOrderMomentum = torch.zeros(\n 1, device=self.device, requires_grad=False\n )\n self.secondOrderMomentum = torch.zeros(\n 1, device=self.device, requires_grad=False\n )\n\n self.writer = None\n\n if \"reward_function\" in config.keys():\n self.get_reward = config[\"reward_function\"]\n else:\n self.get_reward = self.get_default_reward\n\n if \"state_method\" in config.keys():\n self.get_state = config[\"state_method\"]\n else:\n self.get_state = self.get_default_state\n\n def seed(self, seed=None):\n \"\"\"\n Set rng seed\n\n Parameters\n ----------\n seed:\n seed for rng\n \"\"\"\n _, seed = seeding.np_random(seed)\n if seed is not None:\n torch.manual_seed(seed)\n np.random.seed(seed)\n return [seed]\n\n def step(self, action):\n \"\"\"\n Execute environment step\n\n Parameters\n ----------\n action : list\n action to execute\n\n Returns\n -------\n np.array, float, bool, dict\n state, reward, done, info\n \"\"\"\n done = super(SGDEnv, self).step_()\n\n self.step_count += 1\n index = 0\n if not isinstance(action, float):\n action = 
action[0]\n\n action = torch.Tensor([action]).to(self.device)\n new_lr = 10 ** (-action)\n self.current_lr = new_lr\n delta_w = torch.mul(\n new_lr,\n self.firstOrderMomentum\n / (torch.sqrt(self.secondOrderMomentum) + self.epsilon),\n )\n for i, p in enumerate(self.model.parameters()):\n layer_size = self.layer_sizes[i]\n p.data = p.data - delta_w[index: index + layer_size].reshape(\n shape=p.data.shape\n )\n index += layer_size\n\n self._set_zero_grad()\n reward = self.get_reward(self)\n return self.get_state(self), reward, done, {}\n\n def reset(self):\n \"\"\"\n Reset environment\n\n Returns\n -------\n np.array\n Environment state\n \"\"\"\n super(SGDEnv, self).reset_()\n\n dataset = self.instance[0]\n instance_seed = self.instance[1]\n construct_model = self.instance[2]\n\n self.seed(instance_seed)\n\n self.model = construct_model().to(self.device)\n\n self.training_validation_ratio = 0.8\n\n train_dataloader_args = {\"batch_size\": self.batch_size}\n validation_dataloader_args = {\"batch_size\": self.validation_batch_size}\n if self.use_cuda:\n param = {\"num_workers\": 1, \"pin_memory\": True, \"shuffle\": True}\n train_dataloader_args.update(param)\n validation_dataloader_args.update(param)\n\n if dataset == \"MNIST\":\n transform = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n )\n\n train_dataset = datasets.MNIST(\n \"../data\", train=True, download=True, transform=transform\n )\n # self.test_dataset = datasets.MNIST('../data', train=False, transform=transform)\n else:\n raise NotImplementedError\n\n training_dataset_limit = math.floor(\n len(train_dataset) * self.training_validation_ratio\n )\n validation_dataset_limit = len(train_dataset)\n\n self.train_dataset = torch.utils.data.Subset(\n train_dataset, range(0, training_dataset_limit - 1)\n )\n self.validation_dataset = torch.utils.data.Subset(\n train_dataset, range(training_dataset_limit, validation_dataset_limit)\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.train_dataset, **train_dataloader_args\n )\n # self.test_loader = torch.utils.data.DataLoader(self.test_dataset, **train_dataloader_args)\n self.validation_loader = torch.utils.data.DataLoader(\n self.validation_dataset, **validation_dataloader_args\n )\n\n self.train_batch_index = 0\n self.epoch_index = 0\n self.train_loader_it = iter(self.train_loader)\n self.validation_loader_it = iter(self.validation_loader)\n\n self.parameter_count = 0\n self.layer_sizes = []\n for p in self.model.parameters():\n layer_size = reduce(lambda x, y: x * y, p.shape)\n self.layer_sizes.append(layer_size)\n self.parameter_count += layer_size\n\n self.model = extend(self.model)\n\n self._set_zero_grad()\n self.model.train()\n\n self.current_training_loss = None\n self.loss_batch = None\n\n # Adam parameters\n self.m = 0\n self.v = 0\n self.t = 0\n self.step_count = torch.zeros(1, device=self.device, requires_grad=False)\n\n self.current_lr = self.initial_lr\n self.prev_descent = torch.zeros(\n (self.parameter_count,), device=self.device, requires_grad=False\n )\n self.get_default_reward(self)\n\n return self.get_state(self)\n\n def set_writer(self, writer):\n self.writer = writer\n\n def close(self):\n \"\"\"\n No additional cleanup necessary\n\n Returns\n -------\n bool\n Cleanup flag\n \"\"\"\n return True\n\n def render(self, mode: str = \"human\"):\n \"\"\"\n Render env in human mode\n\n Parameters\n ----------\n mode : str\n Execution mode\n \"\"\"\n if mode != \"human\":\n raise NotImplementedError\n\n pass\n\n def 
get_default_state(self, _):\n \"\"\"\n Gather state description\n\n Returns\n -------\n dict\n Environment state\n\n \"\"\"\n gradients = self._get_gradients()\n self.firstOrderMomentum, self.secondOrderMomentum = self._get_momentum(\n gradients\n )\n (\n predictiveChangeVarDiscountedAverage,\n predictiveChangeVarUncertainty,\n ) = self._get_predictive_change_features(\n self.current_lr, self.firstOrderMomentum, self.secondOrderMomentum\n )\n lossVarDiscountedAverage, lossVarUncertainty = self._get_loss_features()\n\n state = {\n \"predictiveChangeVarDiscountedAverage\": predictiveChangeVarDiscountedAverage,\n \"predictiveChangeVarUncertainty\": predictiveChangeVarUncertainty,\n \"lossVarDiscountedAverage\": lossVarDiscountedAverage,\n \"lossVarUncertainty\": lossVarUncertainty,\n \"currentLR\": self.current_lr,\n \"trainingLoss\": self.current_training_loss,\n \"validationLoss\": self.current_validation_loss,\n }\n\n return state\n\n def _set_zero_grad(self):\n index = 0\n for i, p in enumerate(self.model.parameters()):\n if p.grad is None:\n continue\n layer_size = self.layer_sizes[i]\n p.grad.zero_()\n index += layer_size\n\n def _train_batch_(self):\n (data, target) = self.train_loader_it.next()\n data, target = data.to(self.device), target.to(self.device)\n self.current_batch_size = data.size()[0]\n output = self.model(data)\n loss = self.loss_function(output, target)\n\n with backpack(BatchGrad()):\n loss.mean().backward()\n\n loss_value = loss.mean()\n reward = self._get_validation_loss()\n self.loss_batch = loss\n self.current_training_loss = torch.unsqueeze(loss_value.detach(), dim=0)\n self.train_batch_index += 1\n\n return reward\n\n def get_default_reward(self, _):\n try:\n reward = self._train_batch_()\n except StopIteration:\n self.train_batch_index = 0\n self.epoch_index += 1\n self.train_loader_it = iter(self.train_loader)\n reward = self._train_batch_()\n\n return reward\n\n def _get_val_loss(self):\n self.model.eval()\n validation_loss = torch.zeros(1, device=self.device, requires_grad=False)\n with torch.no_grad():\n for data, target in self.validation_loader:\n data, target = data.to(self.device), target.to(self.device)\n output = self.model(data)\n validation_loss += self.loss_function(output, target).mean()\n\n validation_loss /= len(self.validation_loader.dataset)\n self.model.train()\n return validation_loss\n\n def _get_validation_loss_(self):\n self.model.eval()\n (data, target) = self.validation_loader_it.next()\n data, target = data.to(self.device), target.to(self.device)\n output = self.model(data)\n validation_loss = self.loss_function(output, target).mean()\n validation_loss = torch.unsqueeze(validation_loss.detach(), dim=0)\n self.current_validation_loss = validation_loss\n self.model.train()\n\n return -validation_loss.item() # negative because it is the reward\n\n def _get_validation_loss(self):\n try:\n validation_loss = self._get_validation_loss_()\n except StopIteration:\n self.validation_loader_it = iter(self.validation_loader)\n validation_loss = self._get_validation_loss_()\n\n return validation_loss\n\n def _get_gradients(self):\n gradients = []\n for p in self.model.parameters():\n if p.grad is None:\n continue\n gradients.append(p.grad.flatten())\n\n gradients = torch.cat(gradients, dim=0)\n\n return gradients\n\n def _get_momentum(self, gradients):\n self.t += 1\n self.m = self.beta1 * self.m + (1 - self.beta1) * gradients\n self.v = self.beta2 * self.v + (1 - self.beta2) * torch.square(gradients)\n bias_corrected_m = self.m / (1 - self.beta1 ** 
self.t)\n bias_corrected_v = self.v / (1 - self.beta2 ** self.t)\n\n return bias_corrected_m, bias_corrected_v\n\n def _get_adam_feature(self, learning_rate, m, v):\n epsilon = 1.0e-8\n return torch.mul(learning_rate, m / (torch.sqrt(v) + epsilon))\n\n def _get_loss_features(self):\n with torch.no_grad():\n loss_var = torch.log(torch.var(self.loss_batch))\n\n self.lossVarDiscountedAverage = (\n self.discount_factor * self.lossVarDiscountedAverage\n + (1 - self.discount_factor) * loss_var\n )\n self.lossVarUncertainty = (\n self.discount_factor * self.lossVarUncertainty\n + (1 - self.discount_factor)\n * (loss_var - self.lossVarDiscountedAverage) ** 2\n )\n\n return self.lossVarDiscountedAverage, self.lossVarUncertainty\n\n def _get_predictive_change_features(self, lr, m, v):\n batch_gradients = []\n for i, (name, param) in enumerate(self.model.named_parameters()):\n grad_batch = param.grad_batch.reshape(\n self.current_batch_size, self.layer_sizes[i]\n )\n batch_gradients.append(grad_batch)\n\n batch_gradients = torch.cat(batch_gradients, dim=1)\n\n update_value = self._get_adam_feature(lr, m, v)\n predictive_change = torch.log(\n torch.var(-1 * torch.matmul(batch_gradients, update_value))\n )\n\n self.predictiveChangeVarDiscountedAverage = (\n self.discount_factor * self.predictiveChangeVarDiscountedAverage\n + (1 - self.discount_factor) * predictive_change\n )\n self.predictiveChangeVarUncertainty = (\n self.discount_factor * self.predictiveChangeVarUncertainty\n + (1 - self.discount_factor)\n * (predictive_change - self.predictiveChangeVarDiscountedAverage) ** 2\n )\n\n return (\n self.predictiveChangeVarDiscountedAverage,\n self.predictiveChangeVarUncertainty,\n )\n"
] |
[
[
"torch.nn.NLLLoss",
"torch.device",
"torch.cat",
"torch.zeros",
"torch.var",
"torch.sqrt",
"numpy.random.seed",
"torch.square",
"torch.no_grad",
"torch.ones",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.matmul",
"torch.utils.data.DataLoader",
"torch.Tensor"
]
] |
jomavera/DRL_HFV
|
[
"043e32805ec79fd35281b864659c194d7b89f5bc"
] |
[
"infer.py"
] |
[
"import numpy as np\nfrom env import Env\nfrom models import PolicyNet, Critic\nfrom utils import one_hot\nimport torch\nfrom torch.optim import Adam\nimport time\nimport os\nfrom datetime import datetime\nimport math\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\n#------------------------SET PARAMETERS----------------------------\n\nSEED = 17\nBATCH_SIZE = 128\nN_NODES = 11\nN_DEPOT = 1\nNUM_LAYERS = 1\nCAPACITY = [20,15,10]\nMAX_DEMAND = 10\nN_VEHICLES = len(CAPACITY)\nDIM_STATIC = 2\nDIM_DYNAMIC = 1 + N_VEHICLES\nDIM_LOAD = N_VEHICLES\nDIM_EMBED = 128\nMAX_EP_lEN = 16\nGAMMA = 0.99\nENTROPY_REG = 0.01\nMAX_GRAD_NORM = 2\nDROPOUT = 0.1\nEMBED_TYPE = 'conv1d'\nLOG_INTERVAL = 200\n\n#----------------INITIALIZE ENVIROMENT AND POLICIES----------------\n\nenv_test = Env(seed = SEED, batch_size = BATCH_SIZE, capacity = CAPACITY,\n n_nodes = N_NODES, n_depot = N_DEPOT, max_demand = MAX_DEMAND, n_agents = N_VEHICLES)\n\npolicy = [PolicyNet(batch_size = BATCH_SIZE, n_nodes = N_NODES, n_agents=N_VEHICLES, num_layers = NUM_LAYERS,\n dim_s = DIM_STATIC, dim_d = DIM_DYNAMIC,\n dim_embed = DIM_EMBED, n_glimpses = 0, embeding_type=EMBED_TYPE,\n dropout = DROPOUT).to(device) for i in range(N_VEHICLES)]\n\n#------------------LOAD TRAINDEL MODEL---------------------------\nmodel_dir = 'weights/model_exp_1.pt'\npolicy_name = \"policy_agent_X\"\n\nif os.path.isfile(model_dir):\n checkpoint = torch.load(model_dir,map_location=device)\nelse:\n raise ValueError('No model file!')\n\nfor agent_id in range(N_VEHICLES):\n p_name = policy_name.replace(\"X\",str(agent_id))\n policy[agent_id].load_state_dict(checkpoint[p_name])\n\n\n#-----------------RUN TRAINED POLICY----------------\nnum_epochs = math.ceil(1000/BATCH_SIZE)\n\ntotal_tests = []\ntotal_times = []\nfor i in range(num_epochs):\n start = time.time()\n o_t, d_t, r_t = env_test.reset(), False, 0\n\n actions_ep = []\n log_probs_ep = []\n rewards_ep = []\n values_ep = []\n last_hh_t = [None]*N_VEHICLES\n for t in range(int(MAX_EP_lEN) ):\n actions = []\n actions_one_hot = []\n log_probs = []\n values = []\n for agent_id in range(N_VEHICLES) :\n model = policy[agent_id].eval()\n logits, prob , log_p, last_hh_t[agent_id] = model(o_t, last_hh_t[agent_id], agent_id)\n\n #--------- GREEDY POLICY ------------\n act = torch.argmax(prob, dim =1) # [ batch size ]\n actions.append(act.detach())\n\n ot_2, d_t, r_t = env_test.step(act.detach().unsqueeze(1), agent_id)\n o_t = ot_2\n values.append( r_t )\n\n r_step = torch.stack(values, dim = 1) #[batch_size, n_agents]\n a = torch.stack(actions, dim = 1) #[batch_size, n_agents]\n actions_ep.append(a)\n rewards_ep.append(r_step)\n end = time.time()\n rewards = torch.stack(rewards_ep, dim = 2 ).sum(dim=2).sum(dim=1) #[batch_size, n_agents, ep_len]\n total_tests.append(rewards)\n total_times.append((end-start)/BATCH_SIZE)\n\n#------------------- SAVE RESULTS -----------------------\nrewards_total = torch.stack(total_tests, dim=1).reshape(-1,)\nnp_results = rewards_total.numpy()\nnp.save('vrp_results_RL',np_results)\nnp_runtimes = np.array(total_times).reshape(-1,)\nnp.save('vrp_runtimes_RL',np_runtimes)\n"
] |
[
[
"numpy.array",
"torch.stack",
"numpy.save",
"torch.cuda.is_available",
"torch.load",
"torch.argmax"
]
] |
TUM-LMF/fieldRNN
|
[
"5e9e17b170fe000ae15a73a276742aea84e6410b"
] |
[
"evaluate.py"
] |
[
"import tensorflow as tf\nimport cPickle as pickle\nimport rnn_model\nimport cnn_model\nfrom dataloader import Dataloader\nimport os\nimport datetime\nimport numpy as np\nimport argparse\nfrom cnn_model import unroll\n\ndef main():\n parser = argparse.ArgumentParser(description='Evaluate .')\n\n parser.add_argument('rundir', type=str, help='directory of tf checkpoint file')\n parser.add_argument('--model', type=str, help=\"Neural network architecture. 'lstm', 'rnn' or 'cnn' (default lstm)\", default='lstm')\n parser.add_argument('--gpu', type=int, help=\"Select gpu (e.g. 0), via environment variable CUDA_VISIBLE_DEVICES (default None)\", default=None)\n\n args = parser.parse_args()\n\n \"\"\" GPU management \"\"\"\n allow_gpu_mem_growth = True\n gpu_memory_fraction = 1\n gpu_id = args.gpu\n\n if args.gpu is not None:\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu_id\n\n dataloader = Dataloader(datafolder=\"data/eval\", batchsize=500)\n #dataloader = Dataloader(conn=conn, batch_size=args.batchsize, sql_where=args.sqlwhere,\n # debug=False,\n # do_shuffle=False, do_init_shuffle=True, tablename=args.tablename)\n\n \"\"\"\n Load\n parameters\n from init_from model\n \"\"\"\n with open(os.path.join(args.rundir, \"args.pkl\"), \"rb\") as f:\n modelargs = pickle.load(f)\n\n \"\"\"\n Create\n new\n model\n object\n with same parameter \"\"\"\n print(\"building model graph\")\n\n if args.model in [\"rnn\",\"lstm\"]:\n model = rnn_model.Model(n_input=modelargs[\"n_input\"], n_classes=modelargs[\"n_classes\"], n_layers=modelargs[\"n_layers\"], batch_size=dataloader.batchsize,\n adam_lr=modelargs[\"adam_lr\"],rnn_cell_type=args.model , dropout_keep_prob=modelargs[\"dropout_keep_prob\"], n_cell_per_input=modelargs[\"n_cell_per_input\"], gpu=0)\n evaluate=evaluate_rnn\n\n if args.model == \"cnn\":\n model = cnn_model.Model(n_input=modelargs[\"n_input\"], n_classes=modelargs[\"n_classes\"], n_layers=modelargs[\"n_layers\"],\n adam_lr=1e-3, dropout_keep_prob=modelargs[\"dropout_keep_prob\"], n_cell_per_input=modelargs[\"n_cell_per_input\"], gpu=gpu_id)\n evaluate = evaluate_cnn\n\n probabilities, targets, observations = evaluate(model,dataloader,\n init_dir=args.rundir,\n print_every=20,\n gpu_memory_fraction=gpu_memory_fraction,\n allow_gpu_mem_growth=allow_gpu_mem_growth)\n\n #np.save(os.path.join(args.rundir, \"eval_confusion_matrix.npy\"), confusion_matrix)\n np.save(os.path.join(args.rundir, \"eval_probabilities.npy\"), probabilities)\n np.save(os.path.join(args.rundir, \"eval_targets.npy\"), targets)\n np.save(os.path.join(args.rundir, \"eval_observations.npy\"), observations)\n\ndef evaluate_rnn(model,\n dataloader,\n print_every=5,\n init_dir=None,\n allow_gpu_mem_growth=True,\n gpu_memory_fraction=0.3):\n \"\"\"\n This function initialized a model from the <init_from> directory and calculates\n probabilities, and confusion matrices based on all data stored in\n one epoch of dataloader (usually test data)\n\n\n :param model: rnn_model object containing tensorflow graph\n :param dataloader: DataLoader object for loading batches\n :param print_every: console log frequency\n :param allow_gpu_mem_growth: dynamic growth of gpu vram\n :param gpu_memory_fraction: hard upper limit for gpu vram\n\n :returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted\n :returns probabilities <float> [all observations x n_classes] probabilities for each class per observation\n :returns targets <bool> [all observations 
x n_classes] reference data for each class per observation\n :returns observations <int> [all_observations]position of observation in the sequence\n e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]\n \"\"\"\n\n saver = tf.train.Saver()\n\n # container for output data\n total_cm = np.zeros((model.n_classes, model.n_classes))\n all_scores = np.array([])\n all_targets = np.array([])\n all_obs = np.array([])\n\n step = 0\n t_last = datetime.datetime.now()\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = allow_gpu_mem_growth\n config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction\n config.allow_soft_placement = True\n\n print(\"start\")\n with tf.Session(config=config) as sess:\n\n sess.run([model.init_op])\n if init_dir is not None:\n if os.path.exists(init_dir):\n ckpt = tf.train.get_checkpoint_state(init_dir)\n print(\"restoring model from %s\" % ckpt.model_checkpoint_path)\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n for i in range(1, dataloader.num_batches):\n\n # step as number of features -> invariant to changes in batch size\n step += dataloader.batch_size\n\n s_db = datetime.datetime.now()\n X, y, seq_lengths = dataloader.next_batch()\n e_db = datetime.datetime.now()\n\n feed = {model.X: X, model.y_: y, model.seq_lengths: seq_lengths}\n\n cm, scores, targets, obs = sess.run([model.confusion_matrix, model.scores, model.targets, model.obs],\n feed_dict=feed)\n\n all_obs = np.append(all_obs, obs)\n all_scores = np.append(all_scores, scores)\n all_targets = np.append(all_targets, targets)\n #total_cm += cm\n\n e_tr = datetime.datetime.now()\n\n dt_db = e_db - s_db\n dt_tr = e_tr - e_db\n\n field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()\n # approximate calculation time\n approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)\n eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)\n\n t_last = datetime.datetime.now()\n\n if i % print_every == 0:\n cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)\n msg = \"Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} \" \\\n \"(time: db {}ms; eval {}ms, {} feat/s, eta: {})\".format(\n i,\n step,\n dataloader.epoch,\n dataloader.batch,\n dataloader.num_batches,\n cross_entropy,\n int(dt_db.total_seconds() * 1000),\n int(dt_tr.total_seconds() * 1000),\n int(field_per_s),\n eta.strftime(\"%d.%b %H:%M\")\n )\n print(msg)\n\n return all_scores.reshape(-1, model.n_classes), \\\n all_targets.reshape(-1, model.n_classes).astype(bool), \\\n all_obs\n\ndef evaluate_cnn(model,\n dataloader,\n print_every=5,\n init_dir=None,\n allow_gpu_mem_growth=True,\n gpu_memory_fraction=0.3):\n \"\"\"\n This function initialized a model from the <init_from> directory and calculates\n probabilities, and confusion matrices based on all data stored in\n one epoch of dataloader (usually test data)\n\n\n :param model: rnn_model object containing tensorflow graph\n :param dataloader: DataLoader object for loading batches\n :param print_every: console log frequency\n :param allow_gpu_mem_growth: dynamic growth of gpu vram\n :param gpu_memory_fraction: hard upper limit for gpu vram\n\n :returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted\n :returns probabilities <float> [all observations x n_classes] probabilities for each class per observation\n :returns targets <bool> [all observations x n_classes] reference data for each class per observation\n :returns observations <int> [all_observations]position 
of observation in the sequence\n e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]\n \"\"\"\n\n saver = tf.train.Saver()\n\n # container for output data\n total_cm = np.zeros((model.n_classes, model.n_classes))\n all_scores = np.array([])\n all_targets = np.array([])\n all_obs = np.array([])\n\n step = 0\n t_last = datetime.datetime.now()\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = allow_gpu_mem_growth\n config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction\n config.allow_soft_placement = True\n\n print(\"start\")\n with tf.Session(config=config) as sess:\n\n sess.run([model.init_op])\n if init_dir is not None:\n if os.path.exists(init_dir):\n ckpt = tf.train.get_checkpoint_state(init_dir)\n print(\"restoring model from %s\" % ckpt.model_checkpoint_path)\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n with open(init_dir + \"/steps.txt\", \"r\") as f:\n line = f.read()\n step_, epoch_ = line.split(\" \")\n step = int(step_)\n dataloader.epoch = int(epoch_)\n\n for i in range(1, dataloader.num_batches):\n\n # step as number of features -> invariant to changes in batch size\n step += dataloader.batch_size\n\n s_db = datetime.datetime.now()\n X, y, seq_lengths = dataloader.next_batch()\n e_db = datetime.datetime.now()\n\n # unroll also index of observation. -> TODO integrate in unroll function, but need to update also dependencies\n batch_size, max_seqlengths, n_input = X.shape\n ones = np.ones([batch_size, max_seqlengths])\n mask_ = np.arange(0, max_seqlengths) * ones < (seq_lengths * ones.T).T\n mask = mask_.reshape(-1)\n obs_ = np.arange(0, max_seqlengths) * ones\n obs = obs_.reshape(-1)[mask]\n\n \"\"\" unroll data \"\"\"\n X, y = unroll(X, y, seq_lengths)\n\n feed = {model.X: X, model.y: y, model.batch_size: X.shape[0]}\n\n scores, targets = sess.run([model.scores, model.targets],\n feed_dict=feed)\n\n all_scores = np.append(all_scores, scores)\n all_targets = np.append(all_targets, targets)\n\n e_tr = datetime.datetime.now()\n\n dt_db = e_db - s_db\n dt_tr = e_tr - e_db\n\n field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()\n # approximate calculation time\n approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)\n eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)\n\n t_last = datetime.datetime.now()\n\n if i % print_every == 0:\n cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)\n msg = \"Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} \" \\\n \"(time: db {}ms; eval {}ms, {} feat/s, eta: {})\".format(\n i,\n step,\n dataloader.epoch,\n dataloader.batch,\n dataloader.num_batches,\n cross_entropy,\n int(dt_db.total_seconds() * 1000),\n int(dt_tr.total_seconds() * 1000),\n int(field_per_s),\n eta.strftime(\"%d.%b %H:%M\")\n )\n print(msg)\n\n return all_scores.reshape(-1, model.n_classes), \\\n all_targets.reshape(-1, model.n_classes).astype(bool), \\\n obs\n\nif __name__ == '__main__':\n main()"
] |
[
[
"numpy.array",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.ones",
"tensorflow.train.get_checkpoint_state",
"tensorflow.ConfigProto",
"numpy.arange",
"numpy.append"
]
] |
ohtaman/pynm
|
[
"b003962201e4270d0dab681ede37f2d8edd560f2"
] |
[
"pynm/commands/metric.py"
] |
[
"# -*- coding:utf-8 -*-\n\nimport csv\nimport fileinput\nimport sys\n\nimport numpy\n\nfrom pynm.feature.metric.itml import learn_metric, convert_data\n\n\nclass ItmlCommand:\n name = 'itml'\n help = 'Information Theoretic Metric Learning'\n\n @classmethod\n def build_arg_parser(cls, parser):\n parser.add_argument('-i',\n '--input_data',\n default='-',\n type=str,\n metavar='FILE',\n help='input data file (default: stdin)')\n label_or_pair = parser.add_mutually_exclusive_group(required=True)\n label_or_pair.add_argument('-l',\n '--input_labels',\n default=None,\n type=str,\n metavar='FILE',\n help='input labels file')\n label_or_pair.add_argument('-p',\n '--input_pairs',\n default=None,\n type=str,\n metavar='FILE',\n help='input pairs file')\n parser.add_argument('-o',\n '--output_data',\n default=None,\n type=str,\n metavar='FILE',\n help='output data file')\n parser.add_argument('-m',\n '--output_metric',\n default=None,\n type=str,\n metavar='FILE',\n help='output metric file')\n parser.add_argument('-w',\n '--output_weights',\n default=None,\n type=str,\n metavar='FILE',\n help='output weights file')\n\n parser.add_argument('-d',\n '--delimiter',\n default='\\t',\n type=str,\n metavar='DELIM',\n help='delimiter (default: \"\\\\t\")')\n parser.add_argument('-s',\n '--sparse',\n action='store_true',\n help='sparse format (not implemented yet)')\n parser.add_argument('--header',\n action='store_true',\n help='has header')\n\n parser.add_argument('-U',\n '--u_param',\n default=1.0,\n type=float,\n metavar='DISTANCE',\n help='U parameter (max distance for same labels, default: 1.0)')\n parser.add_argument('-L',\n '--l_param',\n default=1.0,\n type=float,\n metavar='DISTANCE',\n help='L parameter (min distance for different labels, default: 1.0)')\n parser.add_argument('-S',\n '--slack',\n default=1.0,\n type=float,\n metavar='SLACK',\n help='slack variable (default: 1.0)')\n parser.add_argument('-N',\n '--max_iteration_number',\n default=1000,\n type=int,\n metavar='MAX',\n help='max iteration (default: 1000)')\n\n def run(self, args):\n with fileinput.input(args.input_data) as in_:\n header, data = self.load_data(in_,\n delimiter=args.delimiter,\n has_header=args.header)\n\n if args.input_labels is not None:\n with fileinput.input(args.input_labels) as in_:\n labels = self.load_labels(in_)\n pairs = None\n elif args.input_pairs is not None:\n with fileinput.input(args.input_pairs) as in_:\n pairs = self.load_pairs(in_)\n labels = None\n\n metric = learn_metric(data,\n labels=labels,\n pairs=pairs,\n u=args.u_param,\n l=args.l_param,\n slack=args.slack,\n max_iter=args.max_iteration_number,\n is_sparse=args.sparse)\n\n if args.output_metric is not None:\n if args.output_metric == '-':\n self.export_metric(sys.stdout, metric, header)\n else:\n with open(args.output_metric, 'w') as o_:\n self.export_metric(o_, metric, header)\n if args.output_weights is not None:\n weights = numpy.diag(metric)\n if args.output_weights == '-':\n self.export_weights(sys.stdout, weights, header)\n else:\n with open(args.output_weights, 'w') as o_:\n self.export_weights(o_, weights, header)\n if args.output_data is not None:\n converted_data = convert_data(metric, data)\n if args.output_data == '-':\n self.export_data(sys.stdout, converted_data, header)\n else:\n with open(args.output_data, 'w') as o_:\n self.export_data(o_, converted_data, header)\n return 0\n\n def load_data(self,\n input_data,\n delimiter='\\t',\n has_header=False):\n reader = csv.reader(input_data, delimiter=delimiter)\n if has_header:\n 
header = {value: key for key, value in enumerate(reader.next())}\n else:\n header = None\n\n data = []\n for row in reader:\n data.append(numpy.array(list(map(lambda x: float(x), row))))\n return header, data\n\n def load_labels(self, input_labels):\n return list(map(lambda x: int(x), input_labels))\n\n def load_pairs(self, input_pairs, delimiter='\\t', header=None):\n pairs = []\n if header is None:\n for line in input_pairs:\n row = line.split(delimiter)\n idx1 = int(row[0])\n idx2 = int(row[1])\n similar = int(row[2]) > 0\n pairs.append((idx1, idx2, similar))\n else:\n for line in input_pairs:\n row = line.split(delimiter)\n idx1 = header[row[0]]\n idx2 = header[row[1]]\n similar = int(row[2]) > 0\n pairs.append((idx1, idx2, similar))\n return pairs\n\n def export_metric(self,\n output,\n metric,\n header=None,\n sparse=False):\n if sparse:\n raise NotImplementedError('sparse is not supported yet.')\n\n writer = csv.writer(output)\n if header is not None:\n writer.writerow(header)\n for row in metric:\n writer.writerow(row)\n\n def export_weights(self,\n output,\n weights,\n header=None):\n writer = csv.writer(output)\n if header is not None:\n writer.writerow(header)\n writer.writerow(weights)\n\n def export_data(self,\n output,\n data,\n header=None,\n sparse=False):\n if sparse:\n raise NotImplementedError('sparse is not supported yet.')\n\n writer = csv.writer(output)\n if header is not None:\n writer.writerow(header)\n for row in data:\n writer.writerow(row)\n\n\nclass MetricCommand:\n name = 'metric'\n help = 'Metric Learning'\n\n sub_commands = [ItmlCommand]\n default_command = sub_commands[0]\n\n def build_arg_parser(self, parser):\n self.default_command.build_arg_parser(parser)\n subparsers = parser.add_subparsers(title='algorithm', dest='algorithm')\n for command in self.sub_commands:\n subparser = subparsers.add_parser(command.name, help=command.help)\n command.build_arg_parser(subparser)\n\n def run(self, args):\n sub_command = self._get_sub_command(args.algorithm)\n return sub_command.run(args)\n\n def _get_sub_command(self, algorithm):\n if algorithm is None:\n return self.default_command()\n return next(filter(lambda x: x.name == algorithm, self.sub_commands))()\n"
] |
[
[
"numpy.diag"
]
] |
michaeldeistler/sbibm-1
|
[
"b9781c610a1a80d2de014ee46a29cf061fb6074a"
] |
[
"sbibm/third_party/kgof/test/test_goftest.py"
] |
[
"\"\"\"\nModule for testing goftest module.\n\"\"\"\n\n__author__ = \"wittawat\"\n\nimport unittest\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpy.testing as testing\nimport scipy.stats as stats\n\nimport sbibm.third_party.kgof.data as data\nimport sbibm.third_party.kgof.density as density\nimport sbibm.third_party.kgof.glo as glo\nimport sbibm.third_party.kgof.goftest as gof\nimport sbibm.third_party.kgof.kernel as kernel\nimport sbibm.third_party.kgof.util as util\n\n\nclass TestFSSD(unittest.TestCase):\n def setUp(self):\n pass\n\n def test_basic(self):\n \"\"\"\n Nothing special. Just test basic things.\n \"\"\"\n seed = 12\n # sample\n n = 100\n alpha = 0.01\n for d in [1, 4]:\n mean = np.zeros(d)\n variance = 1\n isonorm = density.IsotropicNormal(mean, variance)\n\n # only one dimension of the mean is shifted\n # draw_mean = mean + np.hstack((1, np.zeros(d-1)))\n draw_mean = mean + 0\n draw_variance = variance + 1\n X = util.randn(n, d, seed=seed) * np.sqrt(draw_variance) + draw_mean\n dat = data.Data(X)\n\n # Test\n for J in [1, 3]:\n sig2 = util.meddistance(X, subsample=1000) ** 2\n k = kernel.KGauss(sig2)\n\n # random test locations\n V = util.fit_gaussian_draw(X, J, seed=seed + 1)\n null_sim = gof.FSSDH0SimCovObs(n_simulate=200, seed=3)\n fssd = gof.FSSD(isonorm, k, V, null_sim=null_sim, alpha=alpha)\n\n tresult = fssd.perform_test(dat, return_simulated_stats=True)\n\n # assertions\n self.assertGreaterEqual(tresult[\"pvalue\"], 0)\n self.assertLessEqual(tresult[\"pvalue\"], 1)\n\n def test_optimized_fssd(self):\n \"\"\"\n Test FSSD test with parameter optimization.\n \"\"\"\n seed = 4\n # sample size\n n = 179\n alpha = 0.01\n for d in [1, 3]:\n mean = np.zeros(d)\n variance = 1.0\n p = density.IsotropicNormal(mean, variance)\n # Mean difference. obvious reject\n ds = data.DSIsotropicNormal(mean + 4, variance + 0)\n dat = ds.sample(n, seed=seed)\n # test\n for J in [1, 4]:\n opts = {\"reg\": 1e-2, \"max_iter\": 10, \"tol_fun\": 1e-3, \"disp\": False}\n tr, te = dat.split_tr_te(tr_proportion=0.3, seed=seed + 1)\n\n Xtr = tr.X\n gwidth0 = util.meddistance(Xtr, subsample=1000) ** 2\n # random test locations\n V0 = util.fit_gaussian_draw(Xtr, J, seed=seed + 1)\n V_opt, gw_opt, opt_result = gof.GaussFSSD.optimize_locs_widths(\n p, tr, gwidth0, V0, **opts\n )\n\n # construct a test\n k_opt = kernel.KGauss(gw_opt)\n null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=10)\n fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)\n fssd_opt_result = fssd_opt.perform_test(te, return_simulated_stats=True)\n assert fssd_opt_result[\"h0_rejected\"]\n\n def test_auto_init_opt_fssd(self):\n \"\"\"\n Test FSSD-opt test with automatic parameter initialization.\n \"\"\"\n seed = 5\n # sample size\n n = 191\n alpha = 0.01\n for d in [1, 4]:\n mean = np.zeros(d)\n variance = 1.0\n p = density.IsotropicNormal(mean, variance)\n # Mean difference. 
obvious reject\n ds = data.DSIsotropicNormal(mean + 4, variance + 0)\n dat = ds.sample(n, seed=seed)\n # test\n for J in [1, 3]:\n opts = {\"reg\": 1e-2, \"max_iter\": 10, \"tol_fun\": 1e-3, \"disp\": False}\n tr, te = dat.split_tr_te(tr_proportion=0.3, seed=seed + 1)\n\n V_opt, gw_opt, opt_result = gof.GaussFSSD.optimize_auto_init(\n p, tr, J, **opts\n )\n\n # construct a test\n k_opt = kernel.KGauss(gw_opt)\n null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=10)\n fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)\n fssd_opt_result = fssd_opt.perform_test(te, return_simulated_stats=True)\n assert fssd_opt_result[\"h0_rejected\"]\n\n def test_ustat_h1_mean_variance(self):\n seed = 20\n # sample\n n = 200\n alpha = 0.01\n for d in [1, 4]:\n mean = np.zeros(d)\n variance = 1\n isonorm = density.IsotropicNormal(mean, variance)\n\n draw_mean = mean + 2\n draw_variance = variance + 1\n X = util.randn(n, d, seed=seed) * np.sqrt(draw_variance) + draw_mean\n dat = data.Data(X)\n\n # Test\n for J in [1, 3]:\n sig2 = util.meddistance(X, subsample=1000) ** 2\n k = kernel.KGauss(sig2)\n\n # random test locations\n V = util.fit_gaussian_draw(X, J, seed=seed + 1)\n\n null_sim = gof.FSSDH0SimCovObs(n_simulate=200, seed=3)\n fssd = gof.FSSD(isonorm, k, V, null_sim=null_sim, alpha=alpha)\n fea_tensor = fssd.feature_tensor(X)\n\n u_mean, u_variance = gof.FSSD.ustat_h1_mean_variance(fea_tensor)\n\n # assertions\n self.assertGreaterEqual(u_variance, 0)\n # should reject H0\n self.assertGreaterEqual(u_mean, 0)\n\n def tearDown(self):\n pass\n\n\n# end class TestFSSD\n\n\nclass TestSteinWitness(unittest.TestCase):\n def test_basic(self):\n d = 3\n p = density.IsotropicNormal(mean=np.zeros(d), variance=3.0)\n q = density.IsotropicNormal(mean=np.zeros(d) + 2, variance=3.0)\n k = kernel.KGauss(2.0)\n\n ds = q.get_datasource()\n n = 97\n dat = ds.sample(n, seed=3)\n\n witness = gof.SteinWitness(p, k, dat)\n # points to evaluate the witness\n J = 4\n V = np.random.randn(J, d) * 2\n evals = witness(V)\n\n testing.assert_equal(evals.shape, (J, d))\n\n\n# end class TestSteinWitness\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.sqrt",
"numpy.random.randn",
"numpy.zeros",
"numpy.testing.assert_equal"
]
] |
deepmind/brave
|
[
"0ae20d9afcf6b1fa4d31d70c906d711901b56e9c"
] |
[
"brave/evaluate_video_embeddings.py"
] |
[
"# Copyright 2021 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A runnable program to evaluate video embeddings.\n\nGiven a model checkpoint, and the location of the shards for a dataset,\ncomputes the performance of the Brave video embeddings. This code\nmay be used to evaluate both UCF101 and HMDB51, as long as they are both\ngiven in the appropriate input format. The only hyperparameter to this program\nis the svm_regularization constant, which can impact the performance of the\nlinear classification.\n\"\"\"\n\nimport glob\nimport json\n\nfrom absl import app\nfrom absl import flags\nimport chex\nimport jax\nimport numpy as np\nimport tensorflow as tf\n\nfrom brave.datasets import datasets\nfrom brave.evaluate import evaluate_video_embedding\nfrom brave.models.brave import brave\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('checkpoint_path', None, 'Checkpoint to evaluate.')\nflags.DEFINE_integer('batch_size', None, 'The size of the batches to use.')\n\n# Hyperparameters\nflags.DEFINE_float('svm_regularization', None, 'Regularization constant.')\n\n# Datasets\nflags.DEFINE_string('train_dataset_shards', None,\n 'Glob pattern for train shards.')\nflags.DEFINE_string('test_dataset_shards', None,\n 'Glob pattern for test shards.')\n\n# Transformations to apply to video before running network.\nflags.DEFINE_integer('num_video_frames', 32, 'Number of frames in eval videos.')\nflags.DEFINE_integer('video_step', 2, 'The step to use in the eval videos.')\nflags.DEFINE_integer('image_size', 224, 'The size of the video to evaluate.')\n\n\ndef main(_):\n checkpoint_path = FLAGS.checkpoint_path\n\n train_shards = glob.glob(FLAGS.train_dataset_shards)\n test_shards = glob.glob(FLAGS.test_dataset_shards)\n\n video_config = evaluate_video_embedding.VideoConfig(\n num_frames=FLAGS.num_video_frames,\n image_size=FLAGS.image_size,\n video_step=FLAGS.video_step,\n )\n\n video_embedding_fn = _video_embedding(checkpoint_path)\n\n results = evaluate_video_embedding.evaluate_video_embedding(\n train_dataset_shards=train_shards,\n test_dataset_shards=test_shards,\n embedding_fn=video_embedding_fn,\n config=video_config,\n svm_regularization=FLAGS.svm_regularization,\n batch_size=FLAGS.batch_size)\n\n results_dct = dict(\n top_1_train=results.train.top_one_accuracy,\n top_5_train=results.train.top_five_accuracy,\n top_1_test=results.test.top_one_accuracy,\n top_5_test=results.test.top_five_accuracy,\n )\n\n # Write the results to stdout in a way that can be used as input to other\n # programs.\n print(json.dumps(results_dct))\n\n\ndef _video_embedding(checkpoint_path: str):\n \"\"\"Load the video embedding for the BraVe model to evaluate.\"\"\"\n\n checkpoint = np.load(checkpoint_path, allow_pickle=True).item()\n params = checkpoint['params']\n state = checkpoint['state']\n brave_config_dct = checkpoint['config']\n\n brave_config = brave.BraveConfig(**brave_config_dct)\n model = 
brave.get_model(brave_config)\n\n @jax.jit\n def embedding_fn(view: datasets.View) -> chex.Array:\n narrow_forward_fn = model.forward_fns['narrow_video']\n embedding, _ = narrow_forward_fn(params, state, None, view, False)\n return embedding\n\n def synchronous_embedding_fn(view: datasets.View) -> chex.Array:\n # jax.jit causes the above function to be executed lazily, but we want\n # to force the computation to happen synchronously.\n return jax.device_get(embedding_fn(view))\n\n return synchronous_embedding_fn\n\n\nif __name__ == '__main__':\n try:\n tf.config.set_visible_devices([], 'GPU') # Prevent TF from using the GPU.\n except tf.errors.NotFoundError:\n pass\n\n flags.mark_flag_as_required('checkpoint_path')\n flags.mark_flag_as_required('batch_size')\n flags.mark_flag_as_required('train_dataset_shards')\n flags.mark_flag_as_required('test_dataset_shards')\n flags.mark_flag_as_required('svm_regularization')\n\n app.run(main)\n"
] |
[
[
"numpy.load",
"tensorflow.config.set_visible_devices"
]
] |
helkebir/Reachable-Set-Inner-Approximation
|
[
"4e05780b692214c26c76692f65f61d2f7f506e79"
] |
[
"geometry_tools.py"
] |
[
"import numpy as np\nfrom shapely import geometry\n\ndef shrink(coords: np.ndarray, dist: np.ndarray) -> tuple[np.ndarray]:\n \"\"\"Shrinks a 2D polygon by a given distance.\n\n The coordinates of the polygon are expected as an N x 2-matrix,\n and a positive distance results in inward shrinking.\n \n An empty set is returned if the shrinking operation removes all\n original elements.\n\n Args:\n coords: A matrix of coordinates.\n dist: The distance to shrink by.\n\n Returns:\n A tuple containing the x, y coordinates of the original set, as\n well as the x and y coordinates of the shrunken set, in that\n order.\n \"\"\"\n my_polygon = geometry.Polygon(coords)\n xy = my_polygon.exterior.xy\n \n my_polygon_shrunken = my_polygon.buffer(-dist)\n \n try:\n xys = my_polygon_shrunken.exterior.xy\n except AttributeError:\n xys = ([0], [0]) # Empty set\n \n return (*xy, *xys)\n\ndef hausdorff(A: np.ndarray, B: np.ndarray) -> float:\n \"\"\"Computes the Hausdorff distance between two 2D polygons.\n\n Args:\n A: A matrix defining the first polygon.\n B: A matrix defining the second polygon.\n \n Returns:\n A float representing the Hausdorff distance.\n \"\"\"\n return geometry.Polygon(A).hausdorff_distance(geometry.Polygon(B))\n\ndef read_polygon(file: str) -> np.ndarray:\n \"\"\"Reads a polygon from a table.\n\n Args:\n file: Path to a file containing a plain text, tab-separated\n table with scalars.\n \n Returns:\n A matrix containing the data in the file.\n \"\"\"\n return np.genfromtxt(file)\n\nif __name__ == \"__main__\":\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n\n # Distance to shrink by\n dh = 0.01\n\n x, y, xs, ys = shrink(read_polygon('example.txt'), dh)\n\n ax = plt.subplot()\n ax.grid(which='major', alpha=0.5, color='k')\n ax.grid(which='minor', alpha=0.3, color='k', linestyle=':')\n ax.minorticks_on()\n ax.set_axisbelow(True)\n\n ax.fill(x, y, color='b', facecolor='lightskyblue',\n edgecolor='dodgerblue', label='Original', alpha=0.75)\n ax.fill(xs, ys, facecolor='mediumseagreen', edgecolor='forestgreen',\n label='Shrunk', alpha=0.75)\n ax.set_aspect('equal')\n ax.legend()\n\n golden = 0.01017601435813135\n\n assert(np.isclose(\n hausdorff(np.vstack([x, y]).T, np.vstack([xs, ys]).T),\n golden\n ))\n\n print(\"SUCCESS\")\n print(f'Area original: {geometry.Polygon(np.vstack([x, y]).T).area:.6f}')\n print(f'Area shrunk: {geometry.Polygon(np.vstack([xs, ys]).T).area:.6f}')\n plt.show()"
] |
[
[
"matplotlib.pyplot.show",
"numpy.genfromtxt",
"numpy.vstack",
"matplotlib.pyplot.subplot"
]
] |
miroozyx/Magin-Based-loss
|
[
"fedb43af495d60079fe87ecee8b4ad1c59e17cdc"
] |
[
"loss.py"
] |
[
"import tensorflow as tf\nfrom tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import pairwise_distance\n\n\ndef dist_weighted_sampling(labels, embeddings, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1):\n \"\"\"\n Distance weighted sampling.\n # References\n - [sampling matters in deep embedding learning]\n (https://arxiv.org/abs/1706.07567)\n\n # Arguments:\n labels: 1-D tf.int32 `Tensor` with shape [batch_size] of\n multi-class integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should\n be l2 normalized.\n high_var_threshold: float. cutoff for high gradient variance.\n nonzero_loss_threshold: float. cutoff for non-zero loss zone.\n neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.\n Returns:\n a_indices: indices of anchors.\n anchors: sampled anchor embeddings.\n positives: sampled positive embeddings.\n negatives: sampled negative embeddings.\n \"\"\"\n if not isinstance(neg_multiplier, int):\n raise ValueError(\"`neg_multiplier` must be an integer.\")\n n = tf.size(labels)\n if not isinstance(embeddings, tf.Tensor):\n embeddings = tf.convert_to_tensor(embeddings)\n d = embeddings.shape[1].value\n\n distances = pairwise_distance(embeddings, squared=False)\n # cut off to void high variance.\n distances = tf.maximum(distances, high_var_threshold)\n\n # subtract max(log(distance)) for stability\n log_weights = (2 - d) * tf.log(distances + 1e-16) - 0.5 * (d - 3) * tf.log(1 + 1e-16 - 0.25 * (distances**2))\n weights = tf.exp(log_weights - tf.reduce_max(log_weights))\n\n # sample only negative examples by setting weights of the same class examples to 0.\n lshape = tf.shape(labels)\n assert lshape.shape == 1\n labels = tf.reshape(labels, [lshape[0], 1])\n adjacency = tf.equal(labels, tf.transpose(labels))\n adjacency_not = tf.logical_not(adjacency)\n mask = tf.cast(adjacency_not, tf.float32)\n\n # number of negative/positive samples to sampling per sample.\n # For imbalanced data, this sampling method can be a sample weighted method.\n adjacency_ex = tf.cast(adjacency, tf.int32) - tf.diag(tf.ones(n, dtype=tf.int32))\n m = tf.reduce_sum(adjacency_ex, axis=1)\n if tf.reduce_min(m) == 0:\n m = tf.diag(tf.cast(tf.equal(m,0), tf.int32))\n adjacency_ex += m\n k = tf.maximum(tf.reduce_max(m),1) * neg_multiplier\n\n pos_weights = tf.cast(adjacency_ex, tf.float32)\n\n weights = weights * mask * tf.cast(distances < nonzero_loss_threshold, tf.float32)\n weights = weights / (tf.reduce_sum(weights, axis=1, keepdims=True) + 1e-16)\n\n # anchors indices\n a_indices = tf.reshape(tf.range(n), (-1,1))\n a_indices = tf.tile(a_indices, [1, k])\n a_indices = tf.reshape(a_indices, (-1,))\n\n # negative sampling\n def neg_sampling(i):\n s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)\n return s\n\n n_indices = tf.map_fn(neg_sampling, tf.range(n), dtype=tf.int32)\n n_indices = tf.reshape(n_indices, (-1,))\n\n # postive samping\n def pos_sampling(i):\n s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(pos_weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)\n return s\n\n p_indices = tf.map_fn(pos_sampling, tf.range(n), dtype=tf.int32)\n p_indices = tf.reshape(p_indices, (-1,))\n\n anchors = tf.gather(embeddings, a_indices, name='gather_anchors')\n positives = tf.gather(embeddings, p_indices, name='gather_pos')\n negatives = tf.gather(embeddings, n_indices, name='gather_neg')\n\n return a_indices, anchors, positives, 
negatives\n\n\ndef margin_based_loss(labels, embeddings, beta_in=1.0, margin=0.2, nu=0.0, high_var_threshold=0.5,\n nonzero_loss_threshold=1.4, neg_multiplier=1):\n \"\"\"\n Computes the margin base loss.\n # References\n - [sampling matters in deep embedding learning]\n (https://arxiv.org/abs/1706.07567)\n\n Args:\n labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.\n beta_in: float,int or 1-D, float `Tensor` with shape [labels_size] of multi-class boundary parameters.\n margin: Float, margin term in the loss function.\n nu: float. Regularization parameter for beta.\n high_var_threshold: float. cutoff for high gradient variance.\n nonzero_loss_threshold: float. cutoff for non-zero loss zone.\n neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.\n Returns:\n margin_based_Loss: tf.float32 scalar\n \"\"\"\n\n a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,\n embeddings,\n high_var_threshold=high_var_threshold,\n nonzero_loss_threshold=nonzero_loss_threshold,\n neg_multiplier=neg_multiplier)\n if isinstance(beta_in, (float,int)):\n beta = beta_in\n beta_reg_loss = 0.0\n else:\n if isinstance(beta_in, tf.Tensor):\n assert tf.shape(beta_in).shape == 1\n k = tf.size(a_indices) / tf.size(labels)\n k = tf.cast(k, tf.int32)\n beta = tf.reshape(beta_in, (-1, 1))\n beta = tf.tile(beta, [1, k])\n beta = tf.reshape(beta, (-1,))\n beta_reg_loss = tf.reduce_sum(beta) * nu\n else:\n raise ValueError(\"`beta_in` must be one of [float, int, tf.Tensor].\")\n\n d_ap = tf.sqrt(tf.reduce_sum(tf.square(positives - anchors), axis=1) + 1e-16)\n d_an = tf.sqrt(tf.reduce_sum(tf.square(negatives - anchors), axis=1) + 1e-16)\n\n pos_loss = tf.maximum(margin + d_ap - beta, 0)\n neg_loss = tf.maximum(margin + beta - d_an, 0)\n\n pair_cnt = tf.cast(tf.size(a_indices), tf.float32)\n\n # normalize based on the number of pairs\n loss = (tf.reduce_sum(pos_loss) + tf.reduce_sum(neg_loss) + beta_reg_loss) / pair_cnt\n return loss\n\n\ndef distance_weighted_triplet_loss(labels, embeddings, margin=1.0, squared=False, high_var_threshold=0.5,\n nonzero_loss_threshold=1.4, neg_multiplier=1):\n \"\"\"distance weighted sampling + triplet loss\n Args:\n labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.\n embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.\n margin: Float, margin term in the loss function.\n squared: Boolean, whether or not to square the triplet distances.\n nu: float. Regularization parameter for beta.\n high_var_threshold: float. cutoff for high gradient variance.\n nonzero_loss_threshold: float. cutoff for non-zero loss zone.\n neg_multiplier: int, default=1. the multiplier to enlarger the negative and positive samples.\n Returns:\n triplet_loss: tf.float32 scalar\n\n \"\"\"\n a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,\n embeddings,\n high_var_threshold=high_var_threshold,\n nonzero_loss_threshold=nonzero_loss_threshold,\n neg_multiplier=neg_multiplier)\n\n d_ap = tf.reduce_sum(tf.square(positives - anchors), axis=1)\n d_an = tf.reduce_sum(tf.square(negatives - anchors), axis=1)\n if not squared:\n d_ap = K.sqrt(d_ap + 1e-16)\n d_an = K.sqrt(d_an + 1e-16)\n\n loss = tf.maximum(d_ap - d_an + margin, 0)\n loss = tf.reduce_mean(loss)\n return loss\n"
] |
[
[
"tensorflow.reduce_min",
"tensorflow.ones",
"tensorflow.logical_not",
"tensorflow.reshape",
"tensorflow.tile",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.expand_dims",
"tensorflow.log",
"tensorflow.reduce_sum",
"tensorflow.size",
"tensorflow.convert_to_tensor",
"tensorflow.contrib.losses.python.metric_learning.metric_loss_ops.pairwise_distance",
"tensorflow.equal",
"tensorflow.reduce_max",
"tensorflow.gather",
"tensorflow.maximum",
"tensorflow.reduce_mean",
"tensorflow.square"
]
] |
shirlevy007/IML.HUJI
|
[
"07e9db86f83925719242d20de52e65d2fe3786ce"
] |
[
"IMLearn/learners/regressors/polynomial_fitting.py"
] |
[
"from __future__ import annotations\nfrom typing import NoReturn\nfrom . import LinearRegression\nfrom ...base import BaseEstimator\nimport numpy as np\n# import linear_regression\n\n\nclass PolynomialFitting(BaseEstimator):\n \"\"\"\n Polynomial Fitting using Least Squares estimation\n \"\"\"\n def __init__(self, k: int) -> PolynomialFitting:\n \"\"\"\n Instantiate a polynomial fitting estimator\n\n Parameters\n ----------\n k : int\n Degree of polynomial to fit\n \"\"\"\n super().__init__()\n self.deg_ = k\n self.vander_, self.vander_linear_ = None, LinearRegression(False)\n # raise NotImplementedError()\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit Least Squares model to polynomial transformed samples\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n \"\"\"\n # self.vander_ = np.vander(X, self.deg_, increasing=True)\n self.vander_linear_.fit(self.__transform(X), y)\n\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n return self.vander_linear_.predict(self.__transform(X))\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under MSE loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under MSE loss function\n \"\"\"\n return self.vander_linear_.loss(self.__transform(X), y)\n\n def __transform(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Transform given input according to the univariate polynomial transformation\n\n Parameters\n ----------\n X: ndarray of shape (n_samples,)\n\n Returns\n -------\n transformed: ndarray of shape (n_samples, k+1)\n Vandermonde matrix of given samples up to degree k\n \"\"\"\n X_vander = np.vander(X, self.deg_ + 1, increasing=True)\n return X_vander\n"
] |
[
[
"numpy.vander"
]
] |
cscyuge/pointer-generator
|
[
"74b3b974e72209dc7a4045cabb758465998c920a"
] |
[
"util_common/nlp/Sumy/summarizers/lsa.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division, print_function, unicode_literals\n\nimport math\n\nfrom warnings import warn\n\ntry:\n import numpy\nexcept ImportError:\n numpy = None\n\ntry:\n from numpy.linalg import svd as singular_value_decomposition\nexcept ImportError:\n singular_value_decomposition = None\nfrom ._summarizer import AbstractSummarizer\n\n\nclass LsaSummarizer(AbstractSummarizer):\n MIN_DIMENSIONS = 3\n REDUCTION_RATIO = 1/1\n _stop_words = frozenset()\n\n @property\n def stop_words(self):\n return self._stop_words\n\n @stop_words.setter\n def stop_words(self, words):\n self._stop_words = frozenset(map(self.normalize_word, words))\n\n def __call__(self, document, sentences_count):\n self._ensure_dependecies_installed()\n dictionary = self._create_dictionary(document)\n # empty document\n if not dictionary:\n return ()\n\n matrix = self._create_matrix(document, dictionary)\n matrix = self._compute_term_frequency(matrix)\n \n u, sigma, v = singular_value_decomposition(matrix, full_matrices=False)\n\n ranks = iter(self._compute_ranks(sigma, v))\n return self._get_best_sentences(document.sentences, sentences_count,\n lambda s: next(ranks))\n\n def _ensure_dependecies_installed(self):\n if numpy is None:\n raise ValueError(\"LSA summarizer requires NumPy. Please, install it by command 'pip install numpy'.\")\n\n def _create_dictionary(self, document):\n \"\"\"Creates mapping key = word, value = row index\"\"\"\n # print(document.words)\n words = map(self.normalize_word, document.words)\n unique_words = frozenset(self.stem_word(w) for w in words if w not in self._stop_words)\n\n return dict((w, i) for i, w in enumerate(unique_words))\n\n def _create_matrix(self, document, dictionary):\n \"\"\"\n Creates matrix of shape |unique words|ร|sentences| where cells\n contains number of occurences of words (rows) in senteces (cols).\n \"\"\"\n sentences = document.sentences\n\n words_count = len(dictionary)\n sentences_count = len(sentences)\n if words_count < sentences_count:\n message = (\n \"Number of words (%d) is lower than number of sentences (%d). 
\"\n \"LSA algorithm may not work properly.\"\n )\n warn(message % (words_count, sentences_count))\n\n # create matrix |unique words|ร|sentences| filled with zeroes\n matrix = numpy.zeros((words_count, sentences_count))\n for col, sentence in enumerate(sentences):\n for word in map(self.stem_word, sentence.words):\n # only valid words is counted (not stop-words, ...)\n if word in dictionary:\n row = dictionary[word]\n matrix[row, col] += 1\n\n return matrix\n\n def _compute_term_frequency(self, matrix, smooth=0.4):\n \"\"\"\n Computes TF metrics for each sentence (column) in the given matrix.\n You can read more about smoothing parameter at URL below:\n http://nlp.stanford.edu/IR-book/html/htmledition/maximum-tf-normalization-1.html\n \"\"\"\n assert 0.0 <= smooth < 1.0\n\n max_word_frequencies = numpy.max(matrix, axis=0)\n rows, cols = matrix.shape\n for row in range(rows):\n for col in range(cols):\n max_word_frequency = max_word_frequencies[col]\n if max_word_frequency != 0:\n frequency = matrix[row, col]/max_word_frequency\n matrix[row, col] = smooth + (1.0 - smooth)*frequency\n\n return matrix\n\n def _compute_ranks(self, sigma, v_matrix):\n assert len(sigma) == v_matrix.shape[0], \"Matrices should be multiplicable\"\n\n dimensions = max(LsaSummarizer.MIN_DIMENSIONS,\n int(len(sigma)*LsaSummarizer.REDUCTION_RATIO))\n powered_sigma = tuple(s**2 if i < dimensions else 0.0\n for i, s in enumerate(sigma))\n\n ranks = []\n # iterate over columns of matrix (rows of transposed matrix)\n for column_vector in v_matrix.T:\n rank = sum(s*v**2 for s, v in zip(powered_sigma, column_vector))\n ranks.append(math.sqrt(rank))\n\n return ranks\n"
] |
[
[
"numpy.max",
"numpy.zeros",
"numpy.linalg.svd"
]
] |
guillaumedavidphd/efit2d-pyopencl
|
[
"bf571f8de86aec710e92896e901322edc4ba31c1"
] |
[
"EFIT2D_Classes.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@package EFIT2D_Classes\n\nSupport Library: efit2d-pyopencl\n\nManuscript Title: Optimized OpenCL implementation of the Elastodynamic Finite Integration Technique for viscoelastic media\n\nAuthors: M Molero, U Iturraran-Viveros, S Aparicio, M.G. Hernรกndez \n\nProgram title: EFIT2D-PyOpenCL\n\nJournal reference: Comput. Phys. Commun.\n\nProgramming language: Python.\n\nExternal routines: numpy, scipy, matplotlib, glumpy, pyopencl\n\nComputer: computers having GPU or Multicore CPU with OpenCL drivers.\n\n\n\tAll classes here defined are used to define:\n\t\t- The scenario,\n\t\t- Material objects,\n\t\t- Input sources,\n\t\t- Inspection setup,\n\t\t- Simulation parameters\n\n\"\"\"\n\nimport\t\t numpy\t\t\t\t as np\nfrom\t\t math\t\t\t\t import sin, cos, sqrt, pi, exp\nimport\t\t random\nimport\t\t time\nfrom\t\t scipy\t\t\t\t import signal\nfrom\t\t scipy.fftpack\t\t import fftshift\nfrom skimage.transform import rotate\n\ntry:\n\tfrom\tImage import\t\tImage \nexcept:\n\tfrom PIL import Image\n\nfrom\t\t matplotlib\t\t import cm\nimport\t\t matplotlib.pyplot\t as plt\n\n\ndef imresize(arr, size, **kwargs):\n from PIL import Image\n size_list = [int(arr.shape[0] * size), int(arr.shape[1] * size)]\n return np.array(Image.fromarray(arr).resize(size_list))\n\n\ndef imrotate(arr, angle, **kwargs):\n return rotate(arr, angle=angle)\n\n\ndef RaisedCosinePulse(t, Freq, Amplitude):\n\t\"\"\"\n\tRaised-Cosine Pulse\n\t\n\t@param t time vector\n\t@param Freq Frequency in Hz\n\t@param Amplitude Real Value of Amplitude\n\t\t\n\t@return Output signal vector\n\t@retval P vector of length equals to the time vector t\n\t\n\t\"\"\" \n\tN = np.size(t,0)\n\tP = np.zeros((N,),dtype=np.float32)\n\tfor m in range(0,N):\n\t\tif t[m] <= 2.0/Freq:\n\t\t\tP[m] = Amplitude *(1-cos(pi*Freq*t[m]))*cos(2*pi*Freq*t[m])\n\n\treturn P\n\n\n\ndef\tricker(t,ts,fsavg):\n\t\"\"\"\n\tRicker Pulse\n\n\t@param t time vector\n\t@param ts temporal delay\n\t@param fsavg pulse width parameter\n\n\t@return Output signal vector\n\t\"\"\"\n\n\ta = fsavg*pi*(t-ts)\n\ta2 = a*a\n\treturn ((1.0-2.0*a2)*np.exp(-a2))\n\n##\n\n\n\nclass NewImage:\n\t\"\"\"\n\tClass NewImage: Definition of the Main Geometric Scenario.\n\t\"\"\"\n\t\n\tdef __init__(self, Width=40, Height=40,Pixel_mm=10,label=0,SPML=False):\n\t\t\"\"\"\n\t\tConstructor of the Class NewImage\n\n\t\t@param Width Width of the Scenario\n\t\t@param Height Height of the Scenario\n\t\t@param Pixel_mm Ratio Pixel per mm\n\t\t@param label Label\n\t\t@param SPML Flag used to indicate the boundary conditions\n\n\n\t\t\"\"\"\n\t\t## Width of the Scenario\n\t\tself.Width\t\t = Width\n\t\t## Height of the Scenario\n\t\tself.Height\t\t = Height\n\t\t## Ratio Pixel per mm\n\t\tself.Pixel_mm\t = Pixel_mm\n\t\t## Label\n\t\tself.Label \t = label\n\t\t\t\n\t\t## Flag used to indicate the boundary conditions\n\t\tself.SPML\t\t = SPML\n\n\t\t## Dimension 1 of the Scenario Matrix\n\t\tself.M\t\t\t = int(self.Height * self.Pixel_mm)\n\t\t## Dimension 2 od the Scenario Matrix\n\t\tself.N\t\t\t = int(self.Width * self.Pixel_mm)\n\t\t\n\t\t## Scenarion Matrix (MxN)\n\t\tself.I\t\t\t = np.ones((self.M,self.N),dtype=np.uint8)*label\n\t\tself.Itemp\t\t = 0\n\t\t\n\t\t## Size of the Boundary Layer\n\t\tself.Tap = 0\n\n\t\t## Configure if boundary layers will be treated as absorbing layers or air layers.\n\t\t#\n\t\t# False: Absorbing layers\n\t\t#\n\t\t# True : Air boundaries\n\t\tself.AirBoundary = False\n\n\n\t\t\n\tdef createLayer(self, centerW, 
centerH, Width, Height, label, Theta=0):\n\t\t\"\"\"\n\t\tCreate a Layer\n\n\t\t@param centerW center in width-axis of the Layer\n\t\t@param centerH center in height-axis of the Layer\n\t\t@param Width Width of the Layer\n\t\t@param Height Height of the Layer\n\t\t@param label Label of the layer\n\t\t@param Theta Rotation Angle\n\t\t\"\"\"\n\n\n\t\ta = int(Height*self.Pixel_mm/2.0) \n\t\tb = int(Width*self.Pixel_mm/2.0) \n\t\tfor\t x in range(-a,a):\n\t\t\tfor y in range(-b,b):\n\t\t\t\ttempX = round (x + centerH*self.Pixel_mm)\n\t\t\t\ttempY = round (y + centerW*self.Pixel_mm)\n\t\t\t\tself.I[tempX,tempY] = label\n\n\t\tif Theta != 0:\n\t\t\tself.I = imrotate(self.I,Theta,interp='nearest')\n\n\t\t\n\t\t\n\tdef createABS(self,Tap):\n\t\t\"\"\"\n\t\tCreate the boundary layers depending on the boundary conditions required\n\n\t\t@param Tap Layer Size\n\n\n\t\t\"\"\"\n\n\t\tself.Tap\t\t = Tap\n\t\tself.SPML\t\t = True\n\n\t\tself.AirBoundary = False\n\t\t\n\t\tself.M, self.N = np.shape(self.I)\n\n\t\tTP\t\t = round(Tap* self.Pixel_mm )\n\t\tM_pml\t\t = int( self.M\t + 2*TP )\n\t\tN_pml\t\t = int( self.N\t + 2*TP )\n\t\n\t\tself.Itemp\t\t = 255.0*np.ones((M_pml,N_pml),dtype=np.uint8)\n\t\tself.Itemp[TP : M_pml-TP, TP : N_pml-TP] = np.copy(self.I)\n\n\t\t\n\nclass Material:\n\t\"\"\"\n\tClass Material: Definition of a material\n\n\t@param name Material Name\n\t@param rho Density (kg/m3)\n\t@param c11 C11 (Pa)\n\t@param c12 C12 (Pa)\n\t@param c22 C22 (Pa)\n\t@param c44 C44 (Pa)\n\t@param eta_v Bulk Viscosity Constant (Pa s)\n\t@param eta_s Shear Viscosity Constant (Pa s)\n\t@param label Material Label\n\n\t\"\"\"\n\tdef __init__(self, name=\"Water\",rho=1000,c11=2.19e9,c12=0.0,c22=0.0,c44=0.0,eta_v=0, eta_s=0,label=0):\n\t\t\n\t\t\"\"\"\n\t\tConstructor of the Material object\n\t\t\"\"\"\n\t\t## Material Name\n\t\tself.name\t= name\n\n\t\t##Density (kg/m3)\n\t\tself.rho\t= rho\n\n\t\t## C11 (Pa)\n\t\tself.c11\t= c11\n\n\t\t## C12 (Pa)\n\t\tself.c12\t= c12\n\n\t\t## C22 (Pa)\n\t\tself.c22\t= c22\n\n\t\t## C44 (Pa)\n\t\tself.c44\t= c44\n\n\t\t## Longitudinal Velocity (m/s)\n\t\tself.VL\t\t= sqrt( c11/rho )\n\n\t\t## Shear Velocity (m/s)\n\t\tself.VT\t\t= sqrt( c44/rho )\n\t\t\n\t\t## Bulk Viscosity Constant (Pa s)\n\t\tself.eta_v = eta_v\n\t\t\n\t\t## Shear Viscosity Constant (Pa s)\n\t\tself.eta_s = eta_s\n\n\t\t## Material Label\n\t\tself.Label\t= label\n\t\t\n\tdef __str__(self):\n\t\t\treturn \"Material:\" \n\n\tdef __repr__(self):\n\t\t\treturn \"Material:\" \n\t\t\n\n\nclass Source:\n\t\"\"\"\n\tClass Source: Define the Inspection Type\n\n\t@param TypeLaunch Type of Inspection: Transmission or PulseEcho\n\n\t\"\"\"\n\tdef __init__(self,TypeLaunch = 'Transmission'):\n\n\t\t## Type of Inspection: Transmission or PulseEcho\n\t\tself.TypeLaunch\t\t = TypeLaunch\n\n\t\t## Define the location of the transducers in function of the type of the Inspection\n\t\tself.Theta\t\t\t = 0\n\n\t\t\n\t\tif\t self.TypeLaunch == 'PulseEcho':\n\t\t\tself.pulseEcho()\n\t\t\t\n\t\telif self.TypeLaunch == 'Transmission':\n\t\t\tself.transmission()\n\t\t\n\tdef __str__(self):\n\t\treturn \"Source: \" \n\n\tdef __repr__(self):\n\t\treturn \"Source: \"\n\t\t\n\t\t\n\tdef pulseEcho(self):\n\t\t\"\"\"\n\t\tDefine Theta for PulseEcho Inspection. PulseEcho Inspection uses the same transducer acting as emitter and as receiver\n\t\t\"\"\"\n\t\tself.Theta = [270*pi/180, 270*pi/180]\n\t\t\n\n\tdef transmission(self):\n\t\t\"\"\"\n\t\tDefine Theta for Transmission Inspection. 
Transmission uses two transducers, one used as emitter and another as receiver\n\t\t\"\"\"\n\t\tself.Theta = [270*pi/180, 90*pi/180]\n\t\t\n\t\t\n\t\t\n\nclass Transducer:\n\t\"\"\"\n\tClass Transducer: Definition of the Transducer Object\n\n\t@param Size Transducer Size\n\t@param Offset Offset position of the Transducer. By default is set to zero\n\t@param BorderOffset Border offset position of the Transducer. By default is set to zero\n\t@param Location Location is set to zero that indicates Up location\n\t@param name Transducer Name\n\n\n\t\"\"\"\n\tdef __init__(self, Size = 10, Offset=0, BorderOffset=0, Location=0, name = 'emisor'):\n\t\t\"\"\"\n\t\tConstructor of the Class Transducer\n\t\t\"\"\"\n\n\t\t# Location = 0 => Top\n\t\t\n\t\t## Transducer Size\n\t\tself.Size\t\t = Size\n\t\t\n\t\t## Offset position of the Transducer. By default is set to zero\n\t\t#\n\t\t# This offset is measured taking into account the center of the Scenario in the width-axis\n\t\t#\n\t\t# Positive Values indicate offsets toward the right\n\t\t#\n\t\t# Negative values indicate offsets toward the left\n\t\tself.Offset\t\t = Offset\n\t\t\n\t\t## Border offset position of the Transducer. By default is set to zero\n\t\t#\n\t\t# This border offset takes into account the center of the Scenario in the width axis\n\t\t# but this offset is measured in direction of the height-axis\n\t\t#\n\t\t# Only Positive values must be defined.\n\t\tself.BorderOffset = BorderOffset\n\t\t\n\t\t##Size of the transducer in Pixels\n\t\tself.SizePixel\t = 0\n\t\t\n\t\t## Location-> 0: Top. This version only works when the location=0\n\t\tself.Location\t = Location\n\t\t\n\t\t## Name of the transducer\n\t\tself.name = name\n\t\t\n\t\t\n\t\n\tdef __str__(self):\n\t\treturn \"Transducer: \" \n\n\tdef __repr__(self):\n\t\treturn \"Transducer: \"\n\n\t\n####################################################################################\n\t\n\t\t\nclass Signal:\n\t\"\"\"\n\tClass Signal: Signal Definition (Source Input for the Simulation)\n\n\t@param Amplitude Signal Amplitude\n\t@param Frequency Signal Frequency\n\t@param Name Name of the Signal: RaisedCosinePulse or RickerPulse\n\t@param ts Time Delay: used only for RickerPulse\n\n\n\t\"\"\"\n\tdef __init__(self, Amplitude=1, Frequency=1e6, name =\"RaisedCosinePulse\", ts=1):\n\n\t\t## Signal Amplitude\n\t\tself.Amplitude = Amplitude\n\t\t\n\t\t## Signal Frequency\n\t\tself.Frequency = Frequency\n\t\t\n\t\t## Name of the Signal: RaisedCosinePulse or RickerPulse\n\t\tself.name = name\n\n\t\t## Time Delay: used only for RickerPulse\n\t\tif ts == 1:\t\t\n\t\t\tself.ts = 3.0/Frequency;\n\t\t\n\t\t\n\t\n\tdef __str__(self):\n\t\treturn \"Signal: \" \n\n\tdef __repr__(self):\n\t\treturn \"Signal: \"\n\t\t\n\t\t\n\n\tdef generate(self,t):\n\t\t\"\"\"\n\t\tGenerate the signal waveform\n\n\t\t@param t vector time\n\t\t@return signal vector with the same length as the vector time\n\n\t\t\"\"\"\n\n\t\tif self.name == \"RaisedCosinePulse\":\n\t\t\treturn RaisedCosinePulse(t, self.Frequency, self.Amplitude)\n\t\telif self.name == \"RickerPulse\":\n\t\t\treturn ricker(t, self.ts, self.Frequency)\n\t\t\t\n\tdef saveSignal(self,t):\t\n\t\t\"\"\"\n\t\tSave the signal waveform into the object\n\t\t@param t vector time\n\n\t\t\"\"\"\n\t\tself.time_signal = self.generate(t)\n\t\t\t\t\n\n\n\n\n######################################\nclass Inspection:\n\t\"\"\"\n\tClass Inspection: used for the configuration of the inspections to be emulated\n\t\"\"\"\n\t\n\n\tdef 
__init__(self):\n\t\t\"\"\"\n\t\tConstructor of the Class Inspection\n\t\t\"\"\"\n\n\t\t## Position of the Transducer (Angle)\n\t\tself.Theta\t= 0\n\t\t\n\t\t## Vector x-axis Position of the Transducer\n\t\tself.XL\t\t= 0\n\t\t\n\t\t## Vector y-axis Position of the Transducer\n\t\tself.YL\t\t= 0\n\t\t\n\t\t##\n\t\tself.IR\t\t= 0\n\t\t\n\n\tdef __str__(self):\n\t\treturn \"Inspection: \" \n\n\tdef __repr__(self):\n\t\treturn \"Inspection: \"\n\t\t\n\t\n\n\tdef setTransmisor(self, source, transducer, x2, y2, X0, Y0):\n\n\t\tself.Theta\t= source.Theta\n\n\t\tNtheta\t\t= np.size(self.Theta,0)\n\t\tNXL\t\t\t= int(2*transducer.SizePixel)\n\n\t\txL\t\t\t= np.zeros((NXL,),dtype=np.float32)\n\t\tyL\t\t\t= np.zeros((NXL,),dtype=np.float32)\n\n\t\tfor m in range(0,Ntheta):\n\n\t\t\tif np.abs(np.cos(self.Theta[m])) < 1e-5:\n\t\t\t\tyL = np.linspace(y2[m]-transducer.SizePixel,y2[m]+transducer.SizePixel,num=NXL, endpoint=True)\n\t\t\t\txL[:] = x2[m]*np.ones((NXL,),dtype=np.float32)\n\n\n\t\t\telif np.abs(np.cos(self.Theta[m])) == 1:\n\t\t\t\txL[:] = np.linspace(x2[m]-transducer.SizePixel, x2[m]+transducer.SizePixel,num=NXL, endpoint=True)\n\t\t\t\tyL[:] = y2[m] - ( (x2[m]-X0 )/( y2[m]-Y0 ) )*( xL[:]-x2[m] )\n\n\t\t\telse:\n\t\t\t\txL[:] = np.linspace(x2[m]-(transducer.SizePixel*np.abs(np.cos(self.Theta[m]))),x2[m]+(transducer.SizePixel*np.abs(np.cos(self.Theta[m]))), num=NXL, endpoint=True )\n\t\t\t\tyL[:] = y2[m] - ( (x2[m]-X0 )/( y2[m]-Y0 )\t)*( xL[:]-x2[m] )\n\n\t\t\tif m==0:\n\t\t\t\tself.XL\t\t= np.zeros((np.size(xL,0),Ntheta),dtype=np.float32)\n\t\t\t\tself.YL\t\t= np.zeros((np.size(xL,0),Ntheta),dtype=np.float32)\n\n\n\t\t\tself.XL[:,m] = (np.around(xL[:]))\n\t\t\tself.YL[:,m] = (np.around(yL[:]))\n\t\t\t\n\n\n\tdef addOffset(self, image, transducer, NRI):\n\t\t\"\"\"\n\t\tHandle Offset\n\n\t\t\"\"\"\n\t\tNXL\t = np.size(self.XL,0)\n\t\tNtheta = np.size(self.Theta,0)\n\t\t\n\t\tM_pml, N_pml = np.shape(image.Itemp)\n\n\t\tself.YL +=\t (np.around(transducer.Offset * image.Pixel_mm * NRI / float(N_pml)))\n\n\t\tself.IR\t\t = np.zeros((Ntheta,Ntheta),dtype=np.float32)\n\t\tB\t\t\t = list(range(0,Ntheta))\n\t\tself.IR[:,0] = np.int32(B[:])\n\n\t\tfor i in range(1,Ntheta):\n\t\t\tB = np.roll(B,-1)\n\t\t\tself.IR[:,i] = np.int32(B)\n\t\t\t\n\tdef addBorderOffset(self, image, transducer, MRI):\n\t\t\"\"\"\n\t\tHandle Border Offset\n\n\t\t\"\"\"\n\n\t\tM_pml, N_pml = np.shape(image.Itemp)\n\t\tratio = float(MRI) / float(M_pml)\n\t\t\n\t\tself.XL[:,0] += (np.around(transducer.BorderOffset * image.Pixel_mm * ratio) )\n\t\tself.XL[:,1] -= (np.around(transducer.BorderOffset * image.Pixel_mm * ratio) )\n\t\t\n\tdef flip(self):\n\t\tself.XL = np.fliplr(self.XL)\n\n\n\tdef SetReception(self,T):\n\n\t\tReceptorX = (self.XL)\n\t\tReceptorY = (self.YL)\n\t\tM,N\t\t = np.shape(ReceptorX)\n\t\ttemp = np.zeros((M,N-1),dtype=np.float32)\n\t\t\n\t\tfor\t mm\t in range(0,M):\n\t\t\tfor ir in range(0,N-1):\n\t\t\t\t\n\t\t\t\ttemp[mm,ir]\t =\t T[ int(ReceptorX[ mm,int(self.IR[0,ir+1]) ] ) , int(ReceptorY[ mm,int(self.IR[0,ir+1]) ]) ]\n\t\t\n\t\tif self.Field:\n\t\t\treturn temp.transpose()\n\t\telse:\n\t\t\treturn np.mean(temp,0)\n\t\t\n\t\t\n\tdef SetReceptionVector(self, T, x, y):\t\n\t\tM\t\t = np.size(x)\n\t\ttemp = np.zeros((M,),dtype=np.float32)\n\t\tfor\t mm\tin range(0,M):\n\t\t\ttemp[mm] = T[(int(x[mm])),(int(y[mm]))]\n\t\t\n\t\treturn temp\t\n\t\t\n\t\t\n\t\t\t\n\t\t\n\nclass SimulationModel:\n\t\"\"\"\n\tClass Simulation: setup the parameters for the numerical simulation \n\n\tUsage:\n\t\t- First Define an 
Instance of the SimulationModel Object\n\t\t- Execute the method class: jobParameters using as input the materials list\n\t\t- Execute the method class: createNumericalModel using as input the scenario\n\t\t- Execute the method class: initReceivers to initialize the receivers\n\t\t- Execute the method class: save signal using as input the attribute simModel.t\n\t\t- Save the Device into the simModel.Device attribute\n\n\n\t@param TimeScale Scale Time Factor\n\t@param MaxFreq Maximum Frequency\n\t@param PointCycle Points per Cycle\n\t@param SimTime Time Simulation\n\t@param SpatialScale Spatial Scale: 1 -> meters, 1e-3 -> millimeters\n\n\n\t\"\"\"\n\tdef __init__(self,TimeScale=1, MaxFreq=2e6, PointCycle=10, SimTime=50e6, SpatialScale=1e-3):\n\n\t\t## Scale Time Factor\n\t\tself.TimeScale\t= TimeScale\n\t\t\n\t\t## Maximum Frequency\n\t\tself.MaxFreq\t= MaxFreq\t # MHz\n\t\t\n\t\t## Points per Cycle\n\t\tself.PointCycle = PointCycle\n\t\t\n\t\t## Time Simulation\n\t\tself.SimTime\t= SimTime\t # microseconds\n\n\t\t## Spatial Scale: 1 -> meters, 1e-3 -> millimeters\n\t\tself.SpatialScale = SpatialScale\n\t\t\n\t\t## Spatial Discretization\n\t\tself.dx = 0\n\n\t\t## Temporal Discretization\n\t\tself.dt = 0\n\t\t\n\t\tself.Rgrid = 0\n\t\tself.TapG = 0\n\t\tself.t = 0\n\t\tself.Ntiempo = 0\n\t\t\n\t\tself.MRI,self.NRI = (0,0)\n\t\t\n\t\tself.receiver_signals = 0\n\t\tself.Device = 'CPU'\n\t\t\n\t\tself.XL = 0\n\t\tself.YL = 0\n\t\t\n\t\t\n\tdef __str__(self):\n\t\treturn \"Simulation Model: \" \n\n\tdef __repr__(self):\n\t\treturn \"Simulation Model: \"\n\t\t\n\tdef jobParameters(self,materiales):\n\t\t\"\"\"\n\t\tDefine Main Simulation Parameters\n\n\t\t@param materiales Materials List\n\n\t\t\n\t\t\"\"\"\n\t\tindVL = [mat.VL for mat in materiales if mat.VL > 400]\n\t\tindVT = [mat.VT for mat in materiales if mat.VT > 400]\n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\tVL\t = np.array(indVL)\n\t\tVT\t = np.array(indVT)\n\t\tV\t = np.hstack( (VL, VT) )\n\t\t\n\t\tself.dx = np.float32( np.min([V]) / (self.PointCycle*self.MaxFreq) )\n\t\tself.dt = self.TimeScale * np.float32( 0.7071 * self.dx / (\t np.max([V]) ) )\n\n\t\t\t\n\t\tself.Ntiempo = int(round(self.SimTime/self.dt))\n\t\tself.t\t= self.dt*np.arange(0,self.Ntiempo)\n\t\n\t\n\tdef createNumericalModel(self, image):\n\t\t\"\"\"\n\t\tCreate the Numerical Model\n\n\t\t@param image The Scenario Object\n\t\t\"\"\"\n\n\t\t#Spatial Scale\n\t\tMp\t\t\t =\t np.shape(image.Itemp)[0]*self.SpatialScale/image.Pixel_mm/self.dx\n\t\tself.Rgrid\t =\t Mp/np.shape(image.Itemp)[0]\n\t\t\n\t\tself.TapG\t =\t np.around(image.Tap * self.Rgrid * image.Pixel_mm)\n\t\tself.Im\t\t =\t imresize(image.Itemp, self.Rgrid, interp='nearest')\n\t\tself.MRI,self.NRI =\t np.shape(self.Im)\n\t\n\t\tprint(\"dt: \" + str(self.dt) + \" dx: \" + str(self.dx) + \" Grid: \" + str(self.MRI) + \" x \" + str(self.NRI))\n\t\n\t\t\n\tdef initReceivers(self):\n\t\t\"\"\"\n\t\tInitialize the receivers\n\n\t\t\"\"\"\n\t\tself.receiver_signals = 0\n\t\n\t\n\tdef setDevice(self,Device):\n\t\t\"\"\"\n\t\tSet the Computation Device\n\n\t\t@param Device Device to be used\n\n\t\tDefine the device used to compute the simulations:\n\t\t\t - \"CPU\" : uses the global memory in the CPU\n\t\t\t - \"GPU_Global\" : uses the global memory in the GPU\n\t\t\t - \"GPU_Local\" : uses the local memory in the GPU\n\n\t\t\"\"\"\n\n\t\tif Device == 0:\n\t\t\tself.Device = 'CPU'\n\t\telif Device ==1:\n\t\t\tself.Device = 'GPU_Global'\n\t\telif Device ==2: \n\t\t\tself.Device = 'GPU_Local'\n\t\t\t\n\t\t\n\t\t\n\t\n\n\t\t\n\n\n\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.copy",
"numpy.ones",
"numpy.hstack",
"numpy.exp",
"numpy.shape",
"numpy.roll",
"numpy.mean",
"numpy.min",
"numpy.arange",
"numpy.size",
"numpy.cos",
"numpy.int32",
"numpy.linspace",
"numpy.around",
"numpy.fliplr"
]
] |
xianruizhong/SpHAM
|
[
"c85a5fe023bd0d760eb42c896cd57ecc07014087"
] |
[
"generate_exampleA.py"
] |
[
"import numpy as np\ndef generate_A(filename1, filename2, noise = 'gau'):\n exp_T = 4000\n big_y_true_gau = []\n big_y_noise_gau = []\n big_y_true_t2 = []\n big_y_noise_t2 = []\n for times in range(100):\n y_true_gau = np.zeros((exp_T, 1, 1))\n y_true_gau[0] = np.random.rand()\n y_true_gau[1] = np.random.rand()\n y_true_t2 = np.zeros((exp_T, 1, 1))\n y_true_t2[0] = np.random.rand()\n y_true_t2[1] = np.random.rand()\n y_noise_gau = y_true_gau.copy()\n y_noise_t2 = y_true_t2.copy()\n e_gau = np.random.normal(0, 0.3, (exp_T, 1))\n e_t2 = np.random.standard_t(2, (exp_T,1))\n y_noise_gau[0] = y_true_gau[0] + e_gau[0]\n y_noise_gau[1] = y_true_gau[1] + e_gau[1]\n y_noise_t2[0] = y_true_t2[0] + e_t2[0]\n y_noise_t2[1] = y_true_t2[1] + e_t2[1]\n for t in range(2, exp_T):\n y_true_gau[t] = (3./2.)*np.sin(np.pi / 2. * y_noise_gau[t - 1]) - np.sin(np.pi / 2. * y_noise_gau[t - 2])\n y_noise_gau[t] = y_true_gau[t] + 2* e_gau[t]\n\n y_true_t2[t] = np.sin(np.pi / 2. * y_noise_t2[t - 1]) -np.sin(np.pi / 2. * y_noise_t2[t - 2])\n y_noise_t2[t] = y_true_t2[t] + 2* e_t2[t]\n big_y_true_gau.append(y_true_gau)\n big_y_noise_gau.append(y_noise_gau)\n big_y_true_t2.append(y_true_t2)\n big_y_noise_t2.append(y_noise_t2)\n if noise == 'gau':\n with open(filename1, 'wb') as f:\n np.save(f, np.array(big_y_true_gau))\n with open(filename2, 'wb') as f:\n np.save(f, np.array(big_y_noise_gau))\n else:\n with open(filename1, 'wb') as f:\n np.save(f, np.array(big_y_true_t2))\n with open(filename2, 'wb') as f:\n np.save(f, np.array(big_y_noise_t2))\n"
] |
[
[
"numpy.random.normal",
"numpy.sin",
"numpy.array",
"numpy.random.rand",
"numpy.zeros",
"numpy.random.standard_t"
]
] |
akashsengupta1997/GraphCMR
|
[
"0b8b05be4f711995ba50e414effbde98b6b11c5b"
] |
[
"evaluate_3dpw_mine.py"
] |
[
"import os\nimport numpy as np\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport argparse\nimport cv2\n\nimport config\nfrom utils import Mesh\nfrom models import CMR\nfrom models.smpl_from_lib import SMPL\nfrom utils.pose_utils import compute_similarity_transform_batch, \\\n scale_and_translation_transform_batch\nfrom utils.cam_utils import orthographic_project_torch, undo_keypoint_normalisation\nfrom datasets.my_3dpw_eval_dataset import PW3DEvalDataset\n\n\ndef evaluate_3dpw(model,\n eval_dataset,\n metrics,\n device,\n vis_save_path,\n num_workers=4,\n pin_memory=True,\n vis_every_n_batches=1000):\n\n eval_dataloader = DataLoader(eval_dataset,\n batch_size=1,\n shuffle=False,\n drop_last=True,\n num_workers=num_workers,\n pin_memory=pin_memory)\n\n smpl = SMPL(config.SMPL_MODEL_DIR, batch_size=1)\n smpl_male = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='male')\n smpl_female = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='female')\n smpl.to(device)\n smpl_male.to(device)\n smpl_female.to(device)\n\n J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()\n J_regressor_batch = J_regressor[None, :].to(device)\n\n if 'pve' in metrics:\n pve_smpl_sum = 0.0\n pve_graph_sum = 0.0\n pve_smpl_per_frame = []\n pve_graph_per_frame = []\n\n if 'pve_scale_corrected' in metrics:\n pve_scale_corrected_smpl_sum = 0.0\n pve_scale_corrected_graph_sum = 0.0\n pve_scale_corrected_smpl_per_frame = []\n pve_scale_corrected_graph_per_frame = []\n\n if 'pve_pa' in metrics:\n pve_pa_smpl_sum = 0.0\n pve_pa_graph_sum = 0.0\n pve_pa_smpl_per_frame = []\n pve_pa_graph_per_frame = []\n\n if 'pve-t' in metrics:\n pvet_sum = 0.0\n pvet_per_frame = []\n\n if 'pve-t_scale_corrected' in metrics:\n pvet_scale_corrected_sum = 0.0\n pvet_scale_corrected_per_frame = []\n\n if 'mpjpe' in metrics:\n mpjpe_smpl_sum = 0.0\n mpjpe_graph_sum = 0.0\n mpjpe_smpl_per_frame = []\n mpjpe_graph_per_frame = []\n\n if 'mpjpe_scale_corrected' in metrics:\n mpjpe_scale_corrected_smpl_sum = 0.0\n mpjpe_scale_corrected_graph_sum = 0.0\n mpjpe_scale_corrected_smpl_per_frame = []\n mpjpe_scale_corrected_graph_per_frame = []\n\n if 'j3d_rec_err' in metrics:\n j3d_rec_err_smpl_sum = 0.0\n j3d_rec_err_graph_sum = 0.0\n j3d_rec_err_smpl_per_frame = []\n j3d_rec_err_graph_per_frame = []\n\n if 'pve_2d' in metrics:\n pve_2d_smpl_sum = 0.0\n pve_2d_graph_sum = 0.0\n\n if 'pve_2d_scale_corrected' in metrics:\n pve_2d_scale_corrected_smpl_sum = 0.0\n pve_2d_scale_corrected_graph_sum = 0.0\n\n if 'pve_2d_pa' in metrics:\n pve_2d_pa_smpl_sum = 0.0\n pve_2d_pa_graph_sum = 0.0\n\n num_samples = 0\n num_vertices = 6890\n num_joints3d = 14\n\n model.eval()\n for batch_num, samples_batch in enumerate(tqdm(eval_dataloader)):\n # ------------------------------- TARGETS and INPUTS -------------------------------\n input = samples_batch['input']\n input = input.to(device)\n\n target_pose = samples_batch['pose'].to(device)\n target_shape = samples_batch['shape'].to(device)\n target_gender = samples_batch['gender'][0]\n\n if target_gender == 'm':\n target_smpl_output = smpl_male(body_pose=target_pose[:, 3:],\n global_orient=target_pose[:, :3],\n betas=target_shape)\n target_vertices = target_smpl_output.vertices\n target_reposed_smpl_output = smpl_male(betas=target_shape)\n target_reposed_vertices = target_reposed_smpl_output.vertices\n target_joints_h36m = torch.matmul(J_regressor_batch, target_vertices)\n 
target_joints_h36mlsp = target_joints_h36m[:, config.H36M_TO_J14, :]\n elif target_gender == 'f':\n target_smpl_output = smpl_female(body_pose=target_pose[:, 3:],\n global_orient=target_pose[:, :3],\n betas=target_shape)\n target_vertices = target_smpl_output.vertices\n target_reposed_smpl_output = smpl_female(betas=target_shape)\n target_reposed_vertices = target_reposed_smpl_output.vertices\n target_joints_h36m = torch.matmul(J_regressor_batch, target_vertices)\n target_joints_h36mlsp = target_joints_h36m[:, config.H36M_TO_J14, :]\n\n # ------------------------------- PREDICTIONS -------------------------------\n pred_vertices, pred_vertices_smpl, pred_camera, pred_rotmat, pred_betas = model(input)\n pred_vertices_projected2d = orthographic_project_torch(pred_vertices, pred_camera)\n pred_vertices_projected2d = undo_keypoint_normalisation(pred_vertices_projected2d, input.shape[-1])\n pred_vertices_smpl_projected2d = orthographic_project_torch(pred_vertices_smpl, pred_camera)\n pred_vertices_smpl_projected2d = undo_keypoint_normalisation(pred_vertices_smpl_projected2d, input.shape[-1])\n pred_reposed_smpl_output = smpl(betas=pred_betas)\n pred_reposed_vertices = pred_reposed_smpl_output.vertices\n\n pred_joints_h36m = torch.matmul(J_regressor_batch, pred_vertices)\n pred_joints_h36mlsp = pred_joints_h36m[:, config.H36M_TO_J14, :]\n\n pred_joints_smpl_h36m = torch.matmul(J_regressor_batch, pred_vertices_smpl)\n pred_joints_smpl_h36mlsp = pred_joints_smpl_h36m[:, config.H36M_TO_J14, :]\n\n # Numpy-fying\n target_vertices = target_vertices.cpu().detach().numpy()\n target_reposed_vertices = target_reposed_vertices.cpu().detach().numpy()\n target_joints_h36mlsp = target_joints_h36mlsp.cpu().detach().numpy()\n\n pred_vertices = pred_vertices.cpu().detach().numpy()\n pred_vertices_smpl = pred_vertices_smpl.cpu().detach().numpy()\n pred_vertices_projected2d = pred_vertices_projected2d.cpu().detach().numpy()\n pred_vertices_smpl_projected2d = pred_vertices_smpl_projected2d.cpu().detach().numpy()\n pred_reposed_vertices = pred_reposed_vertices.cpu().detach().numpy()\n pred_joints_h36mlsp = pred_joints_h36mlsp.cpu().detach().numpy()\n pred_joints_smpl_h36mlsp = pred_joints_smpl_h36mlsp.cpu().detach().numpy()\n\n # ------------------------------- METRICS -------------------------------\n\n if 'pve' in metrics:\n pve_smpl_batch = np.linalg.norm(pred_vertices_smpl - target_vertices, axis=-1) # (1, 6890)\n pve_graph_batch = np.linalg.norm(pred_vertices - target_vertices, axis=-1)\n pve_smpl_sum += np.sum(pve_smpl_batch) # scalar\n pve_graph_sum += np.sum(pve_graph_batch)\n pve_smpl_per_frame.append(np.mean(pve_smpl_batch, axis=-1))\n pve_graph_per_frame.append(np.mean(pve_graph_batch, axis=-1))\n\n # Scale and translation correction\n if 'pve_scale_corrected' in metrics:\n pred_vertices_smpl_sc = scale_and_translation_transform_batch(pred_vertices_smpl,\n target_vertices)\n pred_vertices_sc = scale_and_translation_transform_batch(pred_vertices,\n target_vertices)\n pve_sc_smpl_batch = np.linalg.norm(pred_vertices_smpl_sc - target_vertices,\n axis=-1) # (1, 6890)\n pve_sc_graph_batch = np.linalg.norm(pred_vertices_sc - target_vertices,\n axis=-1) # (1, 6890)\n pve_scale_corrected_smpl_sum += np.sum(pve_sc_smpl_batch) # scalar\n pve_scale_corrected_graph_sum += np.sum(pve_sc_graph_batch) # scalar\n pve_scale_corrected_smpl_per_frame.append(np.mean(pve_sc_smpl_batch, axis=-1))\n pve_scale_corrected_graph_per_frame.append(np.mean(pve_sc_graph_batch, axis=-1))\n\n # Procrustes analysis\n if 'pve_pa' in 
metrics:\n pred_vertices_smpl_pa = compute_similarity_transform_batch(pred_vertices_smpl, target_vertices)\n pred_vertices_pa = compute_similarity_transform_batch(pred_vertices, target_vertices)\n pve_pa_smpl_batch = np.linalg.norm(pred_vertices_smpl_pa - target_vertices, axis=-1) # (1, 6890)\n pve_pa_graph_batch = np.linalg.norm(pred_vertices_pa - target_vertices, axis=-1) # (1, 6890)\n pve_pa_smpl_sum += np.sum(pve_pa_smpl_batch) # scalar\n pve_pa_graph_sum += np.sum(pve_pa_graph_batch) # scalar\n pve_pa_smpl_per_frame.append(np.mean(pve_pa_smpl_batch, axis=-1))\n pve_pa_graph_per_frame.append(np.mean(pve_pa_graph_batch, axis=-1))\n\n if 'pve-t' in metrics:\n pvet_batch = np.linalg.norm(pred_reposed_vertices - target_reposed_vertices, axis=-1)\n pvet_sum += np.sum(pvet_batch)\n pvet_per_frame.append(np.mean(pvet_batch, axis=-1))\n\n # Scale and translation correction\n if 'pve-t_scale_corrected' in metrics:\n pred_reposed_vertices_sc = scale_and_translation_transform_batch(pred_reposed_vertices,\n target_reposed_vertices)\n pvet_scale_corrected_batch = np.linalg.norm(pred_reposed_vertices_sc - target_reposed_vertices,\n axis=-1) # (bs, 6890)\n pvet_scale_corrected_sum += np.sum(pvet_scale_corrected_batch) # scalar\n pvet_scale_corrected_per_frame.append(np.mean(pvet_scale_corrected_batch, axis=-1))\n\n if 'mpjpe' in metrics:\n mpjpe_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp - target_joints_h36mlsp, axis=-1) # (bs, 14)\n mpjpe_graph_batch = np.linalg.norm(pred_joints_h36mlsp - target_joints_h36mlsp, axis=-1) # (bs, 14)\n mpjpe_smpl_sum += np.sum(mpjpe_smpl_batch)\n mpjpe_graph_sum += np.sum(mpjpe_graph_batch)\n mpjpe_smpl_per_frame.append(np.mean(mpjpe_smpl_batch, axis=-1))\n mpjpe_graph_per_frame.append(np.mean(mpjpe_graph_batch, axis=-1))\n\n # Scale and translation correction\n if 'mpjpe_scale_corrected' in metrics:\n pred_joints_smpl_h36mlsp_sc = scale_and_translation_transform_batch(pred_joints_smpl_h36mlsp,\n target_joints_h36mlsp)\n pred_joints_h36mlsp_sc = scale_and_translation_transform_batch(pred_joints_h36mlsp,\n target_joints_h36mlsp)\n mpjpe_scale_corrected_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp_sc - target_joints_h36mlsp,\n axis=-1) # (bs, 14)\n mpjpe_scale_corrected_graph_batch = np.linalg.norm(pred_joints_h36mlsp_sc - target_joints_h36mlsp,\n axis=-1) # (bs, 14)\n mpjpe_scale_corrected_smpl_sum += np.sum(mpjpe_scale_corrected_smpl_batch)\n mpjpe_scale_corrected_graph_sum += np.sum(mpjpe_scale_corrected_graph_batch)\n mpjpe_scale_corrected_smpl_per_frame.append(np.mean(mpjpe_scale_corrected_smpl_batch, axis=-1))\n mpjpe_scale_corrected_graph_per_frame.append(np.mean(mpjpe_scale_corrected_graph_batch, axis=-1))\n\n # Procrustes analysis\n if 'j3d_rec_err' in metrics:\n pred_joints_smpl_h36mlsp_pa = compute_similarity_transform_batch(pred_joints_smpl_h36mlsp,\n target_joints_h36mlsp)\n pred_joints_h36mlsp_pa = compute_similarity_transform_batch(pred_joints_h36mlsp, target_joints_h36mlsp)\n j3d_rec_err_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp_pa - target_joints_h36mlsp, axis=-1) # (bs, 14)\n j3d_rec_err_graph_batch = np.linalg.norm(pred_joints_h36mlsp_pa - target_joints_h36mlsp, axis=-1) # (bs, 14)\n j3d_rec_err_smpl_sum += np.sum(j3d_rec_err_smpl_batch)\n j3d_rec_err_graph_sum += np.sum(j3d_rec_err_graph_batch)\n j3d_rec_err_smpl_per_frame.append(np.mean(j3d_rec_err_smpl_batch, axis=-1))\n j3d_rec_err_graph_per_frame.append(np.mean(j3d_rec_err_graph_batch, axis=-1))\n\n if 'pve_2d' in metrics:\n pred_vertices_smpl_2d = 
pred_vertices_smpl[:, :, :2]\n pred_vertices_2d = pred_vertices[:, :, :2]\n target_vertices_2d = target_vertices[:, :, :2]\n pve_2d_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d - target_vertices_2d, axis=-1) # (bs, 6890)\n pve_2d_graph_batch = np.linalg.norm(pred_vertices_2d - target_vertices_2d, axis=-1) # (bs, 6890)\n pve_2d_smpl_sum += np.sum(pve_2d_smpl_batch)\n pve_2d_graph_sum += np.sum(pve_2d_graph_batch)\n\n # Scale and translation correction\n if 'pve_2d_scale_corrected' in metrics:\n pred_vertices_smpl_sc = scale_and_translation_transform_batch(pred_vertices_smpl,\n target_vertices)\n pred_vertices_sc = scale_and_translation_transform_batch(pred_vertices,\n target_vertices)\n pred_vertices_smpl_2d_sc = pred_vertices_smpl_sc[:, :, :2]\n pred_vertices_2d_sc = pred_vertices_sc[:, :, :2]\n target_vertices_2d = target_vertices[:, :, :2]\n pve_2d_sc_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d_sc - target_vertices_2d,\n axis=-1) # (bs, 6890)\n pve_2d_sc_graph_batch = np.linalg.norm(pred_vertices_2d_sc - target_vertices_2d,\n axis=-1) # (bs, 6890)\n pve_2d_scale_corrected_smpl_sum += np.sum(pve_2d_sc_smpl_batch)\n pve_2d_scale_corrected_graph_sum += np.sum(pve_2d_sc_graph_batch)\n\n # Procrustes analysis\n if 'pve_2d_pa' in metrics:\n pred_vertices_smpl_pa = compute_similarity_transform_batch(pred_vertices_smpl, target_vertices)\n pred_vertices_pa = compute_similarity_transform_batch(pred_vertices, target_vertices)\n pred_vertices_smpl_2d_pa = pred_vertices_smpl_pa[:, :, :2]\n pred_vertices_2d_pa = pred_vertices_pa[:, :, :2]\n target_vertices_2d = target_vertices[:, :, :2]\n pve_2d_pa_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d_pa - target_vertices_2d, axis=-1) # (bs, 6890)\n pve_2d_pa_graph_batch = np.linalg.norm(pred_vertices_2d_pa - target_vertices_2d, axis=-1) # (bs, 6890)\n pve_2d_pa_smpl_sum += np.sum(pve_2d_pa_smpl_batch)\n pve_2d_pa_graph_sum += np.sum(pve_2d_pa_graph_batch)\n\n num_samples += target_pose.shape[0]\n\n # ------------------------------- VISUALISE -------------------------------\n if vis_every_n_batches is not None:\n if batch_num % vis_every_n_batches == 0:\n vis_imgs = samples_batch['vis_img'].numpy()\n vis_imgs = np.transpose(vis_imgs, [0, 2, 3, 1])\n\n fnames = samples_batch['fname']\n\n plt.figure(figsize=(16, 12))\n plt.subplot(341)\n plt.imshow(vis_imgs[0])\n\n plt.subplot(342)\n plt.imshow(vis_imgs[0])\n plt.scatter(pred_vertices_projected2d[0, :, 0], pred_vertices_projected2d[0, :, 1], s=0.1, c='r')\n\n plt.subplot(343)\n plt.imshow(vis_imgs[0])\n plt.scatter(pred_vertices_smpl_projected2d[0, :, 0], pred_vertices_smpl_projected2d[0, :, 1], s=0.1, c='r')\n\n plt.subplot(345)\n plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')\n plt.scatter(pred_vertices[0, :, 0], pred_vertices[0, :, 1], s=0.1, c='r')\n plt.gca().invert_yaxis()\n plt.gca().set_aspect('equal', adjustable='box')\n\n plt.subplot(346)\n plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')\n plt.scatter(pred_vertices_smpl[0, :, 0], pred_vertices_smpl[0, :, 1], s=0.1, c='r')\n plt.gca().invert_yaxis()\n plt.gca().set_aspect('equal', adjustable='box')\n\n plt.subplot(347)\n plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')\n plt.scatter(pred_vertices_pa[0, :, 0], pred_vertices_pa[0, :, 1], s=0.1, c='r')\n plt.gca().invert_yaxis()\n plt.gca().set_aspect('equal', adjustable='box')\n\n plt.subplot(348)\n plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')\n 
plt.scatter(pred_vertices_smpl_pa[0, :, 0], pred_vertices_smpl_pa[0, :, 1], s=0.1, c='r')\n plt.gca().invert_yaxis()\n plt.gca().set_aspect('equal', adjustable='box')\n\n plt.subplot(349)\n plt.scatter(target_reposed_vertices[0, :, 0], target_reposed_vertices[0, :, 1], s=0.1, c='b')\n plt.scatter(pred_reposed_vertices_sc[0, :, 0], pred_reposed_vertices_sc[0, :, 1], s=0.1, c='r')\n plt.gca().set_aspect('equal', adjustable='box')\n\n plt.subplot(3, 4, 10)\n for j in range(num_joints3d):\n plt.scatter(pred_joints_h36mlsp[0, j, 0], pred_joints_h36mlsp[0, j, 1], c='r')\n plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')\n plt.text(pred_joints_h36mlsp[0, j, 0], pred_joints_h36mlsp[0, j, 1], s=str(j))\n plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))\n plt.gca().invert_yaxis()\n plt.gca().set_aspect('equal', adjustable='box')\n\n plt.subplot(3, 4, 11)\n for j in range(num_joints3d):\n plt.scatter(pred_joints_h36mlsp_pa[0, j, 0], pred_joints_h36mlsp_pa[0, j, 1], c='r')\n plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')\n plt.text(pred_joints_h36mlsp_pa[0, j, 0], pred_joints_h36mlsp_pa[0, j, 1], s=str(j))\n plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))\n plt.gca().invert_yaxis()\n plt.gca().set_aspect('equal', adjustable='box')\n\n plt.subplot(3, 4, 12)\n for j in range(num_joints3d):\n plt.scatter(pred_joints_smpl_h36mlsp_pa[0, j, 0], pred_joints_smpl_h36mlsp_pa[0, j, 1], c='r')\n plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')\n plt.text(pred_joints_smpl_h36mlsp_pa[0, j, 0], pred_joints_smpl_h36mlsp_pa[0, j, 1], s=str(j))\n plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))\n plt.gca().invert_yaxis()\n plt.gca().set_aspect('equal', adjustable='box')\n\n # plt.show()\n save_fig_path = os.path.join(vis_save_path, fnames[0])\n plt.savefig(save_fig_path, bbox_inches='tight')\n plt.close()\n\n if 'pve' in metrics:\n pve_smpl = pve_smpl_sum / (num_samples * num_vertices)\n print('PVE SMPL: {:.5f}'.format(pve_smpl))\n pve_graph = pve_graph_sum / (num_samples * num_vertices)\n print('PVE GRAPH: {:.5f}'.format(pve_graph))\n pve_smpl_per_frame = np.concatenate(pve_smpl_per_frame, axis=0)\n pve_graph_per_frame = np.concatenate(pve_graph_per_frame, axis=0)\n np.save(os.path.join(save_path, 'pve_per_frame.npy'), pve_smpl_per_frame)\n np.save(os.path.join(save_path, 'pve_graph_per_frame.npy'), pve_graph_per_frame)\n\n if 'pve_scale_corrected' in metrics:\n pve_sc_smpl = pve_scale_corrected_smpl_sum / (num_samples * num_vertices)\n print('PVE SC SMPL: {:.5f}'.format(pve_sc_smpl))\n pve_sc_graph = pve_scale_corrected_graph_sum / (num_samples * num_vertices)\n print('PVE SC GRAPH: {:.5f}'.format(pve_sc_graph))\n pve_scale_corrected_smpl_per_frame = np.concatenate(pve_scale_corrected_smpl_per_frame, axis=0)\n pve_scale_corrected_graph_per_frame = np.concatenate(pve_scale_corrected_graph_per_frame, axis=0)\n np.save(os.path.join(save_path, 'pve_scale_corrected_per_frame.npy'),\n pve_scale_corrected_smpl_per_frame)\n np.save(os.path.join(save_path, 'pve_scale_corrected_graph_per_frame.npy'),\n pve_scale_corrected_graph_per_frame)\n\n if 'pve_pa' in metrics:\n pve_pa_smpl = pve_pa_smpl_sum / (num_samples * num_vertices)\n print('PVE PA SMPL: {:.5f}'.format(pve_pa_smpl))\n pve_pa_graph = pve_pa_graph_sum / (num_samples * num_vertices)\n print('PVE PA GRAPH: {:.5f}'.format(pve_pa_graph))\n pve_pa_smpl_per_frame = 
np.concatenate(pve_pa_smpl_per_frame, axis=0)\n pve_pa_graph_per_frame = np.concatenate(pve_pa_graph_per_frame, axis=0)\n np.save(os.path.join(save_path, 'pve_pa_per_frame.npy'), pve_pa_smpl_per_frame)\n np.save(os.path.join(save_path, 'pve_pa_graph_per_frame.npy'), pve_pa_graph_per_frame)\n\n if 'pve-t' in metrics:\n pvet = pvet_sum / (num_samples * num_vertices)\n print('PVE-T: {:.5f}'.format(pvet))\n pvet_per_frame = np.concatenate(pvet_per_frame, axis=0)\n np.save(os.path.join(save_path, 'pvet_per_frame.npy'), pvet_per_frame)\n\n if 'pve-t_scale_corrected' in metrics:\n pvet_sc = pvet_scale_corrected_sum / (num_samples * num_vertices)\n print('PVE-T SC: {:.5f}'.format(pvet_sc))\n pvet_scale_corrected_per_frame = np.concatenate(pvet_scale_corrected_per_frame, axis=0)\n np.save(os.path.join(save_path, 'pvet_scale_corrected_per_frame.npy'),\n pvet_scale_corrected_per_frame)\n\n if 'mpjpe' in metrics:\n mpjpe_smpl = mpjpe_smpl_sum / (num_samples * num_joints3d)\n print('MPJPE SMPL: {:.5f}'.format(mpjpe_smpl))\n mpjpe_graph = mpjpe_graph_sum / (num_samples * num_joints3d)\n print('MPJPE GRAPH: {:.5f}'.format(mpjpe_graph))\n mpjpe_smpl_per_frame = np.concatenate(mpjpe_smpl_per_frame, axis=0)\n mpjpe_graph_per_frame = np.concatenate(mpjpe_graph_per_frame, axis=0)\n np.save(os.path.join(save_path, 'mpjpe_per_frame.npy'), mpjpe_smpl_per_frame)\n np.save(os.path.join(save_path, 'mpjpe_graph_per_frame.npy'), mpjpe_graph_per_frame)\n\n if 'mpjpe_scale_corrected' in metrics:\n mpjpe_sc_smpl = mpjpe_scale_corrected_smpl_sum / (num_samples * num_joints3d)\n print('MPJPE SC SMPL: {:.5f}'.format(mpjpe_sc_smpl))\n mpjpe_sc_graph = mpjpe_scale_corrected_graph_sum / (num_samples * num_joints3d)\n print('MPJPE SC GRAPH: {:.5f}'.format(mpjpe_sc_graph))\n mpjpe_scale_corrected_smpl_per_frame = np.concatenate(\n mpjpe_scale_corrected_smpl_per_frame, axis=0)\n mpjpe_scale_corrected_graph_per_frame = np.concatenate(\n mpjpe_scale_corrected_graph_per_frame, axis=0)\n np.save(os.path.join(save_path, 'mpjpe_scale_corrected_per_frame.npy'),\n mpjpe_scale_corrected_smpl_per_frame)\n np.save(os.path.join(save_path, 'mpjpe_scale_corrected_graph_per_frame.npy'),\n mpjpe_scale_corrected_graph_per_frame)\n\n if 'j3d_rec_err' in metrics:\n j3d_rec_err_smpl = j3d_rec_err_smpl_sum / (num_samples * num_joints3d)\n print('Rec Err SMPL: {:.5f}'.format(j3d_rec_err_smpl))\n j3d_rec_err_graph = j3d_rec_err_graph_sum / (num_samples * num_joints3d)\n print('Rec Err GRAPH: {:.5f}'.format(j3d_rec_err_graph))\n j3d_rec_err_smpl_per_frame = np.concatenate(j3d_rec_err_smpl_per_frame, axis=0)\n j3d_rec_err_graph_per_frame = np.concatenate(j3d_rec_err_graph_per_frame, axis=0)\n np.save(os.path.join(save_path, 'j3d_rec_err_per_frame.npy'),\n j3d_rec_err_smpl_per_frame)\n np.save(os.path.join(save_path, 'j3d_rec_err_graph_per_frame.npy'),\n j3d_rec_err_graph_per_frame)\n\n if 'pve_2d' in metrics:\n pve_2d_smpl = pve_2d_smpl_sum / (num_samples * num_vertices)\n print('PVE 2D SMPL: {:.5f}'.format(pve_2d_smpl))\n pve_2d_graph = pve_2d_graph_sum / (num_samples * num_vertices)\n print('PVE 2D GRAPH: {:.5f}'.format(pve_2d_graph))\n\n if 'pve_2d_scale_corrected' in metrics:\n pve_2d_sc_smpl = pve_2d_scale_corrected_smpl_sum / (num_samples * num_vertices)\n print('PVE 2D SC SMPL: {:.5f}'.format(pve_2d_sc_smpl))\n pve_2d_sc_graph = pve_2d_scale_corrected_graph_sum / (num_samples * num_vertices)\n print('PVE 2D SC GRAPH: {:.5f}'.format(pve_2d_sc_graph))\n\n if 'pve_2d_pa' in metrics:\n pve_2d_pa_smpl = pve_2d_pa_smpl_sum / (num_samples * 
num_vertices)\n print('PVE 2D PA SMPL: {:.5f}'.format(pve_2d_pa_smpl))\n pve_2d_pa_graph = pve_2d_pa_graph_sum / (num_samples * num_vertices)\n print('PVE 2D PA GRAPH: {:.5f}'.format(pve_2d_pa_graph))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--checkpoint', default=None, help='Path to network checkpoint')\n parser.add_argument('--gpu', default=\"0\", type=str, help='GPU')\n args = parser.parse_args()\n\n # Device\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n # Load model\n mesh = Mesh(device=device)\n # Our pretrained networks have 5 residual blocks with 256 channels.\n # You might want to change this if you use a different architecture.\n model = CMR(mesh, 5, 256, pretrained_checkpoint=args.checkpoint, device=device)\n model.to(device)\n model.eval()\n\n # Setup evaluation dataset\n dataset_path = '/scratch2/as2562/datasets/3DPW/test'\n dataset = PW3DEvalDataset(dataset_path, img_wh=config.INPUT_RES)\n print(\"Eval examples found:\", len(dataset))\n\n # Metrics\n metrics = ['pve', 'pve-t', 'pve_pa', 'pve-t_pa', 'mpjpe', 'j3d_rec_err',\n 'pve_2d', 'pve_2d_pa', 'pve_2d_scale_corrected',\n 'pve_scale_corrected', 'pve-t_scale_corrected', 'mpjpe_scale_corrected']\n\n save_path = '/data/cvfs/as2562/GraphCMR/evaluations/3dpw'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n # Run evaluation\n evaluate_3dpw(model=model,\n eval_dataset=dataset,\n metrics=metrics,\n device=device,\n vis_save_path=save_path,\n num_workers=4,\n pin_memory=True,\n vis_every_n_batches=1000)\n\n\n\n\n\n\n\n"
] |
[
[
"matplotlib.use",
"numpy.concatenate",
"torch.device",
"numpy.linalg.norm",
"numpy.sum",
"matplotlib.pyplot.savefig",
"numpy.load",
"matplotlib.pyplot.close",
"numpy.mean",
"matplotlib.pyplot.figure",
"torch.cuda.is_available",
"numpy.transpose",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.imshow",
"torch.matmul",
"matplotlib.pyplot.subplot"
]
] |
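The evaluation loop in the code field above calls `scale_and_translation_transform_batch` for its "scale corrected" metrics, but that helper's body is not part of this record. A minimal NumPy sketch of that kind of alignment, assuming the common convention of matching centroids and RMS extents (the function name and exact convention here are assumptions, not taken from the source):

```python
import numpy as np

def scale_and_translation_align(pred, target):
    # Center both point sets, rescale the prediction so its RMS spread
    # around the centroid matches the target's, then place it on the
    # target centroid. Shapes: (batch, n_points, 3).
    pred_mean = pred.mean(axis=1, keepdims=True)
    target_mean = target.mean(axis=1, keepdims=True)
    pred_c = pred - pred_mean
    target_c = target - target_mean
    scale = (np.linalg.norm(target_c, axis=(1, 2), keepdims=True)
             / np.linalg.norm(pred_c, axis=(1, 2), keepdims=True))
    return scale * pred_c + target_mean

# Per-vertex error after correction, mirroring the eval loop above:
pred = np.random.rand(2, 6890, 3)
target = np.random.rand(2, 6890, 3)
pve_sc = np.linalg.norm(scale_and_translation_align(pred, target) - target, axis=-1)
print(pve_sc.shape)  # (2, 6890)
```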
jamt9000/vision
|
[
"b6f28ec1a8c5fdb8d01cc61946e8f87dddcfa830",
"b6f28ec1a8c5fdb8d01cc61946e8f87dddcfa830",
"598b61d93357139cec558af6eff38a77ac60cabc"
] |
[
"torchvision/extension.py",
"torchvision/models/quantization/googlenet.py",
"test/test_onnx.py"
] |
[
"_HAS_OPS = False\n\n\ndef _register_extensions():\n import os\n import imp\n import torch\n\n # load the custom_op_library and register the custom ops\n lib_dir = os.path.dirname(__file__)\n _, path, _ = imp.find_module(\"_C\", [lib_dir])\n torch.ops.load_library(path)\n\n\ntry:\n _register_extensions()\n _HAS_OPS = True\nexcept (ImportError, OSError):\n pass\n\n\ndef _check_cuda_version():\n \"\"\"\n Make sure that CUDA versions match between the pytorch install and torchvision install\n \"\"\"\n if not _HAS_OPS:\n return -1\n import torch\n _version = torch.ops.torchvision._cuda_version()\n if _version != -1 and torch.version.cuda is not None:\n tv_version = str(_version)\n if int(tv_version) < 10000:\n tv_major = int(tv_version[0])\n tv_minor = int(tv_version[2])\n else:\n tv_major = int(tv_version[0:2])\n tv_minor = int(tv_version[3])\n t_version = torch.version.cuda\n t_version = t_version.split('.')\n t_major = int(t_version[0])\n t_minor = int(t_version[1])\n if t_major != tv_major or t_minor != tv_minor:\n raise RuntimeError(\"Detected that PyTorch and torchvision were compiled with different CUDA versions. \"\n \"PyTorch has CUDA Version={}.{} and torchvision has CUDA Version={}.{}. \"\n \"Please reinstall the torchvision that matches your PyTorch install.\"\n .format(t_major, t_minor, tv_major, tv_minor))\n return _version\n\n\n_check_cuda_version()\n",
"import warnings\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom torch.jit.annotations import Optional\n\nfrom torchvision.models.utils import load_state_dict_from_url\nfrom torchvision.models.googlenet import (\n GoogLeNetOutputs, BasicConv2d, Inception, InceptionAux, GoogLeNet, model_urls)\n\nfrom .utils import _replace_relu, quantize_model\n\n\n__all__ = ['QuantizableGoogLeNet', 'googlenet']\n\nquant_model_urls = {\n # fp32 GoogLeNet ported from TensorFlow, with weights quantized in PyTorch\n 'googlenet_fbgemm': 'https://download.pytorch.org/models/quantized/googlenet_fbgemm-c00238cf.pth',\n}\n\n\ndef googlenet(pretrained=False, progress=True, quantize=False, **kwargs):\n r\"\"\"GoogLeNet (Inception v1) model architecture from\n `\"Going Deeper with Convolutions\" <http://arxiv.org/abs/1409.4842>`_.\n\n Note that quantize = True returns a quantized model with 8 bit\n weights. Quantized models only support inference and run on CPUs.\n GPU inference is not yet supported\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n aux_logits (bool): If True, adds two auxiliary branches that can improve training.\n Default: *False* when pretrained is True otherwise *True*\n transform_input (bool): If True, preprocesses the input according to the method with which it\n was trained on ImageNet. Default: *False*\n \"\"\"\n if pretrained:\n if 'transform_input' not in kwargs:\n kwargs['transform_input'] = True\n if 'aux_logits' not in kwargs:\n kwargs['aux_logits'] = False\n if kwargs['aux_logits']:\n warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '\n 'so make sure to train them')\n original_aux_logits = kwargs['aux_logits']\n kwargs['aux_logits'] = True\n kwargs['init_weights'] = False\n\n model = QuantizableGoogLeNet(**kwargs)\n _replace_relu(model)\n\n if quantize:\n # TODO use pretrained as a string to specify the backend\n backend = 'fbgemm'\n quantize_model(model, backend)\n else:\n assert pretrained in [True, False]\n\n if pretrained:\n if quantize:\n model_url = quant_model_urls['googlenet' + '_' + backend]\n else:\n model_url = model_urls['googlenet']\n\n state_dict = load_state_dict_from_url(model_url,\n progress=progress)\n\n model.load_state_dict(state_dict)\n\n if not original_aux_logits:\n model.aux_logits = False\n del model.aux1, model.aux2\n return model\n\n\nclass QuantizableBasicConv2d(BasicConv2d):\n\n def __init__(self, *args, **kwargs):\n super(QuantizableBasicConv2d, self).__init__(*args, **kwargs)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n def fuse_model(self):\n torch.quantization.fuse_modules(self, [\"conv\", \"bn\", \"relu\"], inplace=True)\n\n\nclass QuantizableInception(Inception):\n\n def __init__(self, *args, **kwargs):\n super(QuantizableInception, self).__init__(\n conv_block=QuantizableBasicConv2d, *args, **kwargs)\n self.cat = nn.quantized.FloatFunctional()\n\n def forward(self, x):\n outputs = self._forward(x)\n return self.cat.cat(outputs, 1)\n\n\nclass QuantizableInceptionAux(InceptionAux):\n\n def __init__(self, *args, **kwargs):\n super(QuantizableInceptionAux, self).__init__(\n conv_block=QuantizableBasicConv2d, *args, **kwargs)\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(0.7)\n\n def forward(self, x):\n # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14\n x = F.adaptive_avg_pool2d(x, (4, 4))\n # aux1: N 
x 512 x 4 x 4, aux2: N x 528 x 4 x 4\n x = self.conv(x)\n # N x 128 x 4 x 4\n x = torch.flatten(x, 1)\n # N x 2048\n x = self.relu(self.fc1(x))\n # N x 1024\n x = self.dropout(x)\n # N x 1024\n x = self.fc2(x)\n # N x 1000 (num_classes)\n\n return x\n\n\nclass QuantizableGoogLeNet(GoogLeNet):\n\n def __init__(self, *args, **kwargs):\n super(QuantizableGoogLeNet, self).__init__(\n blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux],\n *args,\n **kwargs\n )\n self.quant = torch.quantization.QuantStub()\n self.dequant = torch.quantization.DeQuantStub()\n\n def forward(self, x):\n x = self._transform_input(x)\n x = self.quant(x)\n x, aux1, aux2 = self._forward(x)\n x = self.dequant(x)\n aux_defined = self.training and self.aux_logits\n if torch.jit.is_scripting():\n if not aux_defined:\n warnings.warn(\"Scripted QuantizableGoogleNet always returns GoogleNetOutputs Tuple\")\n return GoogLeNetOutputs(x, aux2, aux1)\n else:\n return self.eager_outputs(x, aux2, aux1)\n\n def fuse_model(self):\n r\"\"\"Fuse conv/bn/relu modules in googlenet model\n\n Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.\n Model is modified in place. Note that this operation does not change numerics\n and the model after modification is in floating point\n \"\"\"\n\n for m in self.modules():\n if type(m) == QuantizableBasicConv2d:\n m.fuse_model()\n",
"import io\nimport torch\nfrom torchvision import ops\nfrom torchvision import models\nfrom torchvision.models.detection.image_list import ImageList\nfrom torchvision.models.detection.transform import GeneralizedRCNNTransform\nfrom torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork\nfrom torchvision.models.detection.backbone_utils import resnet_fpn_backbone\nfrom torchvision.models.detection.roi_heads import RoIHeads\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead\nfrom torchvision.models.detection.mask_rcnn import MaskRCNNHeads, MaskRCNNPredictor\n\nfrom collections import OrderedDict\n\n# onnxruntime requires python 3.5 or above\ntry:\n import onnxruntime\nexcept ImportError:\n onnxruntime = None\n\nimport unittest\nfrom torchvision.ops._register_onnx_ops import _onnx_opset_version\n\n\n@unittest.skipIf(onnxruntime is None, 'ONNX Runtime unavailable')\nclass ONNXExporterTester(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n torch.manual_seed(123)\n\n def run_model(self, model, inputs_list, tolerate_small_mismatch=False):\n model.eval()\n\n onnx_io = io.BytesIO()\n # export to onnx with the first input\n torch.onnx.export(model, inputs_list[0], onnx_io,\n do_constant_folding=True, opset_version=_onnx_opset_version)\n\n # validate the exported model with onnx runtime\n for test_inputs in inputs_list:\n with torch.no_grad():\n if isinstance(test_inputs, torch.Tensor) or \\\n isinstance(test_inputs, list):\n test_inputs = (test_inputs,)\n test_ouputs = model(*test_inputs)\n if isinstance(test_ouputs, torch.Tensor):\n test_ouputs = (test_ouputs,)\n self.ort_validate(onnx_io, test_inputs, test_ouputs, tolerate_small_mismatch)\n\n def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False):\n\n inputs, _ = torch.jit._flatten(inputs)\n outputs, _ = torch.jit._flatten(outputs)\n\n def to_numpy(tensor):\n if tensor.requires_grad:\n return tensor.detach().cpu().numpy()\n else:\n return tensor.cpu().numpy()\n\n inputs = list(map(to_numpy, inputs))\n outputs = list(map(to_numpy, outputs))\n\n ort_session = onnxruntime.InferenceSession(onnx_io.getvalue())\n # compute onnxruntime output prediction\n ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs))\n ort_outs = ort_session.run(None, ort_inputs)\n for i in range(0, len(outputs)):\n try:\n torch.testing.assert_allclose(outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05)\n except AssertionError as error:\n if tolerate_small_mismatch:\n self.assertIn(\"(0.00%)\", str(error), str(error))\n else:\n raise\n\n def test_nms(self):\n boxes = torch.rand(5, 4)\n boxes[:, 2:] += torch.rand(5, 2)\n scores = torch.randn(5)\n\n class Module(torch.nn.Module):\n def forward(self, boxes, scores):\n return ops.nms(boxes, scores, 0.5)\n\n self.run_model(Module(), [(boxes, scores)])\n\n def test_roi_align(self):\n x = torch.rand(1, 1, 10, 10, dtype=torch.float32)\n single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)\n model = ops.RoIAlign((5, 5), 1, 2)\n self.run_model(model, [(x, single_roi)])\n\n def test_roi_pool(self):\n x = torch.rand(1, 1, 10, 10, dtype=torch.float32)\n rois = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)\n pool_h = 5\n pool_w = 5\n model = ops.RoIPool((pool_h, pool_w), 2)\n self.run_model(model, [(x, rois)])\n\n def test_transform_images(self):\n\n class TransformModule(torch.nn.Module):\n def __init__(self_module):\n super(TransformModule, self_module).__init__()\n self_module.transform 
= self._init_test_generalized_rcnn_transform()\n\n def forward(self_module, images):\n return self_module.transform(images)[0].tensors\n\n input = [torch.rand(3, 100, 200), torch.rand(3, 200, 200)]\n input_test = [torch.rand(3, 100, 200), torch.rand(3, 200, 200)]\n self.run_model(TransformModule(), [input, input_test])\n\n def _init_test_generalized_rcnn_transform(self):\n min_size = 100\n max_size = 200\n image_mean = [0.485, 0.456, 0.406]\n image_std = [0.229, 0.224, 0.225]\n transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)\n return transform\n\n def _init_test_rpn(self):\n anchor_sizes = ((32,), (64,), (128,), (256,), (512,))\n aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)\n rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)\n out_channels = 256\n rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])\n rpn_fg_iou_thresh = 0.7\n rpn_bg_iou_thresh = 0.3\n rpn_batch_size_per_image = 256\n rpn_positive_fraction = 0.5\n rpn_pre_nms_top_n = dict(training=2000, testing=1000)\n rpn_post_nms_top_n = dict(training=2000, testing=1000)\n rpn_nms_thresh = 0.7\n\n rpn = RegionProposalNetwork(\n rpn_anchor_generator, rpn_head,\n rpn_fg_iou_thresh, rpn_bg_iou_thresh,\n rpn_batch_size_per_image, rpn_positive_fraction,\n rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh)\n return rpn\n\n def _init_test_roi_heads_faster_rcnn(self):\n out_channels = 256\n num_classes = 91\n\n box_fg_iou_thresh = 0.5\n box_bg_iou_thresh = 0.5\n box_batch_size_per_image = 512\n box_positive_fraction = 0.25\n bbox_reg_weights = None\n box_score_thresh = 0.05\n box_nms_thresh = 0.5\n box_detections_per_img = 100\n\n box_roi_pool = ops.MultiScaleRoIAlign(\n featmap_names=['0', '1', '2', '3'],\n output_size=7,\n sampling_ratio=2)\n\n resolution = box_roi_pool.output_size[0]\n representation_size = 1024\n box_head = TwoMLPHead(\n out_channels * resolution ** 2,\n representation_size)\n\n representation_size = 1024\n box_predictor = FastRCNNPredictor(\n representation_size,\n num_classes)\n\n roi_heads = RoIHeads(\n box_roi_pool, box_head, box_predictor,\n box_fg_iou_thresh, box_bg_iou_thresh,\n box_batch_size_per_image, box_positive_fraction,\n bbox_reg_weights,\n box_score_thresh, box_nms_thresh, box_detections_per_img)\n return roi_heads\n\n def get_features(self, images):\n s0, s1 = images.shape[-2:]\n features = [\n ('0', torch.rand(2, 256, s0 // 4, s1 // 4)),\n ('1', torch.rand(2, 256, s0 // 8, s1 // 8)),\n ('2', torch.rand(2, 256, s0 // 16, s1 // 16)),\n ('3', torch.rand(2, 256, s0 // 32, s1 // 32)),\n ('4', torch.rand(2, 256, s0 // 64, s1 // 64)),\n ]\n features = OrderedDict(features)\n return features\n\n def test_rpn(self):\n class RPNModule(torch.nn.Module):\n def __init__(self_module, images):\n super(RPNModule, self_module).__init__()\n self_module.rpn = self._init_test_rpn()\n self_module.images = ImageList(images, [i.shape[-2:] for i in images])\n\n def forward(self_module, features):\n return self_module.rpn(self_module.images, features)\n\n images = torch.rand(2, 3, 600, 600)\n features = self.get_features(images)\n test_features = self.get_features(images)\n\n model = RPNModule(images)\n model.eval()\n model(features)\n self.run_model(model, [(features,), (test_features,)], tolerate_small_mismatch=True)\n\n def test_multi_scale_roi_align(self):\n\n class TransformModule(torch.nn.Module):\n def __init__(self):\n super(TransformModule, self).__init__()\n self.model = ops.MultiScaleRoIAlign(['feat1', 'feat2'], 3, 2)\n 
self.image_sizes = [(512, 512)]\n\n def forward(self, input, boxes):\n return self.model(input, boxes, self.image_sizes)\n\n i = OrderedDict()\n i['feat1'] = torch.rand(1, 5, 64, 64)\n i['feat2'] = torch.rand(1, 5, 16, 16)\n boxes = torch.rand(6, 4) * 256\n boxes[:, 2:] += boxes[:, :2]\n\n i1 = OrderedDict()\n i1['feat1'] = torch.rand(1, 5, 64, 64)\n i1['feat2'] = torch.rand(1, 5, 16, 16)\n boxes1 = torch.rand(6, 4) * 256\n boxes1[:, 2:] += boxes1[:, :2]\n\n self.run_model(TransformModule(), [(i, [boxes],), (i1, [boxes1],)])\n\n def test_roi_heads(self):\n class RoiHeadsModule(torch.nn.Module):\n def __init__(self_module, images):\n super(RoiHeadsModule, self_module).__init__()\n self_module.transform = self._init_test_generalized_rcnn_transform()\n self_module.rpn = self._init_test_rpn()\n self_module.roi_heads = self._init_test_roi_heads_faster_rcnn()\n self_module.original_image_sizes = [img.shape[-2:] for img in images]\n self_module.images = ImageList(images, [i.shape[-2:] for i in images])\n\n def forward(self_module, features):\n proposals, _ = self_module.rpn(self_module.images, features)\n detections, _ = self_module.roi_heads(features, proposals, self_module.images.image_sizes)\n detections = self_module.transform.postprocess(detections,\n self_module.images.image_sizes,\n self_module.original_image_sizes)\n return detections\n\n images = torch.rand(2, 3, 600, 600)\n features = self.get_features(images)\n test_features = self.get_features(images)\n\n model = RoiHeadsModule(images)\n model.eval()\n model(features)\n self.run_model(model, [(features,), (test_features,)])\n\n def get_image_from_url(self, url):\n import requests\n import numpy\n from PIL import Image\n from io import BytesIO\n from torchvision import transforms\n\n data = requests.get(url)\n image = Image.open(BytesIO(data.content)).convert(\"RGB\")\n image = image.resize((300, 200), Image.BILINEAR)\n\n to_tensor = transforms.ToTensor()\n return to_tensor(image)\n\n def get_test_images(self):\n image_url = \"http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg\"\n image = self.get_image_from_url(url=image_url)\n image_url2 = \"https://pytorch.org/tutorials/_static/img/tv_tutorial/tv_image05.png\"\n image2 = self.get_image_from_url(url=image_url2)\n images = [image]\n test_images = [image2]\n return images, test_images\n\n def test_faster_rcnn(self):\n images, test_images = self.get_test_images()\n\n model = models.detection.faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=True,\n min_size=200,\n max_size=300)\n model.eval()\n model(images)\n self.run_model(model, [(images,), (test_images,)])\n\n # Verify that paste_mask_in_image beahves the same in tracing.\n # This test also compares both paste_masks_in_image and _onnx_paste_masks_in_image\n # (since jit_trace witll call _onnx_paste_masks_in_image).\n def test_paste_mask_in_image(self):\n # disable profiling\n torch._C._jit_set_profiling_executor(False)\n torch._C._jit_set_profiling_mode(False)\n\n masks = torch.rand(10, 1, 26, 26)\n boxes = torch.rand(10, 4)\n boxes[:, 2:] += torch.rand(10, 2)\n boxes *= 50\n o_im_s = (100, 100)\n from torchvision.models.detection.roi_heads import paste_masks_in_image\n out = paste_masks_in_image(masks, boxes, o_im_s)\n jit_trace = torch.jit.trace(paste_masks_in_image,\n (masks, boxes,\n [torch.tensor(o_im_s[0]),\n torch.tensor(o_im_s[1])]))\n out_trace = jit_trace(masks, boxes, [torch.tensor(o_im_s[0]), torch.tensor(o_im_s[1])])\n\n assert torch.all(out.eq(out_trace))\n\n masks2 = torch.rand(20, 1, 26, 26)\n boxes2 = 
torch.rand(20, 4)\n boxes2[:, 2:] += torch.rand(20, 2)\n boxes2 *= 100\n o_im_s2 = (200, 200)\n from torchvision.models.detection.roi_heads import paste_masks_in_image\n out2 = paste_masks_in_image(masks2, boxes2, o_im_s2)\n out_trace2 = jit_trace(masks2, boxes2, [torch.tensor(o_im_s2[0]), torch.tensor(o_im_s2[1])])\n\n assert torch.all(out2.eq(out_trace2))\n\n def test_mask_rcnn(self):\n images, test_images = self.get_test_images()\n\n model = models.detection.mask_rcnn.maskrcnn_resnet50_fpn(pretrained=True, min_size=200, max_size=300)\n model.eval()\n model(images)\n self.run_model(model, [(images,), (test_images,)])\n\n # Verify that heatmaps_to_keypoints behaves the same in tracing.\n # This test also compares both heatmaps_to_keypoints and _onnx_heatmaps_to_keypoints\n # (since jit_trace witll call _heatmaps_to_keypoints).\n # @unittest.skip(\"Disable test until Resize bug fixed in ORT\")\n def test_heatmaps_to_keypoints(self):\n # disable profiling\n torch._C._jit_set_profiling_executor(False)\n torch._C._jit_set_profiling_mode(False)\n\n maps = torch.rand(10, 1, 26, 26)\n rois = torch.rand(10, 4)\n from torchvision.models.detection.roi_heads import heatmaps_to_keypoints\n out = heatmaps_to_keypoints(maps, rois)\n jit_trace = torch.jit.trace(heatmaps_to_keypoints, (maps, rois))\n out_trace = jit_trace(maps, rois)\n\n assert torch.all(out[0].eq(out_trace[0]))\n assert torch.all(out[1].eq(out_trace[1]))\n\n maps2 = torch.rand(20, 2, 21, 21)\n rois2 = torch.rand(20, 4)\n from torchvision.models.detection.roi_heads import heatmaps_to_keypoints\n out2 = heatmaps_to_keypoints(maps2, rois2)\n out_trace2 = jit_trace(maps2, rois2)\n\n assert torch.all(out2[0].eq(out_trace2[0]))\n assert torch.all(out2[1].eq(out_trace2[1]))\n\n @unittest.skip(\"Disable test until Argmax is updated in ONNX\")\n def test_keypoint_rcnn(self):\n images, test_images = self.get_test_images()\n\n model = models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(pretrained=True, min_size=200, max_size=300)\n model.eval()\n model(test_images)\n self.run_model(model, [(images,), (test_images,)])\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"torch.ops.load_library",
"torch.ops.torchvision._cuda_version"
],
[
"torch.nn.Dropout",
"torch.quantization.QuantStub",
"torch.quantization.DeQuantStub",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.ReLU",
"torch.jit.is_scripting",
"torch.flatten",
"torch.nn.quantized.FloatFunctional",
"torch.quantization.fuse_modules"
],
[
"torch.rand",
"torch._C._jit_set_profiling_executor",
"torch.no_grad",
"torch.manual_seed",
"torch.jit._flatten",
"torch.tensor",
"torch.onnx.export",
"torch._C._jit_set_profiling_mode",
"torch.jit.trace",
"torch.testing.assert_allclose",
"torch.randn"
]
] |
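`_check_cuda_version` in the extension.py blob above recovers major/minor numbers from torchvision's integer CUDA version by digit slicing. A small standalone check of that parsing, assuming the usual major*1000 + minor*10 encoding (e.g. 10020 for CUDA 10.2):

```python
def split_cuda_version(v: int):
    # Same digit slicing as _check_cuda_version: versions below 10000
    # have a one-digit major ("9020" -> 9.2), otherwise two digits.
    s = str(v)
    if v < 10000:
        return int(s[0]), int(s[2])
    return int(s[0:2]), int(s[3])

assert split_cuda_version(9020) == (9, 2)    # CUDA 9.2
assert split_cuda_version(10020) == (10, 2)  # CUDA 10.2
```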
iblamedom/kuenstliche-intelligenz
|
[
"733ecdcfcb561aa1b39854b1b3632c0fda07f841"
] |
[
"tests/test_set_random_seed.py"
] |
[
"import random\nimport unittest\nfrom typing import Tuple\n\nimport torch\nimport numpy as np\n\nfrom src.utilities import set_random_seed\n\n\n_RANDOM_SEED: int = random.randint(0, 100)\n_TEST_ARRAY_SIZE: Tuple[int, int] = (2, 2)\n_TEST_TENSOR_SIZE: Tuple[int, int] = (2, 2)\n\n\ndef _set_random_seed():\n set_random_seed(\n random_seed=_RANDOM_SEED,\n )\n\n\nclass TestSetRandomSeed(unittest.TestCase):\n \"\"\"Unit test class for ``set_random_seed`` function.\n\n The test checks the random seed function for Python random,\n NumPy, and PyTorch by asserting the first random number, array,\n or tensor is always the same after seeding.\n\n \"\"\"\n def test_random(self):\n _set_random_seed()\n _random = random.random()\n _set_random_seed()\n assert _random == random.random()\n\n def test_numpy(self):\n _set_random_seed()\n _array = np.random.random(size=_TEST_ARRAY_SIZE)\n _set_random_seed()\n assert (_array == np.random.random(size=_TEST_ARRAY_SIZE)).all()\n\n def test_torch(self):\n _set_random_seed()\n _tensor = torch.rand(size=_TEST_TENSOR_SIZE)\n _set_random_seed()\n assert (_tensor == torch.rand(size=_TEST_TENSOR_SIZE)).all()\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.random.random",
"torch.rand"
]
] |
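The tests above exercise `src.utilities.set_random_seed`, whose body is not included in this record. A plausible implementation consistent with what the three tests check — Python `random`, NumPy, and PyTorch all seeded from one value (the CUDA lines are an extra assumption beyond what the tests require):

```python
import random

import numpy as np
import torch

def set_random_seed(random_seed: int) -> None:
    # Seed every generator the tests draw from.
    random.seed(random_seed)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    # Not required by the tests, but usual in practice:
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(random_seed)
```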
non778/examples
|
[
"d1eed1a6a987b0ebbb0341925a480dc3e60489ee"
] |
[
"lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py"
] |
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"End-to-end tests that check model correctness.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.compat import v1 as tfv1\n\n# pylint: disable=g-bad-import-order\nfrom tfltransfer import bases\nfrom tfltransfer import optimizers\nfrom tfltransfer import heads\nfrom tfltransfer import tflite_transfer_converter\n# pylint: enable=g-bad-import-order\n\nIMAGE_SIZE = 224\nBATCH_SIZE = 128\nNUM_CLASSES = 5\nVALIDATION_SPLIT = 0.2\nLEARNING_RATE = 0.001\nBOTTLENECK_SHAPE = (7, 7, 1280)\n\nDATASET_URL = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'\n\n\nclass TransferModel(object):\n \"\"\"Test consumer of models generated by the converter.\"\"\"\n\n def __init__(self, dataset_dir, base_model, head_model, optimizer):\n \"\"\"Creates a wrapper for a set of models and a data set.\"\"\"\n self.dataset_dir = dataset_dir\n\n datagen = tf.keras.preprocessing.image.ImageDataGenerator(\n rescale=1. / 255, validation_split=VALIDATION_SPLIT)\n self.train_img_generator = datagen.flow_from_directory(\n self.dataset_dir,\n target_size=(IMAGE_SIZE, IMAGE_SIZE),\n batch_size=BATCH_SIZE,\n subset='training')\n self.val_img_generator = datagen.flow_from_directory(\n self.dataset_dir,\n target_size=(IMAGE_SIZE, IMAGE_SIZE),\n batch_size=BATCH_SIZE,\n subset='validation')\n\n converter = tflite_transfer_converter.TFLiteTransferConverter(\n NUM_CLASSES, base_model, head_model, optimizer, BATCH_SIZE)\n models = converter._convert()\n self.initialize_model = models['initialize']\n self.bottleneck_model = models['bottleneck']\n self.train_head_model = models['train_head']\n self.inference_model = models['inference']\n self.optimizer_model = models['optimizer']\n self.variables = self._generate_initial_variables()\n\n optim_state_shapes = self._optimizer_state_shapes()\n self.optim_state = [\n np.zeros(shape, dtype=np.float32) for shape in optim_state_shapes\n ]\n\n def _generate_initial_variables(self):\n \"\"\"Generates the initial model variables.\"\"\"\n interpreter = tf.lite.Interpreter(model_content=self.initialize_model)\n zero_in = interpreter.get_input_details()[0]\n variable_outs = interpreter.get_output_details()\n interpreter.allocate_tensors()\n interpreter.set_tensor(zero_in['index'], np.float32(0.))\n interpreter.invoke()\n return [interpreter.get_tensor(var['index']) for var in variable_outs]\n\n def _optimizer_state_shapes(self):\n \"\"\"Reads the shapes of the optimizer parameters (mutable state).\"\"\"\n interpreter = tf.lite.Interpreter(model_content=self.optimizer_model)\n num_variables = len(self.variables)\n optim_state_inputs = interpreter.get_input_details()[num_variables * 2:]\n return [input_['shape'] for input_ in optim_state_inputs]\n\n def 
prepare_bottlenecks(self):\n \"\"\"Passes all images through the base model and save the bottlenecks.\n\n This method has to be called before any training or inference.\n \"\"\"\n self.train_bottlenecks, self.train_labels = (\n self._collect_and_generate_bottlenecks(self.train_img_generator))\n self.val_bottlenecks, self.val_labels = (\n self._collect_and_generate_bottlenecks(self.val_img_generator))\n\n def _collect_and_generate_bottlenecks(self, image_gen):\n \"\"\"Consumes a generator and converts all images to bottlenecks.\n\n Args:\n image_gen: A Keras data generator for images to process\n\n Returns:\n Two NumPy arrays: (bottlenecks, labels).\n \"\"\"\n collected_bottlenecks = np.zeros(\n (image_gen.samples,) + BOTTLENECK_SHAPE, dtype=np.float32)\n collected_labels = np.zeros((image_gen.samples, NUM_CLASSES),\n dtype=np.float32)\n\n next_idx = 0\n for bottlenecks, truth in self._generate_bottlenecks(\n make_finite(image_gen)):\n batch_size = bottlenecks.shape[0]\n collected_bottlenecks[next_idx:next_idx + batch_size] = bottlenecks\n collected_labels[next_idx:next_idx + batch_size] = truth\n next_idx += batch_size\n\n return collected_bottlenecks, collected_labels\n\n def _generate_bottlenecks(self, image_gen):\n \"\"\"Generator adapter that passes images through the bottleneck model.\n\n Args:\n image_gen: A generator that returns images to be processed. Images are\n paired with ground truth labels.\n\n Yields:\n Bottlenecks from input images, paired with ground truth labels.\n \"\"\"\n interpreter = tf.lite.Interpreter(model_content=self.bottleneck_model)\n [x_in] = interpreter.get_input_details()\n [bottleneck_out] = interpreter.get_output_details()\n\n for (x, y) in image_gen:\n batch_size = x.shape[0]\n interpreter.resize_tensor_input(x_in['index'],\n (batch_size, IMAGE_SIZE, IMAGE_SIZE, 3))\n interpreter.allocate_tensors()\n interpreter.set_tensor(x_in['index'], x)\n interpreter.invoke()\n bottleneck = interpreter.get_tensor(bottleneck_out['index'])\n yield bottleneck, y\n\n def train_head(self, num_epochs):\n \"\"\"Trains the head model for a given number of epochs.\n\n SGD is used as an optimizer.\n\n Args:\n num_epochs: how many epochs should be trained\n\n Returns:\n A list of train_loss values after every epoch trained.\n\n Raises:\n RuntimeError: when prepare_bottlenecks() has not been called.\n \"\"\"\n if not hasattr(self, 'train_bottlenecks'):\n raise RuntimeError('prepare_bottlenecks has not been called')\n results = []\n for _ in range(num_epochs):\n loss = self._train_one_epoch(\n self._generate_batches(self.train_bottlenecks, self.train_labels))\n results.append(loss)\n return results\n\n def _generate_batches(self, x, y):\n \"\"\"Creates a generator that iterates over the data in batches.\"\"\"\n num_total = x.shape[0]\n for begin in range(0, num_total, BATCH_SIZE):\n end = min(begin + BATCH_SIZE, num_total)\n yield x[begin:end], y[begin:end]\n\n def _train_one_epoch(self, train_gen):\n \"\"\"Performs one training epoch.\"\"\"\n interpreter = tf.lite.Interpreter(model_content=self.train_head_model)\n interpreter.allocate_tensors()\n x_in, y_in = interpreter.get_input_details()[:2]\n variable_ins = interpreter.get_input_details()[2:]\n loss_out = interpreter.get_output_details()[0]\n gradient_outs = interpreter.get_output_details()[1:]\n\n epoch_loss = 0.\n num_processed = 0\n for bottlenecks, truth in train_gen:\n batch_size = bottlenecks.shape[0]\n if batch_size < BATCH_SIZE:\n bottlenecks = pad_batch(bottlenecks, BATCH_SIZE)\n truth = pad_batch(truth, 
BATCH_SIZE)\n\n interpreter.set_tensor(x_in['index'], bottlenecks)\n interpreter.set_tensor(y_in['index'], truth)\n for variable_in, variable_value in zip(variable_ins, self.variables):\n interpreter.set_tensor(variable_in['index'], variable_value)\n interpreter.invoke()\n\n loss = interpreter.get_tensor(loss_out['index'])\n gradients = [\n interpreter.get_tensor(gradient_out['index'])\n for gradient_out in gradient_outs\n ]\n\n self._apply_gradients(gradients)\n epoch_loss += loss * batch_size\n num_processed += batch_size\n\n epoch_loss /= num_processed\n return epoch_loss\n\n def _apply_gradients(self, gradients):\n \"\"\"Applies the optimizer to the model parameters.\"\"\"\n interpreter = tf.lite.Interpreter(model_content=self.optimizer_model)\n interpreter.allocate_tensors()\n num_variables = len(self.variables)\n variable_ins = interpreter.get_input_details()[:num_variables]\n gradient_ins = interpreter.get_input_details()[num_variables:num_variables *\n 2]\n state_ins = interpreter.get_input_details()[num_variables * 2:]\n variable_outs = interpreter.get_output_details()[:num_variables]\n state_outs = interpreter.get_output_details()[num_variables:]\n\n for variable, gradient, variable_in, gradient_in in zip(\n self.variables, gradients, variable_ins, gradient_ins):\n interpreter.set_tensor(variable_in['index'], variable)\n interpreter.set_tensor(gradient_in['index'], gradient)\n\n for optim_state_elem, state_in in zip(self.optim_state, state_ins):\n interpreter.set_tensor(state_in['index'], optim_state_elem)\n\n interpreter.invoke()\n self.variables = [\n interpreter.get_tensor(variable_out['index'])\n for variable_out in variable_outs\n ]\n self.optim_state = [\n interpreter.get_tensor(state_out['index']) for state_out in state_outs\n ]\n\n def measure_inference_accuracy(self):\n \"\"\"Runs the inference model and measures accuracy on the validation set.\"\"\"\n interpreter = tf.lite.Interpreter(model_content=self.inference_model)\n bottleneck_in = interpreter.get_input_details()[0]\n variable_ins = interpreter.get_input_details()[1:]\n [y_out] = interpreter.get_output_details()\n\n inference_accuracy = 0.\n num_processed = 0\n for bottleneck, truth in self._generate_batches(self.val_bottlenecks,\n self.val_labels):\n batch_size = bottleneck.shape[0]\n interpreter.resize_tensor_input(bottleneck_in['index'],\n (batch_size,) + BOTTLENECK_SHAPE)\n interpreter.allocate_tensors()\n\n interpreter.set_tensor(bottleneck_in['index'], bottleneck)\n for variable_in, variable_value in zip(variable_ins, self.variables):\n interpreter.set_tensor(variable_in['index'], variable_value)\n interpreter.invoke()\n\n preds = interpreter.get_tensor(y_out['index'])\n\n acc = (np.argmax(preds, axis=1) == np.argmax(truth,\n axis=1)).sum() / batch_size\n inference_accuracy += acc * batch_size\n num_processed += batch_size\n\n inference_accuracy /= num_processed\n return inference_accuracy\n\n\ndef make_finite(data_gen):\n \"\"\"An adapter for Keras data generators that makes them finite.\n\n The default behavior in Keras is to keep looping infinitely through\n the data.\n\n Args:\n data_gen: An infinite Keras data generator.\n\n Yields:\n Same values as the parameter generator.\n \"\"\"\n num_samples = data_gen.samples\n num_processed = 0\n for batch in data_gen:\n batch_size = batch[0].shape[0]\n if batch_size + num_processed > num_samples:\n batch_size = num_samples - num_processed\n should_stop = True\n else:\n should_stop = False\n if batch_size == 0:\n return\n\n batch = tuple(x[:batch_size] for x in 
batch)\n yield batch\n num_processed += batch_size\n if should_stop:\n return\n\n\n# TODO(b/135138207) investigate if we can get rid of this.\ndef pad_batch(batch, batch_size):\n \"\"\"Resize batch to a given size, tiling present samples over missing.\n\n Example:\n Suppose batch_size is 5, batch is [1, 2].\n Then the return value is [1, 2, 1, 2, 1].\n\n Args:\n batch: An ndarray with first dimension size <= batch_size.\n batch_size: Desired size for first dimension.\n\n Returns:\n An ndarray of the same shape, except first dimension has\n the desired size.\n \"\"\"\n padded = np.zeros((batch_size,) + batch.shape[1:], dtype=batch.dtype)\n next_idx = 0\n while next_idx < batch_size:\n fill_len = min(batch.shape[0], batch_size - next_idx)\n padded[next_idx:next_idx + fill_len] = batch[:fill_len]\n next_idx += fill_len\n return padded\n\n\nclass ModelCorrectnessTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(ModelCorrectnessTest, cls).setUpClass()\n zip_file = tf.keras.utils.get_file(\n origin=DATASET_URL, fname='flower_photos.tgz', extract=True)\n cls.dataset_dir = os.path.join(os.path.dirname(zip_file), 'flower_photos')\n\n mobilenet_dir = tempfile.mkdtemp('tflite-transfer-test')\n mobilenet_keras = tf.keras.applications.MobileNetV2(\n input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),\n include_top=False,\n weights='imagenet')\n tfv1.keras.experimental.export_saved_model(mobilenet_keras, mobilenet_dir)\n cls.mobilenet_dir = mobilenet_dir\n\n def setUp(self):\n super(ModelCorrectnessTest, self).setUp()\n self.mobilenet_dir = ModelCorrectnessTest.mobilenet_dir\n self.dataset_dir = ModelCorrectnessTest.dataset_dir\n\n def test_mobilenet_v2_saved_model_and_softmax_classifier(self):\n base_model = bases.SavedModelBase(self.mobilenet_dir)\n head_model = heads.SoftmaxClassifierHead(BATCH_SIZE, BOTTLENECK_SHAPE,\n NUM_CLASSES)\n optimizer = optimizers.SGD(LEARNING_RATE)\n model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)\n self.assertModelAchievesAccuracy(model, 0.80)\n\n def test_mobilenet_v2_saved_model_quantized_and_softmax_classifier(self):\n base_model = bases.SavedModelBase(self.mobilenet_dir, quantize=True)\n head_model = heads.SoftmaxClassifierHead(BATCH_SIZE, BOTTLENECK_SHAPE,\n NUM_CLASSES)\n optimizer = optimizers.SGD(LEARNING_RATE)\n model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)\n self.assertModelAchievesAccuracy(model, 0.80)\n\n def test_mobilenet_v2_base_and_softmax_classifier(self):\n base_model = bases.MobileNetV2Base()\n head_model = heads.SoftmaxClassifierHead(BATCH_SIZE, BOTTLENECK_SHAPE,\n NUM_CLASSES)\n optimizer = optimizers.SGD(LEARNING_RATE)\n model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)\n self.assertModelAchievesAccuracy(model, 0.80)\n\n def test_mobilenet_v2_base_and_softmax_classifier_l2(self):\n base_model = bases.MobileNetV2Base()\n head_model = heads.SoftmaxClassifierHead(\n BATCH_SIZE, BOTTLENECK_SHAPE, NUM_CLASSES, l2_reg=0.1)\n optimizer = optimizers.SGD(LEARNING_RATE)\n model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)\n self.assertModelAchievesAccuracy(model, 0.80)\n\n def test_mobilenet_v2_base_quantized_and_softmax_classifier(self):\n base_model = bases.MobileNetV2Base(quantize=True)\n head_model = heads.SoftmaxClassifierHead(BATCH_SIZE, BOTTLENECK_SHAPE,\n NUM_CLASSES)\n optimizer = optimizers.SGD(LEARNING_RATE)\n model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)\n self.assertModelAchievesAccuracy(model, 0.80)\n\n 
def test_mobilenet_v2_base_and_softmax_classifier_adam(self):\n base_model = bases.MobileNetV2Base()\n head_model = heads.SoftmaxClassifierHead(BATCH_SIZE, BOTTLENECK_SHAPE,\n NUM_CLASSES)\n optimizer = optimizers.Adam()\n model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)\n self.assertModelAchievesAccuracy(model, 0.80)\n\n def assertModelAchievesAccuracy(self, model, target_accuracy, num_epochs=30):\n model.prepare_bottlenecks()\n print('Bottlenecks prepared')\n history = model.train_head(num_epochs)\n print('Training completed, history = {}'.format(history))\n accuracy = model.measure_inference_accuracy()\n print('Final accuracy = {:.2f}'.format(accuracy))\n self.assertGreater(accuracy, target_accuracy)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.utils.get_file",
"numpy.zeros",
"tensorflow.lite.Interpreter",
"tensorflow.keras.applications.MobileNetV2",
"numpy.float32",
"numpy.argmax",
"tensorflow.compat.v1.keras.experimental.export_saved_model"
]
] |
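Every model wrapper in the model_correctness_test.py blob above drives a TFLite interpreter through the same set_tensor / invoke / get_tensor cycle. A minimal self-contained sketch of that pattern ('model.tflite' is a placeholder path, and the float32 zero input is an illustrative assumption, not from the source):

```python
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='model.tflite')  # placeholder path
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]
# Feed zeros of the declared input shape, run, and read the result back.
interpreter.set_tensor(inp['index'], np.zeros(inp['shape'], dtype=np.float32))
interpreter.invoke()
result = interpreter.get_tensor(out['index'])
print(result.shape)
```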
marleneDebatin/flair
|
[
"4d17509f358158f66d43e85db1b6990523b0b095"
] |
[
"flair/models/tars_model.py"
] |
[
"import logging\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom typing import List, Optional, Set, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.preprocessing import minmax_scale\nfrom tqdm import tqdm\n\nimport flair\nfrom flair.data import Dictionary, Sentence, Span, SpanLabel\nfrom flair.datasets import DataLoader, FlairDatapointDataset\nfrom flair.embeddings import (\n TokenEmbeddings,\n TransformerDocumentEmbeddings,\n TransformerWordEmbeddings,\n)\nfrom flair.file_utils import cached_path\nfrom flair.models.sequence_tagger_model import SequenceTagger\nfrom flair.models.text_classification_model import TextClassifier\nfrom flair.training_utils import store_embeddings\n\nlog = logging.getLogger(\"flair\")\n\n\nclass FewshotClassifier(flair.nn.Classifier[Sentence]):\n def __init__(self):\n self._current_task = None\n self._task_specific_attributes = {}\n self.label_nearest_map = None\n self.tars_model: flair.nn.Classifier[Sentence]\n\n super(FewshotClassifier, self).__init__()\n\n def forward_loss(\n self, data_points: Union[List[Sentence], Sentence]\n ) -> Union[torch.Tensor, Tuple[torch.Tensor, int]]:\n\n if not isinstance(data_points, list):\n data_points = [data_points]\n\n # Transform input data into TARS format\n sentences = self._get_tars_formatted_sentences(data_points)\n\n loss = self.tars_model.forward_loss(sentences)\n return loss\n\n @property\n def tars_embeddings(self):\n raise NotImplementedError\n\n def _get_tars_formatted_sentence(self, label, sentence):\n raise NotImplementedError\n\n def _get_tars_formatted_sentences(self, sentences: List[Sentence]):\n label_text_pairs = []\n all_labels = [label.decode(\"utf-8\") for label in self.get_current_label_dictionary().idx2item]\n for sentence in sentences:\n label_text_pairs_for_sentence = []\n if self.training and self.num_negative_labels_to_sample is not None:\n\n positive_labels = list(\n OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)])\n )\n\n sampled_negative_labels = self._get_nearest_labels_for(positive_labels)\n\n for label in positive_labels:\n label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))\n for label in sampled_negative_labels:\n label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))\n\n else:\n for label in all_labels:\n label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))\n label_text_pairs.extend(label_text_pairs_for_sentence)\n\n return label_text_pairs\n\n def _get_nearest_labels_for(self, labels):\n\n # if there are no labels, return a random sample as negatives\n if len(labels) == 0:\n tags = self.get_current_label_dictionary().get_items()\n import random\n\n sample = random.sample(tags, k=self.num_negative_labels_to_sample)\n return sample\n\n already_sampled_negative_labels = set()\n\n # otherwise, go through all labels\n for label in labels:\n\n plausible_labels = []\n plausible_label_probabilities = []\n for plausible_label in self.label_nearest_map[label]:\n if plausible_label in already_sampled_negative_labels or plausible_label in labels:\n continue\n else:\n plausible_labels.append(plausible_label)\n plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label])\n\n # make sure the probabilities always sum up to 1\n plausible_label_probabilities = np.array(plausible_label_probabilities, dtype=\"float64\")\n plausible_label_probabilities += 
1e-08\n plausible_label_probabilities /= np.sum(plausible_label_probabilities)\n\n if len(plausible_labels) > 0:\n num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels))\n sampled_negative_labels = np.random.choice(\n plausible_labels,\n num_samples,\n replace=False,\n p=plausible_label_probabilities,\n )\n already_sampled_negative_labels.update(sampled_negative_labels)\n\n return already_sampled_negative_labels\n\n def train(self, mode=True):\n \"\"\"Populate label similarity map based on cosine similarity before running epoch\n\n If the `num_negative_labels_to_sample` is set to an integer value then before starting\n each epoch the model would create a similarity measure between the label names based\n on cosine distances between their BERT encoded embeddings.\n \"\"\"\n if mode and self.num_negative_labels_to_sample is not None:\n self._compute_label_similarity_for_current_epoch()\n super().train(mode)\n\n super().train(mode)\n\n def _compute_label_similarity_for_current_epoch(self):\n \"\"\"\n Compute the similarity between all labels for better sampling of negatives\n \"\"\"\n\n # get and embed all labels by making a Sentence object that contains only the label text\n all_labels = [label.decode(\"utf-8\") for label in self.get_current_label_dictionary().idx2item]\n label_sentences = [Sentence(label) for label in all_labels]\n\n self.tars_embeddings.eval() # TODO: check if this is necessary\n self.tars_embeddings.embed(label_sentences)\n self.tars_embeddings.train()\n\n # get each label embedding and scale between 0 and 1\n if isinstance(self.tars_embeddings, TokenEmbeddings):\n encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences]\n else:\n encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences]\n\n normalized_encoding = minmax_scale(encodings_np)\n\n # compute similarity matrix\n similarity_matrix = cosine_similarity(normalized_encoding)\n\n # the higher the similarity, the greater the chance that a label is\n # sampled as negative example\n negative_label_probabilities = {}\n for row_index, label in enumerate(all_labels):\n negative_label_probabilities[label] = {}\n for column_index, other_label in enumerate(all_labels):\n if label != other_label:\n negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index]\n self.label_nearest_map = negative_label_probabilities\n\n def get_current_label_dictionary(self):\n label_dictionary = self._task_specific_attributes[self._current_task][\"label_dictionary\"]\n return label_dictionary\n\n def get_current_label_type(self):\n return self._task_specific_attributes[self._current_task][\"label_type\"]\n\n def is_current_task_multi_label(self):\n return self._task_specific_attributes[self._current_task][\"multi_label\"]\n\n def add_and_switch_to_new_task(\n self,\n task_name,\n label_dictionary: Union[List, Set, Dictionary, str],\n label_type: str,\n multi_label: bool = True,\n force_switch: bool = False,\n ):\n \"\"\"\n Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches'\n to the new task. Parameters are similar to the constructor except for model choice, batch\n size and negative sampling. 
This method does not store the resultant model onto disk.\n :param task_name: a string depicting the name of the task\n :param label_dictionary: dictionary of the labels you want to predict\n :param label_type: string to identify the label type ('ner', 'sentiment', etc.)\n :param multi_label: whether this task is a multi-label prediction problem\n :param force_switch: if True, will overwrite existing task with same name\n \"\"\"\n if task_name in self._task_specific_attributes and not force_switch:\n log.warning(\"Task `%s` already exists in TARS model. Switching to it.\", task_name)\n else:\n # make label dictionary if no Dictionary object is passed\n if isinstance(label_dictionary, Dictionary):\n label_dictionary = label_dictionary.get_items()\n if type(label_dictionary) == str:\n label_dictionary = [label_dictionary]\n\n # prepare dictionary of tags (without B- I- prefixes and without UNK)\n tag_dictionary = Dictionary(add_unk=False)\n for tag in label_dictionary:\n if tag == \"<unk>\" or tag == \"O\":\n continue\n if tag[1] == \"-\":\n tag = tag[2:]\n tag_dictionary.add_item(tag)\n else:\n tag_dictionary.add_item(tag)\n\n self._task_specific_attributes[task_name] = {\n \"label_dictionary\": tag_dictionary,\n \"label_type\": label_type,\n \"multi_label\": multi_label,\n }\n\n self.switch_to_task(task_name)\n\n def list_existing_tasks(self) -> Set[str]:\n \"\"\"\n Lists existing tasks in the loaded TARS model on the console.\n \"\"\"\n return set(self._task_specific_attributes.keys())\n\n def switch_to_task(self, task_name):\n \"\"\"\n Switches to a task which was previously added.\n \"\"\"\n if task_name not in self._task_specific_attributes:\n log.error(\n \"Provided `%s` does not exist in the model. Consider calling \" \"`add_and_switch_to_new_task` first.\",\n task_name,\n )\n else:\n self._current_task = task_name\n\n def _drop_task(self, task_name):\n if task_name in self._task_specific_attributes:\n if self._current_task == task_name:\n log.error(\n \"`%s` is the current task.\" \" Switch to some other task before dropping this.\",\n task_name,\n )\n else:\n self._task_specific_attributes.pop(task_name)\n else:\n log.warning(\"No task exists with the name `%s`.\", task_name)\n\n @staticmethod\n def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:\n filtered_sentences = [sentence for sentence in sentences if sentence.tokens]\n if len(sentences) != len(filtered_sentences):\n log.warning(f\"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.\")\n return filtered_sentences\n\n @property\n def label_type(self):\n return self.get_current_label_type()\n\n def predict_zero_shot(\n self,\n sentences: Union[List[Sentence], Sentence],\n candidate_label_set: Union[List[str], Set[str], str],\n multi_label: bool = True,\n ):\n \"\"\"\n Method to make zero shot predictions from the TARS model\n :param sentences: input sentence objects to classify\n :param candidate_label_set: set of candidate labels\n :param multi_label: indicates whether multi-label or single class prediction. 
Defaults to True.\n \"\"\"\n\n # check if candidate_label_set is empty\n if candidate_label_set is None or len(candidate_label_set) == 0:\n log.warning(\"Provided candidate_label_set is empty\")\n return\n\n # make list if only one candidate label is passed\n if isinstance(candidate_label_set, str):\n candidate_label_set = {candidate_label_set}\n\n # create label dictionary\n label_dictionary = Dictionary(add_unk=False)\n for label in candidate_label_set:\n label_dictionary.add_item(label)\n\n # note current task\n existing_current_task = self._current_task\n\n # create a temporary task\n self.add_and_switch_to_new_task(\n task_name=\"ZeroShot\",\n label_dictionary=label_dictionary,\n label_type=\"-\".join(label_dictionary.get_items()),\n multi_label=multi_label,\n )\n\n try:\n # make zero shot predictions\n self.predict(sentences)\n finally:\n # switch to the pre-existing task\n self.switch_to_task(existing_current_task)\n self._drop_task(\"ZeroShot\")\n\n return\n\n\nclass TARSTagger(FewshotClassifier):\n \"\"\"\n TARS model for sequence tagging. In the backend, the model uses a BERT based 5-class\n sequence labeler which given a <label, text> pair predicts the probability for each word\n to belong to one of the BIOES classes. The input data is a usual Sentence object which is inflated\n by the model internally before pushing it through the transformer stack of BERT.\n \"\"\"\n\n static_label_type = \"tars_label\"\n\n def __init__(\n self,\n task_name: Optional[str] = None,\n label_dictionary: Optional[Dictionary] = None,\n label_type: Optional[str] = None,\n embeddings: Union[TransformerWordEmbeddings, str] = \"bert-base-uncased\",\n num_negative_labels_to_sample: int = 2,\n prefix: bool = True,\n **tagger_args,\n ):\n \"\"\"\n Initializes a TextClassifier\n :param task_name: a string depicting the name of the task\n :param label_dictionary: dictionary of labels you want to predict\n :param embeddings: name of the pre-trained transformer model e.g.,\n 'bert-base-uncased' etc\n :param num_negative_labels_to_sample: number of negative labels to sample for each\n positive labels against a sentence during training. Defaults to 2 negative\n labels for each positive label. The model would sample all the negative labels\n if None is passed. That slows down the training considerably.\n \"\"\"\n super(TARSTagger, self).__init__()\n\n if isinstance(embeddings, str):\n embeddings = TransformerWordEmbeddings(\n model=embeddings,\n fine_tune=True,\n layers=\"-1\",\n layer_mean=False,\n )\n\n # prepare TARS dictionary\n tars_dictionary = Dictionary(add_unk=False)\n tars_dictionary.add_item(\"entity\")\n tars_dictionary.span_labels = True\n\n # initialize a bare-bones sequence tagger\n self.tars_model: SequenceTagger = SequenceTagger(\n hidden_size=123,\n embeddings=embeddings,\n tag_dictionary=tars_dictionary,\n tag_type=self.static_label_type,\n use_crf=False,\n use_rnn=False,\n reproject_embeddings=False,\n **tagger_args,\n )\n\n # transformer separator\n self.separator = str(self.tars_embeddings.tokenizer.sep_token)\n if self.tars_embeddings.tokenizer._bos_token:\n self.separator += str(self.tars_embeddings.tokenizer.bos_token)\n\n self.prefix = prefix\n self.num_negative_labels_to_sample = num_negative_labels_to_sample\n\n if task_name and label_dictionary and label_type:\n # Store task specific labels since TARS can handle multiple tasks\n self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)\n else:\n log.info(\n \"TARS initialized without a task. 
You need to call .add_and_switch_to_new_task() \"\n \"before training this model\"\n )\n\n def _get_tars_formatted_sentence(self, label, sentence):\n\n original_text = sentence.to_tokenized_string()\n\n label_text_pair = (\n f\"{label} {self.separator} {original_text}\" if self.prefix else f\"{original_text} {self.separator} {label}\"\n )\n\n label_length = 0 if not self.prefix else len(label.split(\" \")) + len(self.separator.split(\" \"))\n\n # make a tars sentence where all labels are O by default\n tars_sentence = Sentence(label_text_pair, use_tokenizer=False)\n\n for entity_label in sentence.get_labels(self.label_type):\n if entity_label.value == label:\n new_span = [tars_sentence.get_token(token.idx + label_length) for token in entity_label.span]\n tars_sentence.add_complex_label(self.static_label_type, SpanLabel(Span(new_span), value=\"entity\"))\n\n return tars_sentence\n\n def _get_state_dict(self):\n model_state = {\n \"state_dict\": self.state_dict(),\n \"current_task\": self._current_task,\n \"tag_type\": self.get_current_label_type(),\n \"tag_dictionary\": self.get_current_label_dictionary(),\n \"tars_model\": self.tars_model,\n \"num_negative_labels_to_sample\": self.num_negative_labels_to_sample,\n \"prefix\": self.prefix,\n \"task_specific_attributes\": self._task_specific_attributes,\n }\n return model_state\n\n @staticmethod\n def _fetch_model(model_name) -> str:\n\n if model_name == \"tars-ner\":\n cache_dir = Path(\"models\")\n model_name = cached_path(\n \"https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt\",\n cache_dir=cache_dir,\n )\n\n return model_name\n\n @staticmethod\n def _init_model_with_state_dict(state):\n\n # init new TARS classifier\n model = TARSTagger(\n task_name=state[\"current_task\"],\n label_dictionary=state[\"tag_dictionary\"],\n label_type=state[\"tag_type\"],\n embeddings=state[\"tars_model\"].embeddings,\n num_negative_labels_to_sample=state[\"num_negative_labels_to_sample\"],\n prefix=state[\"prefix\"],\n )\n # set all task information\n model._task_specific_attributes = state[\"task_specific_attributes\"]\n\n # linear layers of internal classifier\n model.load_state_dict(state[\"state_dict\"])\n return model\n\n @property\n def tars_embeddings(self):\n return self.tars_model.embeddings\n\n def predict(\n self,\n sentences: Union[List[Sentence], Sentence],\n mini_batch_size=32,\n return_probabilities_for_all_classes: bool = False,\n verbose: bool = False,\n label_name: Optional[str] = None,\n return_loss=False,\n embedding_storage_mode=\"none\",\n most_probable_first: bool = True,\n ):\n # return\n \"\"\"\n Predict sequence tags for Named Entity Recognition task\n :param sentences: a Sentence or a List of Sentence\n :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,\n up to a point when it has no more effect.\n :param all_tag_prob: True to compute the score for each tag on each token,\n otherwise only the score of the best tag is returned\n :param verbose: set to True to display a progress bar\n :param return_loss: set to True to return loss\n :param label_name: set this to change the name of the label type that is predicted\n :param embedding_storage_mode: default is 'none' which is always best. 
Only set to 'cpu' or 'gpu' if\n you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.\n 'gpu' to store embeddings in GPU memory.\n \"\"\"\n if label_name is None:\n label_name = self.get_current_label_type()\n\n # with torch.no_grad():\n if not sentences:\n return sentences\n\n if not isinstance(sentences, list):\n sentences = [sentences]\n\n reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)\n\n dataloader = DataLoader(\n dataset=FlairDatapointDataset(reordered_sentences),\n batch_size=mini_batch_size,\n )\n\n # progress bar for verbosity\n if verbose:\n dataloader = tqdm(dataloader)\n\n overall_loss = 0\n overall_count = 0\n with torch.no_grad():\n for batch in dataloader:\n\n batch = self._filter_empty_sentences(batch)\n # stop if all sentences are empty\n if not batch:\n continue\n\n # go through each sentence in the batch\n for sentence in batch:\n\n # always remove tags first\n sentence.remove_labels(label_name)\n\n all_labels = [label.decode(\"utf-8\") for label in self.get_current_label_dictionary().idx2item]\n\n all_detected = {}\n for label in all_labels:\n tars_sentence = self._get_tars_formatted_sentence(label, sentence)\n\n loss_and_count = self.tars_model.predict(\n tars_sentence,\n label_name=label_name,\n return_loss=True,\n )\n\n overall_loss += loss_and_count[0].item()\n overall_count += loss_and_count[1]\n\n for predicted in tars_sentence.get_labels(label_name):\n predicted.value = label\n all_detected[predicted] = predicted.score\n\n if most_probable_first:\n import operator\n\n already_set_indices: List[int] = []\n\n sorted_x = sorted(all_detected.items(), key=operator.itemgetter(1))\n sorted_x.reverse()\n for tuple in sorted_x:\n # get the span and its label\n label = tuple[0]\n # label = span.get_labels(\"tars_temp_label\")[0].value\n label_length = (\n 0 if not self.prefix else len(label.value.split(\" \")) + len(self.separator.split(\" \"))\n )\n\n # determine whether tokens in this span already have a label\n tag_this = True\n for token in label.span:\n corresponding_token = sentence.get_token(token.idx - label_length)\n if corresponding_token is None:\n tag_this = False\n continue\n if token.idx in already_set_indices:\n tag_this = False\n continue\n\n # only add if all tokens have no label\n if tag_this:\n already_set_indices.extend(token.idx for token in label.span)\n predicted_span = [sentence.get_token(token.idx - label_length) for token in label.span]\n sentence.add_complex_label(\n label_name,\n label=SpanLabel(Span(predicted_span), value=label.value, score=label.score),\n )\n\n # clearing token embeddings to save memory\n store_embeddings(batch, storage_mode=embedding_storage_mode)\n\n if return_loss:\n return overall_loss, overall_count\n\n\nclass TARSClassifier(FewshotClassifier):\n \"\"\"\n TARS model for text classification. In the backend, the model uses a BERT based binary\n text classifier which given a <label, text> pair predicts the probability of two classes\n \"True\", and \"False\". 
The input data is a usual Sentence object which is inflated\n    by the model internally before pushing it through the transformer stack of BERT.\n    \"\"\"\n\n    static_label_type = \"tars_label\"\n    LABEL_MATCH = \"YES\"\n    LABEL_NO_MATCH = \"NO\"\n\n    def __init__(\n        self,\n        task_name: Optional[str] = None,\n        label_dictionary: Optional[Dictionary] = None,\n        label_type: Optional[str] = None,\n        embeddings: Union[TransformerDocumentEmbeddings, str] = \"bert-base-uncased\",\n        num_negative_labels_to_sample: int = 2,\n        prefix: bool = True,\n        **tagger_args,\n    ):\n        \"\"\"\n        Initializes a TextClassifier\n        :param task_name: a string depicting the name of the task\n        :param label_dictionary: dictionary of labels you want to predict\n        :param embeddings: name of the pre-trained transformer model e.g.,\n        'bert-base-uncased' etc\n        :param num_negative_labels_to_sample: number of negative labels to sample for each\n        positive label against a sentence during training. Defaults to 2 negative\n        labels for each positive label. The model would sample all the negative labels\n        if None is passed. That slows down the training considerably.\n        :param multi_label: auto-detected by default, but you can set this to True\n        to force multi-label prediction or False to force single-label prediction\n        :param multi_label_threshold: If multi-label you can set the threshold to make predictions\n        :param beta: Parameter for F-beta score for evaluation and training annealing\n        \"\"\"\n        super(TARSClassifier, self).__init__()\n\n        if isinstance(embeddings, str):\n            embeddings = TransformerDocumentEmbeddings(\n                model=embeddings,\n                fine_tune=True,\n                layers=\"-1\",\n                layer_mean=False,\n            )\n\n        # prepare TARS dictionary\n        tars_dictionary = Dictionary(add_unk=False)\n        tars_dictionary.add_item(self.LABEL_NO_MATCH)\n        tars_dictionary.add_item(self.LABEL_MATCH)\n\n        # initialize a bare-bones sequence tagger\n        self.tars_model = TextClassifier(\n            document_embeddings=embeddings,\n            label_dictionary=tars_dictionary,\n            label_type=self.static_label_type,\n            **tagger_args,\n        )\n\n        # transformer separator\n        self.separator = str(self.tars_embeddings.tokenizer.sep_token)\n        if self.tars_embeddings.tokenizer._bos_token:\n            self.separator += str(self.tars_embeddings.tokenizer.bos_token)\n\n        self.prefix = prefix\n        self.num_negative_labels_to_sample = num_negative_labels_to_sample\n\n        if task_name and label_dictionary and label_type:\n            # Store task specific labels since TARS can handle multiple tasks\n            self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)\n        else:\n            log.info(\n                \"TARS initialized without a task. 
You need to call .add_and_switch_to_new_task() \"\n \"before training this model\"\n )\n\n self.clean_up_labels = True\n\n def _clean(self, label_value: str) -> str:\n if self.clean_up_labels:\n return label_value.replace(\"_\", \" \")\n else:\n return label_value\n\n def _get_tars_formatted_sentence(self, label, sentence):\n\n label = self._clean(label)\n\n original_text = sentence.to_tokenized_string()\n\n label_text_pair = (\n f\"{label} {self.separator} {original_text}\" if self.prefix else f\"{original_text} {self.separator} {label}\"\n )\n\n sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())]\n\n tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH\n\n tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label)\n\n return tars_sentence\n\n def _get_state_dict(self):\n model_state = {\n \"state_dict\": self.state_dict(),\n \"current_task\": self._current_task,\n \"label_type\": self.get_current_label_type(),\n \"label_dictionary\": self.get_current_label_dictionary(),\n \"tars_model\": self.tars_model,\n \"num_negative_labels_to_sample\": self.num_negative_labels_to_sample,\n \"task_specific_attributes\": self._task_specific_attributes,\n }\n return model_state\n\n @staticmethod\n def _init_model_with_state_dict(state):\n\n # init new TARS classifier\n label_dictionary = state[\"label_dictionary\"]\n label_type = \"default_label\" if not state[\"label_type\"] else state[\"label_type\"]\n\n model: TARSClassifier = TARSClassifier(\n task_name=state[\"current_task\"],\n label_dictionary=label_dictionary,\n label_type=label_type,\n embeddings=state[\"tars_model\"].document_embeddings,\n num_negative_labels_to_sample=state[\"num_negative_labels_to_sample\"],\n )\n\n # set all task information\n model._task_specific_attributes = state[\"task_specific_attributes\"]\n\n # linear layers of internal classifier\n model.load_state_dict(state[\"state_dict\"])\n return model\n\n @staticmethod\n def _fetch_model(model_name) -> str:\n\n model_map = {}\n hu_path: str = \"https://nlp.informatik.hu-berlin.de/resources/models\"\n\n model_map[\"tars-base\"] = \"/\".join([hu_path, \"tars-base\", \"tars-base-v8.pt\"])\n\n cache_dir = Path(\"models\")\n if model_name in model_map:\n model_name = cached_path(model_map[model_name], cache_dir=cache_dir)\n\n return model_name\n\n @property\n def tars_embeddings(self):\n return self.tars_model.document_embeddings\n\n def predict(\n self,\n sentences: Union[List[Sentence], Sentence],\n mini_batch_size=32,\n return_probabilities_for_all_classes: bool = False,\n verbose: bool = False,\n label_name: Optional[str] = None,\n return_loss=False,\n embedding_storage_mode=\"none\",\n label_threshold: float = 0.5,\n multi_label: Optional[bool] = None,\n ):\n \"\"\"\n Predict sequence tags for Named Entity Recognition task\n :param sentences: a Sentence or a List of Sentence\n :param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,\n up to a point when it has no more effect.\n :param all_tag_prob: True to compute the score for each tag on each token,\n otherwise only the score of the best tag is returned\n :param verbose: set to True to display a progress bar\n :param return_loss: set to True to return loss\n :param label_name: set this to change the name of the label type that is predicted\n :param embedding_storage_mode: default is 'none' which is always best. 
Only set to 'cpu' or 'gpu' if\n you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.\n 'gpu' to store embeddings in GPU memory.\n \"\"\"\n if label_name is None:\n label_name = self.get_current_label_type()\n\n if multi_label is None:\n multi_label = self.is_current_task_multi_label()\n\n # with torch.no_grad():\n if not sentences:\n return sentences\n\n if isinstance(sentences, Sentence):\n sentences = [sentences]\n\n # set context if not set already\n previous_sentence = None\n for sentence in sentences:\n if sentence.is_context_set():\n continue\n sentence._previous_sentence = previous_sentence\n sentence._next_sentence = None\n if previous_sentence:\n previous_sentence._next_sentence = sentence\n previous_sentence = sentence\n\n reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)\n\n dataloader = DataLoader(\n dataset=FlairDatapointDataset(reordered_sentences),\n batch_size=mini_batch_size,\n )\n\n # progress bar for verbosity\n if verbose:\n progressbar = tqdm(dataloader)\n progressbar.set_description(\"Batch inference\")\n dataloader = progressbar\n\n overall_loss = 0\n overall_count = 0\n batch_no = 0\n with torch.no_grad():\n for batch in dataloader:\n\n batch_no += 1\n\n batch = self._filter_empty_sentences(batch)\n # stop if all sentences are empty\n if not batch:\n continue\n\n # go through each sentence in the batch\n for sentence in batch:\n\n # always remove tags first\n sentence.remove_labels(label_name)\n\n all_labels = [label.decode(\"utf-8\") for label in self.get_current_label_dictionary().idx2item]\n\n best_label = None\n for label in all_labels:\n tars_sentence = self._get_tars_formatted_sentence(label, sentence)\n\n loss_and_count = self.tars_model.predict(\n tars_sentence,\n label_name=label_name,\n return_loss=True,\n return_probabilities_for_all_classes=True if label_threshold < 0.5 else False,\n )\n\n overall_loss += loss_and_count[0].item()\n overall_count += loss_and_count[1]\n\n # add all labels that according to TARS match the text and are above threshold\n for predicted_tars_label in tars_sentence.get_labels(label_name):\n if (\n predicted_tars_label.value == self.LABEL_MATCH\n and predicted_tars_label.score > label_threshold\n ):\n # do not add labels below confidence threshold\n sentence.add_label(label_name, label, predicted_tars_label.score)\n\n # only use label with highest confidence if enforcing single-label predictions\n if not multi_label:\n if len(sentence.get_labels(label_name)) > 0:\n # get all label scores and do an argmax to get the best label\n label_scores = torch.tensor(\n [label.score for label in sentence.get_labels(label_name)],\n dtype=torch.float,\n )\n best_label = sentence.get_labels(label_name)[torch.argmax(label_scores)]\n\n # remove previously added labels and only add the best label\n sentence.remove_labels(label_name)\n sentence.add_label(\n typename=label_name,\n value=best_label.value,\n score=best_label.score,\n )\n\n # clearing token embeddings to save memory\n store_embeddings(batch, storage_mode=embedding_storage_mode)\n\n if return_loss:\n return overall_loss, overall_count\n"
] |
[
[
"numpy.array",
"numpy.random.choice",
"numpy.sum",
"torch.no_grad",
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.preprocessing.minmax_scale",
"torch.argmax"
]
] |
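The flair TARS record above defines predict_zero_shot and maps the "tars-base" checkpoint in TARSClassifier._fetch_model. As a minimal usage sketch (assuming a flair release that exports TARSClassifier from flair.models and that the checkpoint download succeeds; the sentence and label names are illustrative only):

from flair.data import Sentence
from flair.models import TARSClassifier

# load the pre-trained checkpoint resolved by _fetch_model above
tars = TARSClassifier.load("tars-base")

sentence = Sentence("I am so glad you liked it!")

# predict_zero_shot builds a temporary "ZeroShot" task, runs predict(),
# then switches back to the previously active task and drops the temporary one
tars.predict_zero_shot(sentence, candidate_label_set={"happy", "sad"})
print(sentence.labels)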
Shivams9/pythoncodecamp
|
[
"3dcec5c529a0847df07c9dcc1424675754ce6376"
] |
[
"ml/Graph/pieChart2.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\ndef f(x):\n return 1-x\ndata=pd.read_csv(\"test.csv\")\nprint(data)\nroll=data[\"Rollno\"]\nt1 =data[\"t1\"]\nt2 = data[\"t2\"]\nprint(roll,t1,t2)\nplt.pie(t1,labels=roll,autopct=\"%1.2f%%\")\n\nplt.title(\"Marks in test1\")\nplt.show()\nplt.pie(t2,labels=roll,autopct=\"%1.2f%%\")\nplt.title(\"Marks in test2\")\nplt.show()\ndata[\"t2-t1\"]=data[\"t2\"]-data[\"t1\"]\nprint(data)\nplt.title(\"Marks in test1\")\nbenefit=0\nnotbenefit=0\nfor i in data['t2-t1']:\n if i>0:\n benefit +=1\n else:\n notbenefit +=1\nprint(benefit,notbenefit)\nplt.pie([benefit,notbenefit],labels=[\"Benefitted\",\"Not Benefitted\"],autopct=\"%1.2f%%\",explode=[0.1,0.1])\nplt.title(\"Deciding\")\nplt.show()\n\nrange=[\"0-15\",\"15-18\",\"18-21\",\"21-23\",\"23-26\"]\nn = [0,0,0,0,0]\nfor i in data[\"t1\"]:\n if i < 15:\n n[0] += 1\n elif i < 18:\n n[1] += 1\n elif i < 21:\n n[2] += 1\n elif i < 23:\n n[3] += 1\n elif i < 26:\n n[4] += 1\n\nplt.pie(n,labels=range,autopct=\"%1.2f%%\")\nplt.show()\nx = np.linspace(0,1,100)\nplt.plot(x,f(x),color=\"red\")\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.title(\"happening Vs Not happening\")\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"numpy.linspace",
"matplotlib.pyplot.show",
"pandas.read_csv"
]
] |
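pieChart2.py above bins the t1 marks with a manual elif chain before plotting. A hedged alternative sketch using pandas.cut (same assumed test.csv layout with Rollno/t1/t2 columns; note pandas.cut bins are right-inclusive, unlike the strict < comparisons above):

import pandas as pd
import matplotlib.pyplot as plt

data = pd.read_csv("test.csv")

# bin the test-1 marks into the same ranges, then count per bin
bins = [0, 15, 18, 21, 23, 26]
labels = ["0-15", "15-18", "18-21", "21-23", "23-26"]
counts = pd.cut(data["t1"], bins=bins, labels=labels).value_counts(sort=False)

plt.pie(counts, labels=counts.index, autopct="%1.2f%%")
plt.title("Marks in test1")
plt.show()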
flyingleafe/magenta
|
[
"2eb641e8f48c52e78d6b44fcbe9a7d168f787616"
] |
[
"magenta/models/score2perf/score2perf.py"
] |
[
"# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Performance generation from score in Tensor2Tensor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport itertools\n\nfrom magenta.models.score2perf import datagen_beam\nfrom magenta.models.score2perf import modalities\nfrom magenta.models.score2perf import music_encoders\nfrom note_seq import chord_symbols_lib\nfrom note_seq import sequences_lib\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.layers import modalities as t2t_modalities\nfrom tensor2tensor.models import transformer\nfrom tensor2tensor.utils import registry\nimport tensorflow.compat.v1 as tf\n\n\n# TODO(iansimon): figure out the best way not to hard-code these constants\n\nNUM_VELOCITY_BINS = 32\nSTEPS_PER_SECOND = 100\nMIN_PITCH = 21\nMAX_PITCH = 108\n\n# pylint: disable=line-too-long\nMAESTRO_TFRECORD_PATHS = {\n 'train': 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_train.tfrecord',\n 'dev': 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_validation.tfrecord',\n 'test': 'gs://magentadata/datasets/maestro/v1.0.0/maestro-v1.0.0_test.tfrecord'\n}\n# pylint: enable=line-too-long\n\n\nclass Score2PerfProblem(problem.Problem):\n \"\"\"Base class for musical score-to-performance problems.\n\n Data files contain tf.Example protos with encoded performance in 'targets' and\n optional encoded score in 'inputs'.\n \"\"\"\n\n @property\n def splits(self):\n \"\"\"Dictionary of split names and probabilities. Must sum to one.\"\"\"\n raise NotImplementedError()\n\n @property\n def min_hop_size_seconds(self):\n \"\"\"Minimum hop size in seconds at which to split input performances.\"\"\"\n raise NotImplementedError()\n\n @property\n def max_hop_size_seconds(self):\n \"\"\"Maximum hop size in seconds at which to split input performances.\"\"\"\n raise NotImplementedError()\n\n @property\n def num_replications(self):\n \"\"\"Number of times entire input performances will be split.\"\"\"\n return 1\n\n @property\n def add_eos_symbol(self):\n \"\"\"Whether to append EOS to encoded performances.\"\"\"\n raise NotImplementedError()\n\n @property\n def absolute_timing(self):\n \"\"\"Whether or not score should use absolute (vs. 
tempo-relative) timing.\"\"\"\n return False\n\n @property\n def stretch_factors(self):\n \"\"\"Temporal stretch factors for data augmentation (in datagen).\"\"\"\n return [1.0]\n\n @property\n def transpose_amounts(self):\n \"\"\"Pitch transposition amounts for data augmentation (in datagen).\"\"\"\n return [0]\n\n @property\n def random_crop_length_in_datagen(self):\n \"\"\"Randomly crop targets to this length in datagen.\"\"\"\n return None\n\n @property\n def random_crop_in_train(self):\n \"\"\"Whether to randomly crop each training example when preprocessing.\"\"\"\n return False\n\n @property\n def split_in_eval(self):\n \"\"\"Whether to split each eval example when preprocessing.\"\"\"\n return False\n\n def performances_input_transform(self, tmp_dir):\n \"\"\"Input performances beam transform (or dictionary thereof) for datagen.\"\"\"\n raise NotImplementedError()\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n del task_id\n\n def augment_note_sequence(ns, stretch_factor, transpose_amount):\n \"\"\"Augment a NoteSequence by time stretch and pitch transposition.\"\"\"\n augmented_ns = sequences_lib.stretch_note_sequence(\n ns, stretch_factor, in_place=False)\n try:\n _, num_deleted_notes = sequences_lib.transpose_note_sequence(\n augmented_ns, transpose_amount,\n min_allowed_pitch=MIN_PITCH, max_allowed_pitch=MAX_PITCH,\n in_place=True)\n except chord_symbols_lib.ChordSymbolError:\n raise datagen_beam.DataAugmentationError(\n 'Transposition of chord symbol(s) failed.')\n if num_deleted_notes:\n raise datagen_beam.DataAugmentationError(\n 'Transposition caused out-of-range pitch(es).')\n return augmented_ns\n\n augment_params = itertools.product(\n self.stretch_factors, self.transpose_amounts)\n augment_fns = [\n functools.partial(augment_note_sequence,\n stretch_factor=s, transpose_amount=t)\n for s, t in augment_params\n ]\n\n datagen_beam.generate_examples(\n input_transform=self.performances_input_transform(tmp_dir),\n output_dir=data_dir,\n problem_name=self.dataset_filename(),\n splits=self.splits,\n min_hop_size_seconds=self.min_hop_size_seconds,\n max_hop_size_seconds=self.max_hop_size_seconds,\n min_pitch=MIN_PITCH,\n max_pitch=MAX_PITCH,\n num_replications=self.num_replications,\n encode_performance_fn=self.performance_encoder().encode_note_sequence,\n encode_score_fns=dict((name, encoder.encode_note_sequence)\n for name, encoder in self.score_encoders()),\n augment_fns=augment_fns,\n absolute_timing=self.absolute_timing,\n random_crop_length=self.random_crop_length_in_datagen)\n\n def hparams(self, defaults, model_hparams):\n del model_hparams # unused\n perf_encoder = self.get_feature_encoders()['targets']\n defaults.modality = {'targets': t2t_modalities.ModalityType.SYMBOL}\n defaults.vocab_size = {'targets': perf_encoder.vocab_size}\n if self.has_inputs:\n score_encoder = self.get_feature_encoders()['inputs']\n if isinstance(score_encoder.vocab_size, list):\n # TODO(trandustin): We default to not applying any transformation; to\n # apply one, pass modalities.bottom to the model's hparams.bottom. 
In\n # future, refactor the tuple of the \"inputs\" feature to be part of the\n # features dict itself, i.e., have multiple inputs each with its own\n # modality and vocab size.\n modality_cls = t2t_modalities.ModalityType.IDENTITY\n else:\n modality_cls = t2t_modalities.ModalityType.SYMBOL\n defaults.modality['inputs'] = modality_cls\n defaults.vocab_size['inputs'] = score_encoder.vocab_size\n\n def performance_encoder(self):\n \"\"\"Encoder for target performances.\"\"\"\n return music_encoders.MidiPerformanceEncoder(\n steps_per_second=STEPS_PER_SECOND,\n num_velocity_bins=NUM_VELOCITY_BINS,\n min_pitch=MIN_PITCH,\n max_pitch=MAX_PITCH,\n add_eos=self.add_eos_symbol)\n\n def score_encoders(self):\n \"\"\"List of (name, encoder) tuples for input score components.\"\"\"\n return []\n\n def feature_encoders(self, data_dir):\n del data_dir\n encoders = {\n 'targets': self.performance_encoder()\n }\n score_encoders = self.score_encoders()\n if score_encoders:\n if len(score_encoders) > 1:\n # Create a composite score encoder, only used for inference.\n encoders['inputs'] = music_encoders.CompositeScoreEncoder(\n [encoder for _, encoder in score_encoders])\n else:\n # If only one score component, just use its encoder.\n _, encoders['inputs'] = score_encoders[0]\n return encoders\n\n def example_reading_spec(self):\n data_fields = {\n 'targets': tf.VarLenFeature(tf.int64)\n }\n for name, _ in self.score_encoders():\n data_fields[name] = tf.VarLenFeature(tf.int64)\n\n # We don't actually \"decode\" anything here; the encodings are simply read as\n # tensors.\n data_items_to_decoders = None\n\n return data_fields, data_items_to_decoders\n\n def preprocess_example(self, example, mode, hparams):\n if self.has_inputs:\n # Stack encoded score components depthwise as inputs.\n inputs = []\n for name, _ in self.score_encoders():\n inputs.append(tf.expand_dims(example[name], axis=1))\n del example[name]\n example['inputs'] = tf.stack(inputs, axis=2)\n\n if self.random_crop_in_train and mode == tf.estimator.ModeKeys.TRAIN:\n # Take a random crop of the training example.\n assert not self.has_inputs\n max_offset = tf.maximum(\n tf.shape(example['targets'])[0] - hparams.max_target_seq_length, 0)\n offset = tf.cond(\n max_offset > 0,\n lambda: tf.random_uniform([], maxval=max_offset, dtype=tf.int32),\n lambda: 0\n )\n example['targets'] = (\n example['targets'][offset:offset + hparams.max_target_seq_length])\n return example\n\n elif self.split_in_eval and mode == tf.estimator.ModeKeys.EVAL:\n # Split the example into non-overlapping segments.\n assert not self.has_inputs\n length = tf.shape(example['targets'])[0]\n extra_length = tf.mod(length, hparams.max_target_seq_length)\n examples = {\n 'targets': tf.reshape(\n example['targets'][:length - extra_length],\n [-1, hparams.max_target_seq_length, 1, 1])\n }\n extra_example = {\n 'targets': tf.reshape(\n example['targets'][-extra_length:], [1, -1, 1, 1])\n }\n dataset = tf.data.Dataset.from_tensor_slices(examples)\n extra_dataset = tf.data.Dataset.from_tensor_slices(extra_example)\n return dataset.concatenate(extra_dataset)\n\n else:\n # If not cropping or splitting, do standard preprocessing.\n return super(Score2PerfProblem, self).preprocess_example(\n example, mode, hparams)\n\n\nclass ConditionalScore2PerfProblem(Score2PerfProblem):\n \"\"\"Lightweight version of base class for musical score-to-performance problems.\n\n This version incorporates one performance conditioning signal.\n Data files contain tf.Example protos with encoded performance in 
'targets' and\n optional encoded score in 'inputs'.\n \"\"\"\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n del task_id\n\n def augment_note_sequence(ns, stretch_factor, transpose_amount):\n \"\"\"Augment a NoteSequence by time stretch and pitch transposition.\"\"\"\n augmented_ns = sequences_lib.stretch_note_sequence(\n ns, stretch_factor, in_place=False)\n try:\n _, num_deleted_notes = sequences_lib.transpose_note_sequence(\n augmented_ns, transpose_amount,\n min_allowed_pitch=MIN_PITCH, max_allowed_pitch=MAX_PITCH,\n in_place=True)\n except chord_symbols_lib.ChordSymbolError:\n raise datagen_beam.DataAugmentationError(\n 'Transposition of chord symbol(s) failed.')\n if num_deleted_notes:\n raise datagen_beam.DataAugmentationError(\n 'Transposition caused out-of-range pitch(es).')\n return augmented_ns\n\n augment_params = itertools.product(\n self.stretch_factors, self.transpose_amounts)\n augment_fns = [\n functools.partial(augment_note_sequence,\n stretch_factor=s, transpose_amount=t)\n for s, t in augment_params\n ]\n\n datagen_beam.generate_conditional_examples(\n input_transform=self.performances_input_transform(tmp_dir),\n output_dir=data_dir,\n problem_name=self.dataset_filename(),\n splits=self.splits,\n min_pitch=MIN_PITCH,\n max_pitch=MAX_PITCH,\n melody=False,\n noisy=False,\n encode_performance_fn=self.performance_encoder().encode_note_sequence,\n encode_score_fns=dict((name, encoder.encode_note_sequence)\n for name, encoder in self.score_encoders()),\n augment_fns=augment_fns,\n num_replications=self.num_replications)\n\n def example_reading_spec(self):\n data_fields = {\n 'inputs': tf.VarLenFeature(tf.int64),\n 'targets': tf.VarLenFeature(tf.int64)\n }\n for name, _ in self.score_encoders():\n data_fields[name] = tf.VarLenFeature(tf.int64)\n\n # We don't actually \"decode\" anything here; the encodings are simply read as\n # tensors.\n data_items_to_decoders = None\n\n return data_fields, data_items_to_decoders\n\n def preprocess_example(self, example, mode, hparams):\n return problem.preprocess_example_common(example, mode, hparams)\n\n\nclass ConditionalMelodyScore2PerfProblem(Score2PerfProblem):\n \"\"\"Lightweight version of base class for musical score-to-performance problems.\n\n This version incorporates one performance conditioning signal.\n Data files contain tf.Example protos with encoded performance in 'targets' and\n encoded score in 'melody' and 'performance'.\n \"\"\"\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n del task_id\n\n def augment_note_sequence(ns, stretch_factor, transpose_amount):\n \"\"\"Augment a NoteSequence by time stretch and pitch transposition.\"\"\"\n augmented_ns = sequences_lib.stretch_note_sequence(\n ns, stretch_factor, in_place=False)\n try:\n _, num_deleted_notes = sequences_lib.transpose_note_sequence(\n augmented_ns, transpose_amount,\n min_allowed_pitch=MIN_PITCH, max_allowed_pitch=MAX_PITCH,\n in_place=True)\n except chord_symbols_lib.ChordSymbolError:\n raise datagen_beam.DataAugmentationError(\n 'Transposition of chord symbol(s) failed.')\n if num_deleted_notes:\n raise datagen_beam.DataAugmentationError(\n 'Transposition caused out-of-range pitch(es).')\n return augmented_ns\n\n augment_params = itertools.product(\n self.stretch_factors, self.transpose_amounts)\n augment_fns = [\n functools.partial(augment_note_sequence,\n stretch_factor=s, transpose_amount=t)\n for s, t in augment_params\n ]\n datagen_beam.generate_conditional_examples(\n 
input_transform=self.performances_input_transform(tmp_dir),\n output_dir=data_dir,\n problem_name=self.dataset_filename(),\n splits=self.splits,\n min_pitch=MIN_PITCH,\n max_pitch=MAX_PITCH,\n melody=True,\n noisy=False,\n encode_performance_fn=self.performance_encoder().encode_note_sequence,\n encode_score_fns=dict((name, encoder.encode_note_sequence)\n for name, encoder in self.score_encoders()),\n augment_fns=augment_fns,\n num_replications=self.num_replications)\n\n def hparams(self, defaults, model_hparams):\n del model_hparams # unused\n perf_encoder = self.get_feature_encoders()['targets']\n defaults.modality = {'targets': t2t_modalities.ModalityType.SYMBOL}\n defaults.vocab_size = {'targets': perf_encoder.vocab_size}\n if self.has_inputs:\n score_encoder = self.score_encoders()\n # iterate over each score encoder and update modality/vocab_size\n for name, se in score_encoder:\n defaults.modality[name] = t2t_modalities.ModalityType.SYMBOL\n defaults.vocab_size[name] = se.vocab_size\n\n def feature_encoders(self, data_dir):\n del data_dir\n encoders = {\n 'targets': self.performance_encoder()\n }\n score_encoders = self.score_encoders()\n # CompositeScoreEncoder is tricky, so using a list of encoders instead.\n if len(score_encoders) > 1:\n for name, encoder in score_encoders:\n encoders[name] = encoder\n else:\n # If only one score component, just use its encoder.\n _, encoders['inputs'] = score_encoders[0]\n return encoders\n\n def example_reading_spec(self):\n data_fields = {\n 'targets': tf.VarLenFeature(tf.int64),\n }\n for name, _ in self.score_encoders():\n data_fields[name] = tf.VarLenFeature(tf.int64)\n\n # We don't actually \"decode\" anything here; the encodings are simply read as\n # tensors.\n data_items_to_decoders = None\n\n return data_fields, data_items_to_decoders\n\n def preprocess_example(self, example, mode, hparams):\n return problem.preprocess_example_common(example, mode, hparams)\n\n\nclass ConditionalMelodyNoisyScore2PerfProblem(\n ConditionalMelodyScore2PerfProblem):\n \"\"\"Lightweight version of base class for musical score-to-performance problems.\n\n This version incorporates one performance conditioning signal.\n Data files contain tf.Example protos with encoded performance in 'targets' and\n encoded score in 'melody' and 'performance'.\n \"\"\"\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n del task_id\n\n def augment_note_sequence(ns, stretch_factor, transpose_amount):\n \"\"\"Augment a NoteSequence by time stretch and pitch transposition.\"\"\"\n augmented_ns = sequences_lib.stretch_note_sequence(\n ns, stretch_factor, in_place=False)\n try:\n _, num_deleted_notes = sequences_lib.transpose_note_sequence(\n augmented_ns, transpose_amount,\n min_allowed_pitch=MIN_PITCH, max_allowed_pitch=MAX_PITCH,\n in_place=True)\n except chord_symbols_lib.ChordSymbolError:\n raise datagen_beam.DataAugmentationError(\n 'Transposition of chord symbol(s) failed.')\n if num_deleted_notes:\n raise datagen_beam.DataAugmentationError(\n 'Transposition caused out-of-range pitch(es).')\n return augmented_ns\n\n augment_params = itertools.product(\n self.stretch_factors, self.transpose_amounts)\n augment_fns = [\n functools.partial(augment_note_sequence,\n stretch_factor=s, transpose_amount=t)\n for s, t in augment_params\n ]\n datagen_beam.generate_conditional_examples(\n input_transform=self.performances_input_transform(tmp_dir),\n output_dir=data_dir,\n problem_name=self.dataset_filename(),\n splits=self.splits,\n min_pitch=MIN_PITCH,\n 
max_pitch=MAX_PITCH,\n melody=True,\n noisy=True,\n encode_performance_fn=self.performance_encoder().encode_note_sequence,\n encode_score_fns=dict((name, encoder.encode_note_sequence)\n for name, encoder in self.score_encoders()),\n augment_fns=augment_fns,\n num_replications=self.num_replications)\n\n\nclass Chords2PerfProblem(Score2PerfProblem):\n \"\"\"Base class for musical chords-to-performance problems.\"\"\"\n\n def score_encoders(self):\n return [('chords', music_encoders.TextChordsEncoder(steps_per_quarter=1))]\n\n\nclass Melody2PerfProblem(Score2PerfProblem):\n \"\"\"Base class for musical melody-to-performance problems.\"\"\"\n\n def score_encoders(self):\n return [\n ('melody', music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=MIN_PITCH, max_pitch=MAX_PITCH))\n ]\n\n\nclass AbsoluteMelody2PerfProblem(Score2PerfProblem):\n \"\"\"Base class for musical (absolute-timed) melody-to-performance problems.\"\"\"\n\n @property\n def absolute_timing(self):\n return True\n\n def score_encoders(self):\n return [\n ('melody', music_encoders.TextMelodyEncoderAbsolute(\n steps_per_second=10, min_pitch=MIN_PITCH, max_pitch=MAX_PITCH))\n ]\n\n\nclass LeadSheet2PerfProblem(Score2PerfProblem):\n \"\"\"Base class for musical lead-sheet-to-performance problems.\"\"\"\n\n def score_encoders(self):\n return [\n ('chords', music_encoders.TextChordsEncoder(steps_per_quarter=4)),\n ('melody', music_encoders.TextMelodyEncoder(\n steps_per_quarter=4, min_pitch=MIN_PITCH, max_pitch=MAX_PITCH))\n ]\n\n\n@registry.register_problem('score2perf_maestro_language_uncropped_aug')\nclass Score2PerfMaestroLanguageUncroppedAug(Score2PerfProblem):\n \"\"\"Piano performance language model on the MAESTRO dataset.\"\"\"\n\n def performances_input_transform(self, tmp_dir):\n del tmp_dir\n return dict(\n (split_name, datagen_beam.ReadNoteSequencesFromTFRecord(tfrecord_path))\n for split_name, tfrecord_path in MAESTRO_TFRECORD_PATHS.items())\n\n @property\n def splits(self):\n return None\n\n @property\n def min_hop_size_seconds(self):\n return 0.0\n\n @property\n def max_hop_size_seconds(self):\n return 0.0\n\n @property\n def add_eos_symbol(self):\n return False\n\n @property\n def stretch_factors(self):\n # Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.\n return [0.95, 0.975, 1.0, 1.025, 1.05]\n\n @property\n def transpose_amounts(self):\n # Transpose no more than a minor third.\n return [-3, -2, -1, 0, 1, 2, 3]\n\n @property\n def random_crop_in_train(self):\n return True\n\n @property\n def split_in_eval(self):\n return True\n\n\n@registry.register_problem('score2perf_maestro_absmel2perf_5s_to_30s_aug10x')\nclass Score2PerfMaestroAbsMel2Perf5sTo30sAug10x(AbsoluteMelody2PerfProblem):\n \"\"\"Generate performances from an absolute-timed melody, with augmentation.\"\"\"\n\n def performances_input_transform(self, tmp_dir):\n del tmp_dir\n return dict(\n (split_name, datagen_beam.ReadNoteSequencesFromTFRecord(tfrecord_path))\n for split_name, tfrecord_path in MAESTRO_TFRECORD_PATHS.items())\n\n @property\n def splits(self):\n return None\n\n @property\n def min_hop_size_seconds(self):\n return 5.0\n\n @property\n def max_hop_size_seconds(self):\n return 30.0\n\n @property\n def num_replications(self):\n return 10\n\n @property\n def add_eos_symbol(self):\n return True\n\n @property\n def stretch_factors(self):\n # Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.\n return [0.95, 0.975, 1.0, 1.025, 1.05]\n\n @property\n def transpose_amounts(self):\n # Transpose no more than a minor third.\n return [-3, -2, -1, 0, 1, 2, 
3]\n\n\n@registry.register_problem('score2perf_maestro_perf_conditional_aug_10x')\nclass Score2PerfMaestroPerfConditionalAug10x(ConditionalScore2PerfProblem):\n \"\"\"Generate performances from scratch (or from primer).\"\"\"\n\n def performances_input_transform(self, tmp_dir):\n del tmp_dir\n return dict(\n (split_name, datagen_beam.ReadNoteSequencesFromTFRecord(tfrecord_path))\n for split_name, tfrecord_path in MAESTRO_TFRECORD_PATHS.items())\n\n @property\n def splits(self):\n return\n\n @property\n def num_replications(self):\n return 10\n\n @property\n def add_eos_symbol(self):\n return False\n\n @property\n def stretch_factors(self):\n # Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.\n return [0.95, 0.975, 1.0, 1.025, 1.05]\n\n @property\n def transpose_amounts(self):\n # Transpose no more than a minor third.\n return [-3, -2, -1, 0, 1, 2, 3]\n\n @property\n def has_inputs(self):\n encoders = self.get_feature_encoders()\n return ('performance' in encoders) or ('inputs' in encoders)\n\n def score_encoders(self):\n return [\n ('performance', music_encoders.MidiPerformanceEncoder(\n steps_per_second=100,\n num_velocity_bins=32,\n min_pitch=21,\n max_pitch=108,\n add_eos=self.add_eos_symbol))\n ]\n\n\n@registry.register_problem('score2perf_maestro_mel_perf_conditional_aug_10x')\nclass Score2PerfMaestroMelPerfConditionalAug10x(\n ConditionalMelodyScore2PerfProblem):\n \"\"\"Generate performances from scratch (or from primer).\"\"\"\n\n def performances_input_transform(self, tmp_dir):\n del tmp_dir\n return dict(\n (split_name, datagen_beam.ReadNoteSequencesFromTFRecord(tfrecord_path))\n for split_name, tfrecord_path in MAESTRO_TFRECORD_PATHS.items())\n\n @property\n def splits(self):\n return\n\n @property\n def num_replications(self):\n return 10\n\n @property\n def add_eos_symbol(self):\n return False\n\n @property\n def stretch_factors(self):\n # Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.\n return [0.95, 0.975, 1.0, 1.025, 1.05]\n\n @property\n def transpose_amounts(self):\n # Transpose no more than a minor third.\n return [-3, -2, -1, 0, 1, 2, 3]\n\n @property\n def has_inputs(self):\n encoders = self.get_feature_encoders()\n return ('performance' in encoders) or ('inputs' in encoders)\n\n def score_encoders(self):\n return [\n ('performance', music_encoders.MidiPerformanceEncoder(\n steps_per_second=100,\n num_velocity_bins=32,\n min_pitch=21,\n max_pitch=108,\n add_eos=self.add_eos_symbol)),\n ('melody', music_encoders.TextMelodyEncoderAbsolute(\n steps_per_second=10, min_pitch=21, max_pitch=108))\n ]\n\n\n@registry.register_problem('score2perf_maestro_mel_perf_conditional_noisy_10x')\nclass Score2PerfMaestroMelPerfConditionalNoisy10x(\n ConditionalMelodyNoisyScore2PerfProblem):\n \"\"\"Generate performances from scratch (or from primer).\"\"\"\n\n def performances_input_transform(self, tmp_dir):\n del tmp_dir\n return dict(\n (split_name, datagen_beam.ReadNoteSequencesFromTFRecord(tfrecord_path))\n for split_name, tfrecord_path in MAESTRO_TFRECORD_PATHS.items())\n\n @property\n def splits(self):\n return\n\n @property\n def num_replications(self):\n return 10\n\n @property\n def add_eos_symbol(self):\n return False\n\n @property\n def stretch_factors(self):\n # Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.\n return [0.95, 0.975, 1.0, 1.025, 1.05]\n\n @property\n def transpose_amounts(self):\n # Transpose no more than a minor third.\n return [-3, -2, -1, 0, 1, 2, 3]\n\n @property\n def has_inputs(self):\n encoders = self.get_feature_encoders()\n return ('performance' in encoders) or ('inputs' in 
encoders)\n\n def score_encoders(self):\n return [\n ('performance', music_encoders.MidiPerformanceEncoder(\n steps_per_second=100,\n num_velocity_bins=32,\n min_pitch=21,\n max_pitch=108,\n add_eos=self.add_eos_symbol)),\n ('melody', music_encoders.TextMelodyEncoderAbsolute(\n steps_per_second=10, min_pitch=21, max_pitch=108))\n ]\n\n\n@registry.register_hparams\ndef score2perf_transformer_base():\n hparams = transformer.transformer_base()\n hparams.bottom['inputs'] = modalities.bottom\n return hparams\n"
] |
[
[
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.VarLenFeature",
"tensorflow.compat.v1.data.Dataset.from_tensor_slices",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.mod",
"tensorflow.compat.v1.random_uniform"
]
] |
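The densest TF logic in score2perf.py above is the random-crop branch of Score2PerfProblem.preprocess_example. The standalone sketch below replays that offset arithmetic with the same tf.compat.v1 ops on a dummy tensor; the tensor contents and crop length are illustrative stand-ins, not values from the MAESTRO pipeline:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

targets = tf.range(100)         # stand-in for example['targets']
max_target_seq_length = 32      # stand-in for hparams.max_target_seq_length

# choose a random crop offset, falling back to 0 for short sequences
max_offset = tf.maximum(tf.shape(targets)[0] - max_target_seq_length, 0)
offset = tf.cond(
    max_offset > 0,
    lambda: tf.random_uniform([], maxval=max_offset, dtype=tf.int32),
    lambda: tf.constant(0, dtype=tf.int32))
cropped = targets[offset:offset + max_target_seq_length]

with tf.Session() as sess:
    print(sess.run(cropped))    # 32 consecutive values from the dummy range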
sreecodeslayer/udemy-machine-learning
|
[
"11fb166358a29993ed352fb204ab79e04bd9c05e"
] |
[
"00_DataPreprocessing/data_preprocessing_template.py"
] |
[
"# Data Preprocessing Template\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 3].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\n\"\"\"from sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\ny_train = sc_y.fit_transform(y_train)\"\"\""
] |
[
[
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
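The preprocessing template above ships with its feature-scaling block commented out. One hedged way to enable it, on synthetic arrays rather than the Data.csv it assumes (note that current scikit-learn StandardScaler requires 2-D input, so the 1-D y from the template must be reshaped):

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

X = np.random.rand(20, 3)   # synthetic stand-in for dataset.iloc[:, :-1]
y = np.random.rand(20)      # synthetic stand-in for dataset.iloc[:, 3]

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)

# scaling the target requires a 2-D array in modern scikit-learn
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train.reshape(-1, 1)).ravel()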
KarlRong/Safe-RL-for-Driving
|
[
"67484911ca8ad9f1476e96043c379c01cd5ced8c"
] |
[
"traci_pedestrian_crossing/movexy_ped.py"
] |
[
"# the TestEnv environment is used to simply simulate the network\nfrom flow.envs import TestEnv\n\n# the Experiment class is used for running simulations\nfrom flow.core.experiment import Experiment\n\n# the base network class\nfrom flow.networks import Network\nfrom flow.envs.base import Env\n\n# all other imports are standard\nfrom flow.core.params import VehicleParams, SumoCarFollowingParams, SumoLaneChangeParams\nfrom flow.controllers import IDMController\nfrom flow.core.params import InFlows\nfrom flow.core.params import NetParams\nfrom flow.core.params import TrafficLightParams\nfrom flow.core.params import InitialConfig\nfrom flow.core.params import EnvParams\n\nfrom flow.controllers import IDMController, RLController, StaticLaneChanger\n\nfrom gym.spaces.box import Box\nimport numpy as np\nimport collections\n\n# create some default parameters parameters\nHORIZON = 3000\nenv_params = EnvParams(\n horizon=HORIZON,\n sims_per_step=1,\n warmup_steps=0,\n additional_params={\n \"max_accel\": 3,\n \"max_decel\": -2,\n \"target_velocity\": 20,\n \"lane_change_duration\": 4,\n \"num_rl\": 5,\n })\ninitial_config = InitialConfig(edges_distribution=['highway_0'])\n\nvehicles = VehicleParams()\nvehicles.add(\n veh_id=\"human\",\n acceleration_controller=(IDMController, {\n \"noise\": 0.2\n }),\n # lane_change_controller=(StaticLaneChanger, {}),\n car_following_params=SumoCarFollowingParams(\n speed_mode=\"obey_safe_speed\",\n ),\n lane_change_params=SumoLaneChangeParams(\n lane_change_mode=1621,\n model=\"SL2015\",\n lc_impatience=\"0.1\",\n lc_time_to_impatience=\"1.0\"\n ))\nvehicles.add(\n veh_id=\"rl\",\n acceleration_controller=(RLController, {}),\n lane_change_controller=(StaticLaneChanger, {}),\n # routing_controller=(HighwayRouter, {}),\n car_following_params=SumoCarFollowingParams(\n speed_mode=\"obey_safe_speed\",\n ),\n lane_change_params=SumoLaneChangeParams(\n lane_change_mode=256,\n model=\"SL2015\",\n lc_impatience=\"0.1\",\n lc_time_to_impatience=\"1.0\"\n ),\n num_vehicles=0)\n\nfrom flow.core.params import SumoParams\n\nsim_params = SumoParams(\n sim_step=0.2,\n render=True,\n lateral_resolution=1.0,\n restart_instance=True,\n )\n\nimport os\n\ninflow = InFlows()\ninflow.add(veh_type=\"human\",\n edge=\"WC\",\n # depart_lane=\"best\",\n depart_lane=1,\n arrivalLane=0,\n probability=0.1,\n depart_speed=\"random\",\n )\ninflow.add(veh_type=\"human\",\n edge=\"WC\",\n # depart_lane=\"best\",\n depart_lane=0,\n arrivalLane=1,\n probability=0.1,\n depart_speed=\"random\",\n )\ninflow.add(veh_type=\"human\",\n edge=\"EC\",\n # depart_lane=\"best\",\n # vehs_per_hour=2000,\n depart_lane=1,\n arrivalLane=0,\n probability=0.1,\n depart_speed=\"random\",\n )\ninflow.add(veh_type=\"human\",\n edge=\"EC\",\n # depart_lane=\"best\",\n # vehs_per_hour=2000,\n depart_lane=0,\n arrivalLane=1,\n probability=0.1,\n depart_speed=\"random\",\n )\ninflow.add(\n veh_type=\"rl\",\n edge=\"WC\",\n vehs_per_hour=100,\n depart_lane=\"free\",\n depart_speed=5)\n\nnet_params = NetParams(\n template={\n \"net\":\"/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/pedcrossing.net.xml\",\n # features associated with the routes vehicles take\n \"vtype\": \"/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/pedcrossing.add.xml\",\n # ๅไธๆนspecify_routesไธ่ด\n \"rou\":\"/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/data/pedcrossing.rou.xml\",\n \"trip\":\"/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/pedestrians.trip.xml\"\n },\n inflows=inflow,\n)\n\n# specify the edges 
vehicles can originate on\ninitial_config = InitialConfig(\n    edges_distribution=[\"WC\"]\n)\n\ntl_logic = TrafficLightParams(baseline=False)\nphases = [{\"duration\": \"100000\", \"state\": \"GGGGr\"},\n          {\"duration\": \"4\", \"state\": \"yyyyr\"},\n          {\"duration\": \"10\", \"state\": \"rrrrG\"},\n          {\"duration\": \"10\", \"state\": \"rrrrr\"}]\ntl_logic.add(\"C\", phases=phases, programID=\"custom\", offset=\"0\")\n\n\n# specify the routes for vehicles in the network\nclass PedCrossing(Network):\n\n    def specify_routes(self, net_params):\n        return {'EC': ['EC', 'CW'],\n                'WC': ['WC', 'CE']}\n\n\nclass MoveXYPedEnv(Env):\n\n    def __init__(self, env_params, sim_params, network, simulator='traci'):\n        super().__init__(env_params, sim_params, network, simulator)\n        # environment-related state\n        self.activeRequest = False\n        self.greenTimeSoFar = 0\n        # minimum green time for the vehicles\n        self.MIN_GREEN_TIME = 15\n        # the first phase in tls plan. see 'pedcrossing.tll.xml'\n        self.VEHICLE_GREEN_PHASE = 0\n        self.PEDESTRIAN_GREEN_PHASE = 2\n        # the id of the traffic light (there is only one). This is identical to the\n        # id of the controlled intersection (by default)\n        self.TLSID = 'C'\n        # pedestrian edges at the controlled intersection\n        self.WALKINGAREAS = [':C_w0', ':C_w1']\n        self.CROSSINGS = [':C_c0']\n        # MoveXY-related state\n        self.num_lanes = max(self.k.network.num_lanes(edge)\n                             for edge in self.k.network.get_edge_list())\n        self.visible = []\n        self.stuck = False\n        # variables used to sort vehicles by their initial position plus\n        # distance traveled\n        self.prev_pos = dict()\n        self.absolute_position = dict()\n\n        # maximum number of controlled vehicles\n        self.num_rl = env_params.additional_params[\"num_rl\"]\n\n        # queue of rl vehicles waiting to be controlled\n        self.rl_queue = collections.deque()\n\n        # names of the rl vehicles controlled at any step\n        self.rl_veh = []\n\n        # used for visualization: the vehicles behind and after RL vehicles\n        # (ie the observed vehicles) will have a different color\n        self.leader = []\n        self.follower = []\n\n    @property\n    def action_space(self):\n        \"\"\"See class definition.\"\"\"\n        max_decel = self.env_params.additional_params[\"max_decel\"]\n        max_accel = self.env_params.additional_params[\"max_accel\"]\n\n        lb = [1, -0.2] * self.num_rl\n        ub = [2, 0.2] * self.num_rl\n        # print(\"num_rl_vehicles:\", self.num_rl)\n        return Box(np.array(lb), np.array(ub), dtype=np.float32)\n\n    @property\n    def observation_space(self):\n        \"\"\"See class definition.\"\"\"\n        # print(\"observation space shape: \", 4 * self.num_rl *\n        #       self.num_lanes + self.num_rl)\n        return Box(\n            low=-1000,\n            high=3000,\n            shape=(4 * self.num_rl *\n                   self.num_lanes + 2 * self.num_rl, ),\n            dtype=np.float32)\n\n    def compute_reward(self, rl_actions, **kwargs):\n        \"\"\"See class definition.\"\"\"\n        reward = 0\n\n        # reward rl vehicles for driving near the target velocity,\n        # and penalize stopped vehicles\n        rl_velocity = np.array(self.k.vehicle.get_speed(self.rl_veh))\n        target_vel = self.env_params.additional_params['target_velocity']\n        max_cost = np.array([target_vel] * self.num_rl)\n        max_cost = np.linalg.norm(max_cost)\n\n        cost = rl_velocity - target_vel\n        cost = np.linalg.norm(cost)\n        # epsilon term (to deal with ZeroDivisionError exceptions)\n        eps = np.finfo(np.float32).eps\n        reward += max(max_cost - cost, 0) / (max_cost + eps)\n\n        gain = 0.5\n        thresh = 0.3\n        penalize = len(rl_velocity[rl_velocity < thresh])\n        reward -= gain * penalize\n\n        # punish excessive lane changes by reducing the reward by a set value\n        # every time an rl car changes lanes (10% of max reward)\n        for veh_id in self.rl_veh:\n            if self.k.vehicle.get_last_lc(veh_id) == self.time_counter:\n                reward -= 10\n\n        if self.stuck:\n            reward -= 100\n        # print(\"reward: \", reward)\n        return reward\n\n    def _apply_rl_actions(self, actions):\n        \"\"\"See class definition.\"\"\"\n        acceleration = actions[::2]\n        direction = actions[1::2]\n\n        # represents vehicles that are allowed to change lanes\n        # non_lane_changing_veh = []\n        # non_lane_changing_veh = \\\n        #     [self.time_counter <=\n        #      self.env_params.additional_params[\"lane_change_duration\"]\n        #      + self.k.vehicle.get_last_lc(veh_id)\n        #      for veh_id in self.rl_veh]\n        # # vehicle that are not allowed to change have their directions set to 0\n        # print(non_lane_changing_veh)\n        # direction[non_lane_changing_veh] = \\\n        #     np.array([0] * sum(non_lane_changing_veh))\n        for i, veh_id in enumerate(self.rl_veh):\n            if self.time_counter <= self.env_params.additional_params[\"lane_change_duration\"]\\\n                    + self.k.vehicle.get_last_lc(veh_id):\n                direction[i] = 0\n            x, y = self.k.vehicle.kernel_api.vehicle.getPosition(veh_id)\n            print(x, y)\n            print(\"edgeID\", self.k.vehicle.get_edge(veh_id))\n            print(\"lane\", self.k.vehicle.get_lane(veh_id))\n            self.k.vehicle.kernel_api.vehicle.moveToXY(vehID=veh_id,\n                                                       edgeID=\"highway_1\",\n                                                       lane=1,\n                                                       x=x+acceleration[i],\n                                                       y=y+direction[i],\n                                                       keepRoute=2)\n\n        for x in 
np.nditer(direction, op_flags=['readwrite']):\n            if x > 0.7:\n                x[...] = 1\n            elif x < -0.7:\n                x[...] = -1\n            else:\n                x[...] = 0\n\n        # print(\"actions:\", actions)\n        # print(\"veh id: \", self.rl_veh)\n        # print(\"acceleration: \", acceleration)\n        # print(\"direction\", direction)\n\n        # self.k.vehicle.apply_acceleration(self.rl_veh, acc=acceleration)\n        # self.k.vehicle.apply_lane_change(self.rl_veh, direction=direction)\n\n    def get_state(self):\n        \"\"\"See class definition.\"\"\"\n        obs = [\n            0\n            for _ in range(4 * self.num_rl * self.num_lanes + 2 * self.num_rl)\n        ]\n        # print(\"rl veh id: \", self.rl_veh)\n        self.visible = []\n        self.update_veh_id()\n        speeds = []\n        for i, rl_id in enumerate(self.rl_veh):\n            # x, y = self.k.vehicle.kernel_api.vehicle.getPosition(rl_id)\n            # print(x, y)\n            # print(\"edgeID\", self.k.vehicle.get_edge(rl_id))\n            # print(\"lane\", self.k.vehicle.get_lane(rl_id))\n            # self.k.vehicle.kernel_api.vehicle.moveToXY(vehID=[rl_id, rl_id], edgeID=\"highway_1\", lane=1, x=600, y=134)\n            # add the speed for the ego rl vehicle\n            x = self.k.vehicle.get_x_by_id(rl_id)\n            if x == -1001:\n                continue\n            speed = self.k.vehicle.get_speed(rl_id)\n            obs[-2*i - 1] = speed\n            speeds.append(speed)\n            obs[-2*i - 2] = x\n\n            # if rl_id not in self.k.vehicle.get_ids():\n            #     print(\"not in:\", rl_id)\n            #     self.additional_command()\n            # normalizers\n            max_length = self.k.network.length()\n            max_speed = self.k.network.max_speed()\n\n            # set to 1000 since the absence of a vehicle implies a large\n            # headway\n            headway = [1] * self.num_lanes\n            tailway = [1] * self.num_lanes\n            vel_in_front = [0] * self.num_lanes\n            vel_behind = [0] * self.num_lanes\n\n            lane_leaders = self.k.vehicle.get_lane_leaders(rl_id)\n            lane_followers = self.k.vehicle.get_lane_followers(rl_id)\n            lane_headways = self.k.vehicle.get_lane_headways(rl_id)\n            lane_tailways = self.k.vehicle.get_lane_tailways(rl_id)\n            headway[0:len(lane_headways)] = lane_headways\n            tailway[0:len(lane_tailways)] = lane_tailways\n\n            for j, lane_leader in enumerate(lane_leaders):\n                if lane_leader != '':\n                    lane_headways[j] /= max_length\n                    vel_in_front[j] = self.k.vehicle.get_speed(lane_leader) \\\n                        / max_speed\n                    self.visible.extend([lane_leader])\n            for j, lane_follower in enumerate(lane_followers):\n                if lane_follower != '':\n                    lane_headways[j] /= max_length\n                    vel_behind[j] = self.k.vehicle.get_speed(lane_follower) \\\n                        / max_speed\n                    self.visible.extend([lane_follower])\n\n            # add the headways, tailways, and speed for all lane leaders\n            # and followers\n            obs[4*self.num_lanes*i:4*self.num_lanes*(i+1)] = \\\n                np.concatenate((headway, tailway, vel_in_front, vel_behind))\n\n        # if len(speeds) > 3:\n        #     self.stuck = True\n        #     for speed in speeds:\n        #         if speed != 0:\n        #             self.stuck = False\n        obs = np.array(obs)\n        # print(\"observation: \", obs)\n        # print(\"observation shape: \", obs.shape)\n        np.clip(obs, -1000, 3000, out=obs)\n        return obs\n\n    def additional_command(self):\n        # traffic-light control\n        # decide whether there is a waiting pedestrian and switch if the green\n        # phase for the vehicles exceeds its minimum duration\n        if not self.activeRequest:\n            self.activeRequest = self.checkWaitingPersons()\n        if self.k.kernel_api.trafficlight.getPhase(self.TLSID) == self.VEHICLE_GREEN_PHASE:\n            self.greenTimeSoFar += 1\n            if self.greenTimeSoFar > self.MIN_GREEN_TIME:\n                # check whether someone has pushed the button\n\n                if self.activeRequest:\n                    # switch to the next phase\n                    self.k.kernel_api.trafficlight.setPhase(\n                        self.TLSID, self.VEHICLE_GREEN_PHASE + 1)\n                    # reset state\n                    self.activeRequest = False\n        # MoveXY bookkeeping\n        # specify observed vehicles\n        for veh_id in self.leader + self.follower:\n            self.k.vehicle.set_observed(veh_id)\n\n        # update the \"absolute_position\" variable\n        for veh_id in self.k.vehicle.get_ids():\n            this_pos = self.k.vehicle.get_x_by_id(veh_id)\n\n            if this_pos == -1001:\n                # in case the vehicle isn't in the network\n                self.absolute_position[veh_id] = -1001\n            else:\n                change = this_pos - self.prev_pos.get(veh_id, this_pos)\n                self.absolute_position[veh_id] = \\\n                    (self.absolute_position.get(veh_id, this_pos) + change) \\\n                    % self.k.network.length()\n                self.prev_pos[veh_id] = this_pos\n        return\n\n    def update_veh_id(self):\n        # add rl vehicles that just entered the network into the rl queue\n        for veh_id in self.k.vehicle.get_rl_ids():\n            if veh_id not in list(self.rl_queue) + self.rl_veh:\n                self.rl_queue.append(veh_id)\n\n        # remove rl vehicles that exited the network\n        for veh_id in list(self.rl_queue):\n            if veh_id not in self.k.vehicle.get_rl_ids() or veh_id not in self.k.vehicle.get_ids():\n                self.rl_queue.remove(veh_id)\n        for veh_id in self.rl_veh:\n            if veh_id not in self.k.vehicle.get_rl_ids() or veh_id not in self.k.vehicle.get_ids():\n                # print(\"rm veh_id\", veh_id)\n                self.rl_veh.remove(veh_id)\n\n        # fill up rl_veh until there are enough controlled vehicles\n        while len(self.rl_queue) > 0 and len(self.rl_veh) < self.num_rl:\n            rl_id = self.rl_queue.popleft()\n            self.rl_veh.append(rl_id)\n            # print(\"add rl_veh:\", rl_id)\n        # print(\"update_veh_id, self.rl_veh:\", self.rl_veh)\n\n    def checkWaitingPersons(self):\n        \"\"\"check whether a person has requested to cross the street\"\"\"\n\n        # check both sides of the crossing\n        for edge in self.WALKINGAREAS:\n            peds = self.k.kernel_api.edge.getLastStepPersonIDs(edge)\n            # check who is waiting at the crossing\n            # we assume that pedestrians push the button upon\n            # standing still for 1s\n            for ped in peds:\n                if (self.k.kernel_api.person.getWaitingTime(ped) == 1 and\n                        self.k.kernel_api.person.getNextEdge(ped) in self.CROSSINGS):\n                    numWaiting = self.k.kernel_api.trafficlight.getServedPersonCount(self.TLSID, self.PEDESTRIAN_GREEN_PHASE)\n                    print(\"%s: pedestrian %s pushes the button (waiting: %s)\" %\n                          (self.k.kernel_api.simulation.getTime(), ped, numWaiting))\n                    return True\n        return False\n\n    def step(self, rl_actions):\n        \"\"\"Advance the environment by one step.\n\n        Assigns actions to autonomous and human-driven agents (i.e. vehicles,\n        traffic lights, etc...). Actions that are not assigned are left to the\n        control of the simulator. The actions are then used to advance the\n        simulator by the number of time steps requested per environment step.\n\n        Results from the simulations are processed through various classes,\n        such as the Vehicle and TrafficLight kernels, to produce standardized\n        methods for identifying specific network state features. 
Finally,\n results from the simulator are used to generate appropriate\n observations.\n\n Parameters\n ----------\n rl_actions : array_like\n a list of actions provided by the rl algorithm\n\n Returns\n -------\n observation : array_like\n agent's observation of the current environment\n reward : float\n amount of reward associated with the previous state/action pair\n done : bool\n indicates whether the episode has ended\n info : dict\n contains other diagnostic information from the previous action\n \"\"\"\n for _ in range(self.env_params.sims_per_step):\n self.time_counter += 1\n self.step_counter += 1\n\n # perform acceleration actions for controlled human-driven vehicles\n if len(self.k.vehicle.get_controlled_ids()) > 0:\n accel = []\n for veh_id in self.k.vehicle.get_controlled_ids():\n action = self.k.vehicle.get_acc_controller(\n veh_id).get_action(self)\n accel.append(action)\n self.k.vehicle.apply_acceleration(\n self.k.vehicle.get_controlled_ids(), accel)\n\n # perform lane change actions for controlled human-driven vehicles\n if len(self.k.vehicle.get_controlled_lc_ids()) > 0:\n direction = []\n for veh_id in self.k.vehicle.get_controlled_lc_ids():\n target_lane = self.k.vehicle.get_lane_changing_controller(\n veh_id).get_action(self)\n direction.append(target_lane)\n self.k.vehicle.apply_lane_change(\n self.k.vehicle.get_controlled_lc_ids(),\n direction=direction)\n\n # optionally perform routing actions for all vehicles in the\n # network, including RL and SUMO-controlled vehicles\n routing_ids = []\n routing_actions = []\n for veh_id in self.k.vehicle.get_ids():\n if self.k.vehicle.get_routing_controller(veh_id) \\\n is not None:\n routing_ids.append(veh_id)\n route_contr = self.k.vehicle.get_routing_controller(\n veh_id)\n routing_actions.append(route_contr.choose_route(self))\n\n self.k.vehicle.choose_routes(routing_ids, routing_actions)\n\n self.apply_rl_actions(rl_actions)\n\n self.additional_command()\n\n # advance the simulation in the simulator by one step\n self.k.simulation.simulation_step()\n\n # store new observations in the vehicles and traffic lights class\n self.k.update(reset=False)\n\n # update the colors of vehicles\n if self.sim_params.render:\n self.k.vehicle.update_vehicle_colors()\n\n # crash encodes whether the simulator experienced a collision\n crash = self.k.simulation.check_collision()\n\n # stop collecting new simulation steps if there is a collision\n if crash:\n break\n\n # render a frame\n self.render()\n\n states = self.get_state()\n\n # collect information of the state of the network based on the\n # environment class used\n self.state = np.asarray(states).T\n\n # collect the observation of the new state associated with the action\n next_observation = np.copy(states)\n\n # test if the environment should terminate due to a collision or the\n # time horizon being met\n done = (self.time_counter >= self.env_params.warmup_steps +\n self.env_params.horizon) or self.stuck\n\n if done:\n print(\"done\")\n if self.stuck:\n print(\"stuck\")\n else:\n print(\"time up\")\n # compute the info for each agent\n infos = {}\n\n # compute the reward\n if self.env_params.clip_actions:\n rl_clipped = self.clip_actions(rl_actions)\n reward = self.compute_reward(rl_clipped, fail=crash)\n else:\n reward = self.compute_reward(rl_actions, fail=crash)\n\n return next_observation, reward, done, infos\n\n def reset(self):\n \"\"\"See parent class.\n\n This also includes updating the initial absolute position and previous\n position.\n \"\"\"\n self.rl_queue.clear()\n 
self.rl_veh.clear()\n obs = super().reset()\n print(\"reset\")\n for veh_id in self.k.vehicle.get_ids():\n self.absolute_position[veh_id] = self.k.vehicle.get_x_by_id(veh_id)\n self.prev_pos[veh_id] = self.k.vehicle.get_x_by_id(veh_id)\n\n self.leader = []\n self.follower = []\n return obs\n\n\nif __name__ == \"__main__\":\n flow_params = dict(\n exp_tag='template',\n env_name=MoveXYPedEnv,\n network=PedCrossing,\n simulator='traci',\n sim=sim_params,\n env=env_params,\n net=net_params,\n veh=vehicles,\n initial=initial_config,\n tls=tl_logic,\n )\n\n # number of time steps\n flow_params['env'].horizon = 10000\n exp = Experiment(flow_params)\n\n # run the sumo simulation\n _ = exp.run(1)\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.linalg.norm",
"numpy.nditer",
"numpy.asarray",
"numpy.copy",
"numpy.finfo",
"numpy.clip"
]
] |
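The action post-processing at the top of this entry snaps each continuous lane-change output onto {-1, 0, 1} with a ±0.7 dead-band, using np.nditer so the array is modified in place. A minimal, self-contained sketch of that discretization (the sample values are made up; no Flow/SUMO dependency is assumed):

    import numpy as np

    # hypothetical raw lane-change outputs from the policy, in [-1, 1]
    direction = np.array([0.9, -0.95, 0.1, -0.4, 0.71])

    # in-place discretization with a +/-0.7 dead-band, as in the entry above
    for x in np.nditer(direction, op_flags=['readwrite']):
        if x > 0.7:
            x[...] = 1       # lane change in the positive direction
        elif x < -0.7:
            x[...] = -1      # lane change in the negative direction
        else:
            x[...] = 0       # keep the current lane

    print(direction)  # [ 1. -1.  0.  0.  1.]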
schreiber-lab/mlreflect
|
[
"88a80ccac48461cc8934a46041726b70e469c6b8"
] |
[
"mlreflect/curve_fitter/minimizer.py"
] |
[
"import numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom ..data_generation import interp_reflectivity, ReflectivityGenerator\n\n\ndef q_shift_variants(q_values_prediction, q_values_input, corrected_reflectivity, n_variants, scale=0.001):\n \"\"\"Create ``n_variants`` interpolated reflectivity curve variants with randomly distributed q shifts.\"\"\"\n shift = np.random.normal(loc=0, size=n_variants, scale=scale).reshape(n_variants, 1)\n shifted_qs = np.tile(q_values_input, (n_variants, 1)) + shift\n\n interpolated_curves = np.zeros((n_variants, len(q_values_prediction)))\n for i in range(n_variants):\n interpolated_curves[i] = interp_reflectivity(q_values_prediction, shifted_qs[i], corrected_reflectivity)\n return interpolated_curves, shift\n\n\ndef curve_scaling_variants(corrected_reflectivity, n_variants, scale=0.1):\n \"\"\"Create ``n_variants`` reflectivity curve variants with randomly distributed scaling factors.\"\"\"\n scalings = np.random.normal(loc=1, size=n_variants, scale=scale).reshape(n_variants, 1)\n scaled_curves = np.zeros((n_variants, len(corrected_reflectivity)))\n for i in range(n_variants):\n scaled_curves[i] = corrected_reflectivity.copy() * scalings[i]\n return scaled_curves, scalings\n\n\ndef curve_variant_log_mse(curve, variant_curves):\n \"\"\"Calculate the log MSE of a curve and a :class:`ndarray` of curves\"\"\"\n errors = np.log10(curve) - np.log10(variant_curves)\n return np.mean(errors ** 2, axis=1)\n\n\ndef least_log_mean_squares_fit(q_values, data, predicted_labels, sample, output_preprocessor,\n fraction_bounds=(0.5, 0.5, 0.1)):\n \"\"\"Fits the data with a model curve with ``scipy.optimize.curve_fit`` using ``predicted_labels`` as start values.\"\"\"\n prep_labels = output_preprocessor.apply_preprocessing(predicted_labels)[0]\n start_values = np.array(prep_labels)[0]\n bounds = ([val - bound * abs(val) for val, bound in zip(start_values, fraction_bounds)],\n [val + bound * abs(val) for val, bound in zip(start_values, fraction_bounds)])\n fit_result = curve_fit(fitting_model(q_values, sample, output_preprocessor), q_values, np.log10(data),\n p0=start_values, bounds=bounds)\n return output_preprocessor.restore_labels(np.atleast_2d(fit_result[0]))\n\n\ndef fitting_model(q_values, sample, output_preprocessor):\n def log_refl_curve(q, *prep_labels):\n generator = ReflectivityGenerator(q_values, sample)\n restored_labels = output_preprocessor.restore_labels(np.atleast_2d(prep_labels))\n model = generator.simulate_reflectivity(restored_labels, progress_bar=False)[0]\n return np.log10(model)\n\n return log_refl_curve\n\n\ndef log_mse_loss(prep_labels, data, generator, output_preprocessor):\n \"\"\"MSE loss between a reflectivity curve and a model curve generated with the given normalized labels.\"\"\"\n restored_labels = output_preprocessor.restore_labels(np.atleast_2d(prep_labels))\n model = generator.simulate_reflectivity(restored_labels,\n progress_bar=False)[0]\n loss = mean_squared_error(np.log10(data), np.log10(model))\n return loss\n\n\ndef mean_squared_error(array1, array2):\n \"\"\"Returns element-wise mean squared error between two arrays.\"\"\"\n if len(array1) != len(array2):\n raise ValueError(f'array1 and array2 must be of same length ({len(array1)} != {len(array2)})')\n else:\n error = np.asarray(array1) - np.asarray(array2)\n return np.mean(np.atleast_2d(error ** 2), axis=1)\n"
] |
[
[
"numpy.random.normal",
"numpy.array",
"numpy.asarray",
"numpy.tile",
"numpy.mean",
"numpy.log10",
"numpy.atleast_2d"
]
] |
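q_shift_variants in the entry above augments a measured reflectivity curve by jittering its q grid with small normally distributed shifts and re-interpolating each shifted copy back onto the prediction grid. A rough standalone sketch of the same idea, with np.interp standing in for mlreflect's interp_reflectivity (an assumption) and a toy exponential curve in place of real data:

    import numpy as np

    rng = np.random.default_rng(0)
    q = np.linspace(0.01, 0.2, 100)     # shared q grid
    refl = np.exp(-40 * q)              # toy reflectivity curve

    n_variants, scale = 5, 0.001
    shift = rng.normal(loc=0, scale=scale, size=(n_variants, 1))
    shifted_qs = np.tile(q, (n_variants, 1)) + shift

    # re-interpolate every shifted curve back onto the prediction grid
    variants = np.stack([np.interp(q, shifted_qs[i], refl)
                         for i in range(n_variants)])
    print(variants.shape)  # (5, 100)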
garethnisbet/T-BOTS
|
[
"70e211191cc6c713084836bff89241e811667378"
] |
[
"Python/Development/T-Bot_Tracking/getHSVThresh.py"
] |
[
"#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\n# from scipy import ndimage\nmaskgridL = np.meshgrid(np.r_[0:359],np.r_[0:130])\nmaskgridR = np.meshgrid(np.r_[0:359],np.r_[639-130:639])\n\n# key value\n# cam.set(3 , 640) # width \n# cam.set(4 , 480) # height \n# cam.set(10, 120) # brightness min: 0 , max: 255 , increment:1 \n# cam.set(11, 50) # contrast min: 0 , max: 255 , increment:1 \n# cam.set(12, 70) # saturation min: 0 , max: 255 , increment:1\n# cam.set(13, 13) # hue \n# cam.set(14, 50) # gain min: 0 , max: 127 , increment:1\n# cam.set(15, -3) # exposure min: -7 , max: -1 , increment:1\n# cam.set(17, 5000) # white_balance min: 4000, max: 7000, increment:1\n# cam.set(28, 0) # focus min: 0 , max: 255 , increment:5\n\ndef callback(value):\n pass\n\ndef setup_trackbars(range_filter):\n cv2.namedWindow(\"Thresholds\",cv2.WINDOW_NORMAL)\n cv2.resizeWindow(\"Thresholds\", 720, 720)\n for i in [\"MIN\", \"MAX\"]:\n v = 0 if i == \"MIN\" else 255\n for j in range_filter:\n cv2.createTrackbar(\"%s_%s\" % (j, i), \"Thresholds\", v, 255, callback)\n\ndef get_trackbar_values(range_filter):\n values = []\n for i in [\"MIN\", \"MAX\"]:\n for j in range_filter:\n v = cv2.getTrackbarPos(\"%s_%s\" % (j, i), \"Thresholds\")\n values.append(v)\n return values\n\n\ngot_lowpass = 0\n# range_filter = 'RGB'\nrange_filter = 'HSV'\ncam = cv2.VideoCapture(0,cv2.CAP_V4L2)\ncam.set(cv2.CAP_PROP_AUTOFOCUS, 0)\ncam.set(28, 0)\ncam.set(cv2.CAP_PROP_GAIN,0)\ncam.set(cv2.CAP_PROP_BRIGHTNESS,0)\ncam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncam.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)\ncam.set(cv2.CAP_PROP_BRIGHTNESS, 100)\n\nsetup_trackbars(range_filter)\n\n\nwhile True:\n success, image = cam.read()\n # image[maskgridL] = 0\n # image[maskgridR] = 0\n if range_filter == 'RGB':\n frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n else:\n frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n v1_min, v2_min, v3_min, v1_max, v2_max, v3_max = get_trackbar_values(range_filter)\n thresh = cv2.inRange(frame_to_thresh, (v1_min, v2_min, v3_min), (v1_max, v2_max, v3_max))\n preview = cv2.bitwise_and(image, image, mask=thresh)\n cv2.imshow(\"Thresholds\", preview)\n\n if cv2.waitKey(1) & 0xFF is ord('q'):\n cam.release()\n cv2.destroyAllWindows()\n break\n"
] |
[
[
"numpy.meshgrid"
]
] |
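getHSVThresh.py above is an interactive tool: six trackbars feed cv2.inRange, and the masked preview is shown until 'q' is pressed. The masking step itself needs no GUI; a headless sketch on a synthetic frame, with fixed thresholds standing in for the trackbar values (the numbers are chosen for this toy image, not taken from the repo):

    import cv2
    import numpy as np

    # synthetic BGR frame: left half blue, right half green
    image = np.zeros((120, 160, 3), dtype=np.uint8)
    image[:, :80] = (255, 0, 0)
    image[:, 80:] = (0, 255, 0)

    frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # keep only the green hue band (pure green sits near hue 60 on OpenCV's 0-179 scale)
    thresh = cv2.inRange(frame_to_thresh, (40, 50, 50), (80, 255, 255))
    preview = cv2.bitwise_and(image, image, mask=thresh)

    print(thresh[:, :80].max(), thresh[:, 80:].max())  # 0 255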
AleksiNummelin/coupled_channel
|
[
"0e96e54400bb853b8c42cfc55b968a476114dcef"
] |
[
"coupled_channel/cutils.py"
] |
[
"#from numba import jit\nimport numpy as np\n#from joblib import Parallel, delayed, parallel_backend\n#from joblib import load, dump\n#import tempfile\n#import shutil\n#import os\n#\n#import sys\n#sys.path.append('pyunicorn_timeseries')\n#from pyunicorn_timeseries.surrogates import Surrogates\n\ndef set_model_constants(xx=50.E3,nx=100,va=10.,tmax=60*360*24*3600.,avep=24*3600.,dt=3600.,period=3600*24*360*1,B=2.,T0=273.15+6,dT=2.,Cs=1.E-3,Cp=1030.,ra=1.5,ro=1030.,ri=900.,Cpo=4.E3,Cpi=2.9E3,H=200.,vo=0.2,Hb=1.E3,Li=3.3E6,Tf=273.15-1.8,SW0=50.,SW_anom=100.,emissivity=0.99,Da=1.E6,Do=5.E2,tau_entrainment=30*24*3600.,**args):\n '''Setup model constants. All of the constants have fixed values, but one can pass in own values or even some arbitrary values via **args.'''\n #\n C={}\n C['xx'] = xx #grid size in [m]\n C['nx'] = nx #number of grid cell - the total width of the domain is xx*nx long\n C['va'] = va #wind in m/s\n #\n C['tmax'] = tmax #tmax seconds\n C['dt'] = dt #timestep\n #\n C['avep'] = avep #averaging period in seconds \n #\n C['period'] = period #period of boundary restoring\n C['Cs'] = Cs #exchange coefficient for bulk formula\n C['Cp'] = Cp #air heat capacity\n C['ra'] = ra #density of air [kg/m3]\n C['ro'] = ro #density of sea water [kg/m3]\n C['ri'] = ri #density of sea ice [kg/m3]\n C['Cpo'] = Cpo #sea water heat capacity\n C['T0'] = T0 #initial temp in degC\n C['dT'] = dT #initial temp perturbationHb=2E3\n C['H'] = H #mixed layer depth in ocean [m]\n C['vo'] = vo #ocean current speed [m/s]\n C['Hb'] = Hb #boundary layer height in the atmosphere [m]\n C['Cpi'] = Cpi #sea ice heat capacity [J/ Kg K]\n C['Li'] = Li #Latent heat of fusion of sea water [J / kg K]\n C['Tf'] = Tf #Freezing point of sea water [C]\n C['B'] = B # long-wave radiation constant [W/m2]\n C['emissivity'] = emissivity #surface emissivity\n C['SW0'] = SW0 # background net downwelling SW radiation\n C['SW_anom']= SW_anom # amplitude of annual cycle in SW radiation\n C['Da'] = Da # atmospheric diffusion [m2/s]\n C['Do'] = Do # ocean diffusion [m2/s]\n C['tau_entrainment'] = tau_entrainment # ocean entrainment/damping timescale\n \n for var in args.keys():\n C[var]=args[var]\n #\n return C\n\n \ndef CoupledChannel(C,forcing, T_boundary=None, dt_f=30*24*3600, restoring=False,ice_model=True,atm_adv=True,spatial_pattern=None,atm_DA_tendencies=None,ocn_DA_tendencies=None, return_coupled_fluxes=False,random_amp=0.1):\n '''\n This is the main function for the coupled ocean--atm channel model.\n \n ## INPUT VARIABLES ##\n \n tmax: running time in seconds\n avep: averaging period for the ouput\n T0: initial temperature\n forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2\n dt_f: timestep of the forcing\n atm_adv: boolean, advective atmosphere\n atm_ocn: boolean, advective ocean\n '''\n #\n # number of simulation timesteps and output timesteps\n nt = int(C['tmax']/C['dt']) #simulation\n nt1 = int(C['tmax']/C['avep']) #output\n # rtas = np.random.rand(C['nx'])\n # intitialize the model variables, first dimension is due to 2 timesteps deep scheme\n sst = C['T0']*np.ones((2,C['nx']))\n tas = C['T0']*np.ones((2,C['nx'])) #+rtas\n hice = np.zeros((2,C['nx']))\n # INCOMING SHORTWAVE RADIATION\n SW0 = np.tile(C['SW0'][:,np.newaxis],(1,nt))\n naxis = np.tile(np.arange(nt)[np.newaxis,],(C['nx'],1))\n SW_warming = np.max(np.concatenate([(SW0-C['SW_anom']*np.cos(2*np.pi*(naxis*C['dt'])/(360*24*3600)))[np.newaxis,],np.zeros((C['nx'],nt))[np.newaxis,]],axis=0),0)\n # If boundary conditions are not 
defined, then set initially to T0\n if np.all(T_boundary==None):\n T_boundary=C['T0']*np.ones(nt)\n #\n sst_boundary=T_boundary[0]*np.ones((2)) #nt+1\n # evolve_boundary=True\n #else:\n # sst_boundary=np.concatenate((sst_boundary[np.newaxis,],sst_boundary[np.newaxis,]),axis=0)\n # evolve_boundary=False\n #\n # interpolate forcing to the new timescale\n if np.all(forcing!=None):\n forcing = np.interp(np.arange(0,len(forcing)*dt_f,C['dt']),np.arange(0,len(forcing)*dt_f,dt_f),forcing)\n else:\n forcing = np.zeros(nt+1)\n #\n # initialize outputs\n sst_out = np.zeros((nt1,C['nx']))\n tas_out = np.zeros((nt1,C['nx']))\n hice_out = np.zeros((nt1,C['nx']))\n sflx_f_out = np.zeros((nt1,C['nx'])) #forcing\n sflx_out = np.zeros((nt1,C['nx']))\n # spatial pattern of the forcing - assume a sine wave\n if np.all(spatial_pattern==None):\n spatial_pattern=np.ones(C['nx'])\n #\n if np.all(atm_DA_tendencies!=None):\n use_atm_tendencies=True\n else:\n use_atm_tendencies=False\n if np.all(ocn_DA_tendencies!=None):\n use_ocn_tendencies=True\n else:\n use_ocn_tendencies=False\n #\n if return_coupled_fluxes:\n atm_DA_tendencies = np.zeros((nt,C['nx']))\n ocn_DA_tendencies = np.zeros((nt,C['nx']))\n\n # initialize counters\n c=0; c2=0; c3=0; n=1\n #####################\n # --- TIME LOOP ---\n #####################\n for nn in range(nt):\n #\n # FORCING - WILL BE ZERO IF NOT SPECIFIED, no spatial pattern if not specified\n sflx=forcing[nn]*spatial_pattern #+ forcing[nn]*random_amp*np.random.rand(C['nx'])\n #\n # save the forcing component\n #\n sflx_f_out[c,:]=sflx_f_out[c,:]+sflx\n #\n # SURFACE HEAT FLUXES\n # Add sensible heat flux to the total surface flux in W/m**2\n sflx=sflx+C['ra']*C['Cp']*C['va']*C['Cs']*(sst[n-1,:]-tas[n-1,:])\n # RADIATIVE FLUXES - LW will cool the atmosphere, SW will warm the ocean\n LW_cooling = C['emissivity']*5.67E-8*(tas[n-1,:]**4)\n #\n # OCEAN BOUNDARY CONDITION\n #if evolve_boundary:\n sst_boundary_tendency=SW_warming[0,nn]*C['dt']/(C['H']*C['Cpo']*C['ro'])-C['emissivity']*5.67E-8*(sst_boundary[n-1]**4)*C['dt']/(C['H']*C['Cpo']*C['ro'])+(T_boundary[nn]-sst_boundary[n-1])*C['dt']/C['period']\n \n ############################################\n # \n # ATMOSPHERE\n #\n ############################################\n #\n # ADVECTION\n #\n # set atm_adv=False for no atmospheric advection - note that we still need to know the wind speed to resolve heat fluxes\n if atm_adv:\n a_adv = np.concatenate([sst_boundary[n-1]-tas[n-1,:1],tas[n-1,:-1]-tas[n-1,1:]],axis=0)*(C['va']*C['dt']/C['xx'])\n else:\n a_adv = 0 \n #\n # DIFFUSION\n # \n a_diff = (tas[n-1,2:]+tas[n-1,:-2]-2*tas[n-1,1:-1])*(C['Da']*C['dt']/(C['xx']**2))\n a_diff0 = (tas[n-1,1]+sst_boundary[n-1]-2*tas[n-1,0])*(C['Da']*C['dt']/(C['xx']**2))\n a_diff = np.concatenate([np.array([a_diff0]),a_diff,a_diff[-1:]],axis=0)\n #\n # SURFACE FLUXES\n #\n a_netsflx = (sflx*C['dt'])/(C['Hb']*C['Cp']*C['ra']) - LW_cooling*C['dt']/(C['Hb']*C['Cp']*C['ra'])\n #\n #\n if return_coupled_fluxes:\n atm_DA_tendencies[nn,:] = a_adv + a_diff\n #\n # ATM UPDATE\n #\n if use_atm_tendencies:\n tas[n,:] = tas[n-1,:] + a_netsflx + atm_DA_tendencies[c3,:]\n else:\n tas[n,:] = tas[n-1,:] + a_netsflx + a_adv + a_diff\n #\n ################################################\n # \n # OCEAN \n #\n ################################################\n # AND DIFFUSION + ENTRAINMENT\n # ocean advection\n # \n # ADVECTION set vo=0 for stagnant ocean (slab)\n #\n o_adv = 
np.concatenate([sst_boundary[n-1]-sst[n-1,:1],sst[n-1,:-1]-sst[n-1,1:]],axis=0)*(C['vo']*C['dt']/C['xx'])\n #\n # DIFFUSION\n #\n o_diff = (sst[n-1,2:]+sst[n-1,:-2]-2*sst[n-1,1:-1])*(C['Do']*C['dt']/(C['xx']**2))\n o_diff0 = (sst[n-1,1]+sst_boundary[n-1]-2*sst[n-1,0])*(C['Do']*C['dt']/(C['xx']**2))\n o_diff = np.concatenate([np.array([o_diff0]),o_diff,o_diff[-1:]],axis=0)\n #\n # ENTRAINMENT - RESTORING TO AN AMBIENT WATER MASS (CAN BE SEEN AS LATERAL OR VERTICAL MIXING)\n # set tau_entrainment=0 for no entrainment\n if C['tau_entrainment']>0:\n o_entrain = (C['T0']-sst[n-1,:])*C['dt']/C['tau_entrainment']\n else:\n o_entrain = 0\n # \n # SURFACE FLUXES \n #\n o_netsflx = -sflx*C['dt']/(C['H']*C['Cpo']*C['ro'])+SW_warming[:,nn]*C['dt']/(C['H']*C['Cpo']*C['ro'])\n #\n if return_coupled_fluxes:\n ocn_DA_tendencies[nn,:] = o_adv + o_diff + o_entrain\n #\n # OCN update\n if use_ocn_tendencies:\n sst[n,:] = sst[n-1,:] + o_netsflx + ocn_DA_tendencies[c3,:]\n else:\n sst[n,:] = sst[n-1,:] + o_netsflx + o_adv + o_diff + o_entrain\n #\n if ice_model:\n # THIS IS A DIAGNOSTIC SEA ICE MODEL\n # \n # SST is first allowed to cool below freezing and then we form sea ice from the excess_freeze \n # i.e. the amount of heat that is used to cool SST below freezing is converted to ice instead.\n # Similarly, SST is allowed to warm above Tf even if there is ice, and then excess_melt, \n # i.e. the amount of heat that is used to warm the water is first used to melt ice, \n # and then the rest can warm the water.\n #\n # This scheme conserves energy - it simply switches it between ocean and ice storages\n #\n # advection\n #hice[n-1,1:]=hice[n-1,1:]-(hice[n-1,:-1]-hice[n-1,1:])*(C['vo']*C['dt']/C['xx'])\n #dhice = (hice[n-1,:-1]-hice[n-1,1:])*(C['vo']*C['dt']/C['xx'])\n #hice[n-1,:-1] = hice[n-1,:-1] -dhice\n #hice[n-1,-1] = hice[n-1,-1] + dhice[-1]\n #\n ice_mask = (hice[n-1,:]>0).astype(float) #cells where there is ice to melt\n freezing_mask = (sst[n,:]<C['Tf']).astype(float) #cells where freezing will happen\n # change in energy\n dEdt = C['H']*C['ro']*C['Cpo']*(sst[n,:]-sst[n-1,:])/C['dt']\n # negative change in energy will produce ice whenever the water would otherwise cool below freezing\n excess_freeze = freezing_mask*np.max([-dEdt,np.zeros(C['nx'])],axis=0)\n # positive change will melt ice where there is ice\n excess_melt = ice_mask*np.max([dEdt,np.zeros(C['nx'])],axis=0)\n # note that freezing and melting will never happen at the same time in the same cell\n # freezing\n dhice_freeze = C['dt']*excess_freeze/(C['Li']*C['ri'])\n # melting\n dhice_melt= C['dt']*excess_melt/(C['Li']*C['ri'])\n # update\n hice[n,:] = hice[n-1,:] + dhice_freeze - dhice_melt\n # check how much energy was used for melting sea ice - remove this energy from ocean\n hice_melt = (dhice_melt>0).astype(float)*np.min([dhice_melt,hice[n-1,:]],axis=0)\n # Do not allow ice to be negative - that energy is kept in the ocean all the time. 
\n # The line above ensures that no more energy than is needed to melt the whole ice cover\n # is removed from the ocean at any given time\n hice[n,:] = np.max([hice[n,:],np.zeros(C['nx'])],axis=0)\n #\n # Update SST\n # Give back the energy that was used for freezing (will keep the water temperature above freezing)\n sst[n,:] = sst[n,:] + C['dt']*excess_freeze/(C['H']*C['Cpo']*C['ro']) \n # take out the heat that was used to melt ice \n # (need to cap to hice, the extra heat is never used and will stay in the ocean)\n sst[n,:] = sst[n,:] - hice_melt*(C['Li']*C['ri'])/(C['ro']*C['Cpo']*C['H'])\n #\n #############################\n # --- PREPARE OUTPUT ----\n #############################\n # accumulate output\n tas_out[c,:] = tas_out[c,:]+tas[n,:]\n sst_out[c,:] = sst_out[c,:]+sst[n,:]\n hice_out[c,:] = hice_out[c,:]+hice[n,:]\n sflx_out[c,:] = sflx_out[c,:]+sflx\n # accumulate averaging counter\n c2=c2+1\n c3=c3+1\n if ((nn+1)*C['dt'])%(360*24*3600)==0:\n #print(nn)\n c3=0\n #calculate the average for the output\n if (((nn+1)*C['dt'])%C['avep']==0 and nn>0):\n tas_out[c,:] = tas_out[c,:]/c2\n sst_out[c,:] = sst_out[c,:]/c2\n sflx_out[c,:] = sflx_out[c,:]/c2\n sflx_f_out[c,:] = sflx_f_out[c,:]/c2\n hice_out[c,:] = hice_out[c,:]/c2\n # update counters\n c = c+1\n c2 = 0\n if ((nn+1)*C['dt'])%(360*24*3600)==0:\n print('Year ', (nn+1)*C['dt']/(360*24*3600), sst[1,int(C['nx']/4)], sst[1,int(3*C['nx']/4)])\n #update the variables\n tas[0,:] = tas[1,:].copy()\n sst[0,:] = sst[1,:].copy()\n hice[0,:] = hice[1,:].copy()\n # SST at the boundary\n sst_boundary[n-1]=sst_boundary[n-1]+sst_boundary_tendency\n #\n #\n # if there is no ice, set to nan\n hice_out[np.where(hice_out==0)]=np.nan\n #\n if return_coupled_fluxes:\n return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt, atm_DA_tendencies, ocn_DA_tendencies\n else:\n return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt\n\n\n#@jit(nopython=True)\ndef CoupledChannel_time(nt,nx,xx,dt,avep,sst,tas,hice,sst_boundary,sst_out,tas_out,hice_out,sflx_f_out,sflx_out,forcing,spatial_pattern,ra,Cp,va,vo,Da,Do,Cs,T0,Tf,emissivity,SW0,SW_anom,H,Hb,Cpo,ro,tau_entrainment,Li,ri,use_ocn_tendencies,use_atm_tendencies, atm_DA_tendencies, ocn_DA_tendencies,ice_model,atm_adv,return_coupled_fluxes):\n '''\n Separate time loop to enable numba\n '''\n #initialize counters\n c=0; c2=0; c3=0; n=1\n #####################\n # --- TIME LOOP ---\n #####################\n for nn in range(nt):\n #\n # FORCING - WILL BE ZERO IF NOT SPECIFIED, no spatial pattern if not specified\n sflx=forcing[nn]*spatial_pattern #+ forcing[nn]*random_amp*np.random.rand(C['nx'])\n #\n # save the forcing component\n #\n sflx_f_out[c,:]=sflx_f_out[c,:]+sflx\n #\n # SURFACE HEAT FLUXES\n # Add sensible heat flux to the total surface flux in W/m**2\n sflx=sflx+ra*Cp*va*Cs*(sst[n-1,:]-tas[n-1,:])\n # RADIATIVE FLUXES - LW will cool the atmosphere, SW will warm the ocean\n LW_cooling = emissivity*5.67E-8*(tas[n-1,:]**4)\n SW_warming = SW0+max(SW_anom*np.sin(2*float(nn)*dt*np.pi/(360*24*3600)),0.0)\n #net_radiation = SW_warming-LW_cooling \n net_radiation = -LW_cooling\n #\n # OCEAN BOUNDARY CONDITION - SET dT to zero to suppress the sin\n sst_boundary[n]=sst_boundary[n-1]+SW_warming[0]*dt/(H*Cpo*ro)-emissivity*5.67E-8*(sst_boundary[n-1]**4)*dt/(H*Cpo*ro)+(T0-sst_boundary[n-1])*dt/(360*24*3600) #C['T0']+C['dT']*np.sin(nn*C['dt']*np.pi/C['period']) + \n #\n # ATMOSPHERE - ADVECTION AND DIFFUSION\n # set atm_adv=False for no atmospheric advection - note that we need to know the wind 
speed to resolve heat fluxes\n if atm_adv:\n a_adv = np.concatenate((sst_boundary[n-1]-tas[n-1,:1],tas[n-1,:-1]-tas[n-1,1:]),axis=0)*(va*dt/xx)\n #tas[n,0]=tas[n-1,0]+(C['T0']-tas[n-1,0])*(C['va']*C['dt']/C['xx']) #always constant temperature blowing over the ocean from land\n #tas[n,0]=tas[n-1,0]+(sst[n,0]-tas[n-1,0])*(C['va']*C['dt']/C['xx']) #atmospheric temperature at the boundary is in equilibrium with the ocean\n #tas[n,1:]=tas[n-1,1:]+(tas[n-1,:-1]-tas[n-1,1:])*(C['va']*C['dt']/C['xx'])\n else:\n #tas[n,:] = tas[n-1,0]\n a_adv = np.zeros(nx)\n #\n # DIFFUSION\n # \n #tas[n,1:-1] = tas[n,1:-1] + (tas[n-1,2:]+tas[n-1,:-2]-2*tas[n-1,1:-1])*(C['Da']*C['dt']/(C['xx']**2))\n a_diff = (tas[n-1,2:]+tas[n-1,:-2]-2*tas[n-1,1:-1])*(Da*dt/(xx**2))\n a_diff0 = (tas[n-1,1]+sst_boundary[n-1]-2*tas[n-1,0])*(Da*dt/(xx**2))\n a_diff = np.concatenate((np.array([a_diff0]),a_diff,a_diff[-1:]),axis=0)\n #\n # ATMOSPHERE - SURFACE FLUXES\n #\n a_netsflx = (sflx*dt)/(Hb*Cp*ra) + net_radiation*dt/(Hb*Cp*ra)\n #\n # full update\n #\n #\n if return_coupled_fluxes:\n atm_DA_tendencies[nn,:]=np.sum((a_adv,a_diff),axis=0)\n #\n if use_atm_tendencies:\n tas[n,:] = tas[n-1,:] + a_netsflx + atm_DA_tendencies[c3,:]\n else:\n tas[n,:] = tas[n-1,:] + a_netsflx + a_adv + a_diff\n #\n # OCEAN - ADVECTION AND DIFFUSION + ENTRAINMENT\n # ocean advection\n # set vo=0 for stagnant ocean (slab)\n #\n #sst[n,1:] = sst[n-1,1:]+(sst[n-1,:-1]-sst[n-1,1:])*(1-ocn_mixing_ratio)*(C['vo']*C['dt']/C['xx'])+(C['T0']-sst[n-1,1:])*ocn_mixing_ratio*(C['vo']*C['dt']/C['xx'])\n o_adv = np.concatenate((sst_boundary[n-1]-sst[n-1,:1],sst[n-1,:-1]-sst[n-1,1:]),axis=0)*(vo*dt/xx)\n # DIFFUSION\n #sst[n,1:-1] = sst[n,1:-1] + (sst[n-1,2:]+sst[n-1,:-2]-2*sst[n-1,1:-1])*(C['Do']*C['dt']/(C['xx']**2))\n o_diff = (sst[n-1,2:]+sst[n-1,:-2]-2*sst[n-1,1:-1])*(Do*dt/(xx**2))\n o_diff0 = (sst[n-1,1]+sst_boundary[n-1]-2*sst[n-1,0])*(Do*dt/(xx**2))\n o_diff = np.concatenate((np.array([o_diff0]),o_diff,o_diff[-1:]),axis=0)\n # ENTRAINMENT (damping by a lower layer)\n o_entrain = (T0-sst[n-1,:])*dt/tau_entrainment\n #sst[n,1:]=sst[n,1:]+(C['T0']-sst[n-1,1:])*C['dt']/C['tau_entrainment']\n # \n # OCEAN - SURFACE FLUXES \n #\n o_netsflx = -sflx*dt/(H*Cpo*ro)+SW_warming*dt/(H*Cpo*ro)\n #sst[n,:]=sst[n,:]-(sflx*C['dt'])/(C['H']*C['Cpo']*C['ro'])\n if return_coupled_fluxes:\n ocn_DA_tendencies[nn,:] = o_adv + o_diff + o_entrain\n # OCN update\n if use_ocn_tendencies:\n sst[n,:] = sst[n-1,:] + o_netsflx + ocn_DA_tendencies[c3,:]\n else:\n sst[n,:] = sst[n-1,:] + o_netsflx + o_adv + o_diff + o_entrain\n #\n if ice_model:\n # THIS IS A DIAGNOSTIC SEA ICE MODEL\n # \n # sst is first allowed to cool below freezing and then we form sea ice from the excess_freeze \n # i.e. the amount of heat that is used to cool sst below freezing is converted to ice instead\n # similarly sst is allowed to warm above Tf even if there is ice, and then excess_melt, \n # i.e. the amount of heat that is used to warm the water is first used to melt ice, \n # and then the rest can warm the water. 
This scheme conserves energy - it simply switches it between ocean and ice\n #\n ice_mask = (hice[n-1,:]>0).astype(float) #cells where there is ice to melt\n freezing_mask = (sst[n,:]<Tf).astype(float) #cells where freezing will happen\n # change in energy\n dEdt = H*ro*Cpo*(sst[n,:]-sst[n-1,:])/dt\n # negative change in energy will produce ice whenever the water would otherwise cool below freezing\n excess_freeze = freezing_mask*np.max([-dEdt,np.zeros(nx)],axis=0)\n # positive change will melt ice where there is ice\n excess_melt = ice_mask*np.max([dEdt,np.zeros(nx)],axis=0)\n # note that freezing and melting will never happen at the same time in the same cell\n # freezing\n dhice_freeze = dt*excess_freeze/(Li*ri)\n # melting\n dhice_melt= dt*excess_melt/(Li*ri)\n # update\n hice[n,:] = hice[n-1,:] + dhice_freeze - dhice_melt\n # check how much energy was used for melting sea ice - remove this energy from ocean\n hice_melt = (dhice_melt>0).astype(float)*np.min([dhice_melt,hice[n-1,:]],axis=0)\n # Do not allow ice to be negative - that energy is kept in the ocean all the time. \n # The line above ensures that no more energy than is needed to melt the whole ice cover\n # is removed from the ocean at any given time\n hice[n,:] = np.max([hice[n,:],np.zeros(nx)],axis=0)\n #\n # Update SST\n # Give back the energy that was used for freezing (will keep the water temperature above freezing)\n sst[n,:] = sst[n,:] + dt*excess_freeze/(H*Cpo*ro) \n # take out the heat that was used to melt ice \n # (need to cap to hice, the extra heat is never used and will stay in the ocean)\n sst[n,:] = sst[n,:] - hice_melt*(Li*ri)/(ro*Cpo*H)\n #\n #############################\n # --- PREPARE OUTPUT ----\n #############################\n #accumulate\n tas_out[c,:] = tas_out[c,:]+tas[n,:]\n sst_out[c,:] = sst_out[c,:]+sst[n,:]\n hice_out[c,:] = hice_out[c,:]+hice[n,:]\n sflx_out[c,:] = sflx_out[c,:]+sflx\n # accumulate averaging counter\n c2=c2+1\n c3=c3+1\n if ((nn+1)*dt)%(360*24*3600)==0:\n #print(nn)\n c3=0\n #calculate the average for the output\n if (((nn+1)*dt)%avep==0 and nn>0):\n tas_out[c,:] = tas_out[c,:]/c2\n sst_out[c,:] = sst_out[c,:]/c2\n sflx_out[c,:] = sflx_out[c,:]/c2\n sflx_f_out[c,:] = sflx_f_out[c,:]/c2\n hice_out[c,:] = hice_out[c,:]/c2\n # update counters\n c = c+1\n c2 = 0\n #if ((nn+1)*C['dt'])%(360*24*3600)==0:\n # print('Year ', (nn+1)*C['dt']/(360*24*3600), sst[1,int(C['nx']/4)], sst[1,int(3*C['nx']/4)])\n #update the variables\n tas[0,:] = tas[1,:].copy()\n sst[0,:] = sst[1,:].copy()\n hice[0,:] = hice[1,:].copy()\n sst_boundary[0]=sst_boundary[1].copy()\n #\n hice_out[np.where(hice_out==0)]=np.nan\n #\n return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, atm_DA_tendencies, ocn_DA_tendencies\n\ndef CoupledChannel2(C,forcing, dt_f=30*24*3600, ocn_mixing_ratio=0, restoring=False,ice_model=True,atm_adv=True,spatial_pattern=None,atm_DA_tendencies=None,ocn_DA_tendencies=None, return_coupled_fluxes=False,random_amp=0.1):\n '''\n This is the main function for the coupled ocean--atm channel model.\n \n ## INPUT VARIABLES ##\n \n tmax: running time in seconds\n avep: averaging period for the output\n T0: initial temperature\n forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2\n dt_f: timestep of the forcing\n atm_adv: boolean, advective atmosphere\n atm_ocn: boolean, advective ocean\n ocn_mixing: add non-local mixing to ocean\n ocn_mixing_ratio: 0-1 ratio between advection and mixing (0 only advection; 1 only mixing)\n \n '''\n #\n #print(C)\n 
#print(C['T0'],C['SW0'],C['Da'],C['xx'])\n #\n nt=int(C['tmax']/C['dt']) #steps\n nt1=int(C['tmax']/C['avep'])\n tau=float(C['period'])/float(C['dt']) #this is period/dt, previously nt/8 \n rtas=np.random.rand(C['nx'])\n #print(rtas.max())\n #initialize the model variables, only 2 timesteps deep scheme\n sst=C['T0']*np.ones((2,C['nx']))\n tas=C['T0']*np.ones((2,C['nx']))+rtas\n hice=np.zeros((2,C['nx']))\n sst_boundary=C['T0']*np.ones((2))\n #\n #print(sst.max(),tas.max())\n #interpolate forcing to the new timescale\n if np.all(forcing!=None):\n forcing = np.interp(np.arange(0,len(forcing)*dt_f,C['dt']),np.arange(0,len(forcing)*dt_f,dt_f),forcing)\n else:\n forcing = np.zeros(nt+1)\n #\n #initialize outputs\n sst_out = np.zeros((nt1,C['nx']))\n tas_out = np.zeros((nt1,C['nx']))\n hice_out = np.zeros((nt1,C['nx']))\n sflx_f_out = np.zeros((nt1,C['nx'])) #forcing\n sflx_out = np.zeros((nt1,C['nx']))\n #spatial pattern of the forcing - assume a sine wave\n if np.all(spatial_pattern==None):\n spatial_pattern=np.ones(C['nx'])\n #\n if np.all(atm_DA_tendencies!=None):\n use_atm_tendencies=True\n else:\n use_atm_tendencies=False\n if np.all(ocn_DA_tendencies!=None):\n use_ocn_tendencies=True\n else:\n use_ocn_tendencies=False\n #\n atm_DA_tendencies = np.zeros((nt,C['nx']))\n ocn_DA_tendencies = np.zeros((nt,C['nx']))\n #\n tas_out, sst_out, hice_out, sflx_out, sflx_f_out, atm_DA_tendencies, ocn_DA_tendencies=CoupledChannel_time(nt,C['nx'],C['xx'],C['dt'],C['avep'],sst,tas,hice,sst_boundary,sst_out,tas_out,hice_out,sflx_f_out,sflx_out,forcing,spatial_pattern,C['ra'],C['Cp'],C['va'],C['vo'],C['Da'],C['Do'],C['Cs'],C['T0'],C['Tf'],C['emissivity'],C['SW0'],C['SW_anom'],C['H'],C['Hb'],C['Cpo'],C['ro'],C['tau_entrainment'],C['Li'],C['ri'],use_ocn_tendencies,use_atm_tendencies, atm_DA_tendencies, ocn_DA_tendencies,ice_model,atm_adv,return_coupled_fluxes)\n #\n if return_coupled_fluxes:\n return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt, atm_DA_tendencies, ocn_DA_tendencies\n else:\n return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt \n "
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.random.rand",
"numpy.zeros",
"numpy.sum",
"numpy.ones",
"numpy.tile",
"numpy.min",
"numpy.where",
"numpy.arange",
"numpy.cos",
"numpy.all"
]
] |
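The diagnostic sea-ice scheme in cutils.py is spelled out in its comments: compute the mixed-layer energy tendency dE/dt, convert any cooling below Tf into new ice, spend any warming on melting existing ice first, and cap the melt at the available thickness so energy is only switched between ocean and ice storages. A condensed standalone restatement of that update (constants mirror the set_model_constants defaults; this is a sketch, not the repo's API):

    import numpy as np

    def ice_step(sst_old, sst_new, hice, dt, H=200., ro=1030., Cpo=4.E3,
                 Li=3.3E6, ri=900., Tf=273.15 - 1.8):
        # masks: cells with ice available to melt / cells that would cool below Tf
        ice_mask = (hice > 0).astype(float)
        freezing_mask = (sst_new < Tf).astype(float)
        # mixed-layer energy tendency implied by the provisional SST update
        dEdt = H * ro * Cpo * (sst_new - sst_old) / dt
        excess_freeze = freezing_mask * np.maximum(-dEdt, 0.0)
        excess_melt = ice_mask * np.maximum(dEdt, 0.0)
        dh_freeze = dt * excess_freeze / (Li * ri)
        dh_melt = np.minimum(dt * excess_melt / (Li * ri), hice)  # cap at available ice
        hice = np.maximum(hice + dh_freeze - dh_melt, 0.0)
        # return the freezing heat to the water, remove the melting heat
        sst_new = sst_new + dt * excess_freeze / (H * Cpo * ro) \
                          - dh_melt * (Li * ri) / (ro * Cpo * H)
        return sst_new, hice

    sst, hice = ice_step(np.array([271.0, 274.0]), np.array([270.5, 275.0]),
                         np.array([0.0, 0.5]), dt=3600.)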
CactusJackFX/PyImageSearch_Guru
|
[
"01f5bce644b58848db029f72656002e21545bb10",
"01f5bce644b58848db029f72656002e21545bb10"
] |
[
"Module_02_Building_Your_Own_Custom_Object_Detector/2.10_Re-Training_and_Running_your_Classifier/hard_negative_mine.py",
"Module_02_Building_Your_Own_Custom_Object_Detector/2.8_Non-Maxima_Suppression/test_model.py"
] |
[
"# USAGE\n# python hard_negative_mine.py --conf conf/cars.json\n\n# import the necessary packages\nfrom __future__ import print_function\nfrom pyimagesearch.object_detection import ObjectDetector\nfrom pyimagesearch.descriptors import HOG\nfrom pyimagesearch.utils import dataset\nfrom pyimagesearch.utils import Conf\nfrom imutils import paths\nimport numpy as np\nimport progressbar\nimport argparse\nimport pickle\nimport random\nimport cv2\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-c\", \"--conf\", required=True, help=\"path to the configuration file\")\nargs = vars(ap.parse_args())\n\n# load the configuration file and initialize the data list\nconf = Conf(args[\"conf\"])\ndata = []\n\n# load the classifier, then initialize the Histogram of Oriented Gradients descriptor\n# and the object detector\nmodel = pickle.loads(open(conf[\"classifier_path\"], \"rb\").read())\nhog = HOG(orientations=conf[\"orientations\"], pixelsPerCell=tuple(conf[\"pixels_per_cell\"]),\n\tcellsPerBlock=tuple(conf[\"cells_per_block\"]), normalize=conf[\"normalize\"], block_norm=\"L1\")\nod = ObjectDetector(model, hog)\n\n# grab the set of distraction paths and randomly sample them\ndstPaths = list(paths.list_images(conf[\"image_distractions\"]))\ndstPaths = random.sample(dstPaths, conf[\"hn_num_distraction_images\"])\n\n# setup the progress bar\nwidgets = [\"Mining: \", progressbar.Percentage(), \" \", progressbar.Bar(), \" \", progressbar.ETA()]\npbar = progressbar.ProgressBar(maxval=len(dstPaths), widgets=widgets).start()\n\n# loop over the distraction paths\nfor (i, imagePath) in enumerate(dstPaths):\n\t# load the image and convert it to grayscale\n\timage = cv2.imread(imagePath)\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\t# detect objects in the image\n\t(boxes, probs) = od.detect(gray, conf[\"window_dim\"], winStep=conf[\"hn_window_step\"],\n\t\tpyramidScale=conf[\"hn_pyramid_scale\"], minProb=conf[\"hn_min_probability\"])\n\n\t# loop over the bounding boxes\n\tfor (prob, (startX, startY, endX, endY)) in zip(probs, boxes):\n\t\t# extract the ROI from the image, resize it to a known, canonical size, extract\n\t\t# HOG features from teh ROI, and finally update the data\n\t\troi = cv2.resize(gray[startY:endY, startX:endX], tuple(conf[\"window_dim\"]),\n\t\t\tinterpolation=cv2.INTER_AREA)\n\t\tfeatures = hog.describe(roi)\n\t\tdata.append(np.hstack([[prob], features]))\n\n\t# update the progress bar\n\tpbar.update(i)\n\n# sort the data points by confidence\npbar.finish()\nprint(\"[INFO] sorting by probability...\")\ndata = np.array(data)\ndata = data[data[:, 0].argsort()[::-1]]\n\n# dump the dataset to file\nprint(\"[INFO] dumping hard negatives to file...\")\ndataset.dump_dataset(data[:, 1:], [-1] * len(data), conf[\"features_path\"], \"hard_negatives\",\n\twriteMethod=\"a\")",
"# USAGE\n# python test_model.py --conf conf/cars.json --image datasets/caltech101/101_ObjectCategories/car_side/image_0035.jpg\n\n# import the necessary packages\nfrom pyimagesearch.object_detection import non_max_suppression\nfrom pyimagesearch.object_detection import ObjectDetector\nfrom pyimagesearch.descriptors import HOG\nfrom pyimagesearch.utils import Conf\nimport numpy as np\nimport imutils\nimport argparse\nimport pickle\nimport cv2\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-c\", \"--conf\", required=True, help=\"path to the configuration file\")\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to the image to be classified\")\nargs = vars(ap.parse_args())\n\n# load the configuration file\nconf = Conf(args[\"conf\"])\n\n# load the classifier, then initialize the Histogram of Oriented Gradients descriptor\n# and the object detector\nmodel = pickle.loads(open(conf[\"classifier_path\"], \"rb\").read())\nhog = HOG(orientations=conf[\"orientations\"], pixelsPerCell=tuple(conf[\"pixels_per_cell\"]),\n\tcellsPerBlock=tuple(conf[\"cells_per_block\"]), normalize=conf[\"normalize\"], block_norm=\"L1\")\nod = ObjectDetector(model, hog)\n\n# load the image and convert it to grayscale\nimage = cv2.imread(args[\"image\"])\nimage = imutils.resize(image, width=min(260, image.shape[1]))\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# detect objects in the image and apply non-maxima suppression to the bounding boxes\n(boxes, probs) = od.detect(gray, conf[\"window_dim\"], winStep=conf[\"window_step\"],\n\tpyramidScale=conf[\"pyramid_scale\"], minProb=conf[\"min_probability\"])\npick = non_max_suppression(np.array(boxes), probs, conf[\"overlap_thresh\"])\norig = image.copy()\n\n# loop over the original bounding boxes and draw them\nfor (startX, startY, endX, endY) in boxes:\n\tcv2.rectangle(orig, (startX, startY), (endX, endY), (0, 0, 255), 2)\n\n# loop over the allowed bounding boxes and draw them\nfor (startX, startY, endX, endY) in pick:\n\tcv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)\n\n# show the output images\ncv2.imshow(\"Original\", orig)\ncv2.imshow(\"Image\", image)\ncv2.waitKey(0)"
] |
[
[
"numpy.hstack",
"numpy.array"
],
[
"numpy.array"
]
] |
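test_model.py imports non_max_suppression from pyimagesearch.object_detection, which is not included in this entry. A generic greedy NMS in the Malisiewicz style is one plausible reading of what that helper does (an assumption; the repo's actual implementation may differ):

    import numpy as np

    def non_max_suppression(boxes, probs, overlap_thresh=0.3):
        # greedy NMS over (startX, startY, endX, endY) boxes
        if len(boxes) == 0:
            return np.empty((0, 4), dtype=int)
        boxes = boxes.astype(float)
        x1, y1, x2, y2 = boxes.T
        area = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = np.argsort(probs)       # highest-probability box is last
        keep = []
        while len(order) > 0:
            i = order[-1]               # keep the current best box
            keep.append(i)
            # overlap of every remaining box with the kept box
            w = np.maximum(0, np.minimum(x2[i], x2[order[:-1]]) -
                              np.maximum(x1[i], x1[order[:-1]]) + 1)
            h = np.maximum(0, np.minimum(y2[i], y2[order[:-1]]) -
                              np.maximum(y1[i], y1[order[:-1]]) + 1)
            overlap = (w * h) / area[order[:-1]]
            order = order[:-1][overlap <= overlap_thresh]
        return boxes[keep].astype(int)

    boxes = np.array([[10, 10, 50, 50], [12, 12, 52, 52], [100, 100, 140, 140]])
    print(non_max_suppression(boxes, np.array([0.9, 0.6, 0.8])))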
qinchangping/tensorflow
|
[
"f7f7036d1cdc5716aff976fae0ea4d1b9a931b56"
] |
[
"tensorflow/python/keras/_impl/keras/applications/mobilenet.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=invalid-name\n# pylint: disable=unused-import\n\"\"\"MobileNet v1 models for Keras.\n\nMobileNet is a general architecture and can be used for multiple use cases.\nDepending on the use case, it can use different input layer size and\ndifferent width factors. This allows different width models to reduce\nthe number of multiply-adds and thereby\nreduce inference cost on mobile devices.\n\nMobileNets support any input size greater than 32 x 32, with larger image sizes\noffering better performance.\nThe number of parameters and number of multiply-adds\ncan be modified by using the `alpha` parameter,\nwhich increases/decreases the number of filters in each layer.\nBy altering the image size and `alpha` parameter,\nall 16 models from the paper can be built, with ImageNet weights provided.\n\nThe paper demonstrates the performance of MobileNets using `alpha` values of\n1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25.\nFor each of these `alpha` values, weights for 4 different input image sizes\nare provided (224, 192, 160, 128).\n\nThe following table describes the size and accuracy of the 100% MobileNet\non size 224 x 224:\n----------------------------------------------------------------------------\nWidth Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M)\n----------------------------------------------------------------------------\n| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |\n| 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 |\n| 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 |\n| 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 |\n----------------------------------------------------------------------------\n\nThe following table describes the performance of\nthe 100 % MobileNet on various input sizes:\n------------------------------------------------------------------------\n Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M)\n------------------------------------------------------------------------\n| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |\n| 1.0 MobileNet-192 | 69.1 % | 529 | 4.2 |\n| 1.0 MobileNet-160 | 67.2 % | 529 | 4.2 |\n| 1.0 MobileNet-128 | 64.4 % | 529 | 4.2 |\n------------------------------------------------------------------------\n\nThe weights for all 16 models are obtained and translated\nfrom TensorFlow checkpoints found at\nhttps://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md\n\n# Reference\n- [MobileNets: Efficient Convolutional Neural Networks for\n Mobile Vision Applications](https://arxiv.org/pdf/1704.04861.pdf))\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.python.keras._impl.keras import backend as K\nfrom tensorflow.python.keras._impl.keras import constraints\nfrom tensorflow.python.keras._impl.keras import 
initializers\nfrom tensorflow.python.keras._impl.keras import regularizers\nfrom tensorflow.python.keras._impl.keras.applications import imagenet_utils\nfrom tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape\nfrom tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions\nfrom tensorflow.python.keras._impl.keras.engine import InputSpec\nfrom tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs\nfrom tensorflow.python.keras._impl.keras.engine.topology import shape_type_conversion\nfrom tensorflow.python.keras._impl.keras.layers import Activation\nfrom tensorflow.python.keras._impl.keras.layers import BatchNormalization\nfrom tensorflow.python.keras._impl.keras.layers import Conv2D\nfrom tensorflow.python.keras._impl.keras.layers import Dropout\nfrom tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D\nfrom tensorflow.python.keras._impl.keras.layers import GlobalMaxPooling2D\nfrom tensorflow.python.keras._impl.keras.layers import Input\nfrom tensorflow.python.keras._impl.keras.layers import Reshape\nfrom tensorflow.python.keras._impl.keras.models import Model\nfrom tensorflow.python.keras._impl.keras.utils import conv_utils\nfrom tensorflow.python.keras._impl.keras.utils.data_utils import get_file\nfrom tensorflow.python.platform import tf_logging as logging\n\n\nBASE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.6/'\n\n\ndef relu6(x):\n return K.relu(x, max_value=6)\n\n\ndef preprocess_input(x):\n \"\"\"Preprocesses a numpy array encoding a batch of images.\n\n Arguments:\n x: a 4D numpy array consists of RGB values within [0, 255].\n\n Returns:\n Preprocessed array.\n \"\"\"\n return imagenet_utils.preprocess_input(x, mode='tf')\n\n\nclass DepthwiseConv2D(Conv2D):\n \"\"\"Depthwise separable 2D convolution.\n\n Depthwise Separable convolutions consists in performing\n just the first step in a depthwise spatial convolution\n (which acts on each input channel separately).\n The `depth_multiplier` argument controls how many\n output channels are generated per input channel in the depthwise step.\n\n Arguments:\n kernel_size: An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: one of `'valid'` or `'same'` (case-insensitive).\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel.\n The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be 'channels_last'.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n 
(ie. 'linear' activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n depthwise_initializer: Initializer for the depthwise kernel matrix.\n bias_initializer: Initializer for the bias vector.\n depthwise_regularizer: Regularizer function applied to\n the depthwise kernel matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its 'activation')..\n depthwise_constraint: Constraint function applied to\n the depthwise kernel matrix.\n bias_constraint: Constraint function applied to the bias vector.\n\n Input shape:\n 4D tensor with shape:\n `[batch, channels, rows, cols]` if data_format='channels_first'\n or 4D tensor with shape:\n `[batch, rows, cols, channels]` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `[batch, filters, new_rows, new_cols]` if data_format='channels_first'\n or 4D tensor with shape:\n `[batch, new_rows, new_cols, filters]` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to padding.\n \"\"\"\n\n def __init__(self,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n depth_multiplier=1,\n data_format=None,\n activation=None,\n use_bias=True,\n depthwise_initializer='glorot_uniform',\n bias_initializer='zeros',\n depthwise_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n depthwise_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(DepthwiseConv2D, self).__init__(\n filters=None,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n activation=activation,\n use_bias=use_bias,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n bias_constraint=bias_constraint,\n **kwargs)\n self.depth_multiplier = depth_multiplier\n self.depthwise_initializer = initializers.get(depthwise_initializer)\n self.depthwise_regularizer = regularizers.get(depthwise_regularizer)\n self.depthwise_constraint = constraints.get(depthwise_constraint)\n self.bias_initializer = initializers.get(bias_initializer)\n\n @shape_type_conversion\n def build(self, input_shape):\n if len(input_shape) < 4:\n raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '\n 'Received input shape:', str(input_shape))\n if self.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = 3\n if input_shape[channel_axis] is None:\n raise ValueError('The channel dimension of the inputs to '\n '`DepthwiseConv2D` '\n 'should be defined. 
Found `None`.')\n input_dim = int(input_shape[channel_axis])\n depthwise_kernel_shape = (self.kernel_size[0], self.kernel_size[1],\n input_dim, self.depth_multiplier)\n\n self.depthwise_kernel = self.add_weight(\n shape=depthwise_kernel_shape,\n initializer=self.depthwise_initializer,\n name='depthwise_kernel',\n regularizer=self.depthwise_regularizer,\n constraint=self.depthwise_constraint)\n\n if self.use_bias:\n self.bias = self.add_weight(\n shape=(input_dim * self.depth_multiplier,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n # Set input spec.\n self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})\n self.built = True\n\n def call(self, inputs, training=None):\n outputs = K.depthwise_conv2d(\n inputs,\n self.depthwise_kernel,\n strides=self.strides,\n padding=self.padding,\n dilation_rate=self.dilation_rate,\n data_format=self.data_format)\n\n if self.bias:\n outputs = K.bias_add(outputs, self.bias, data_format=self.data_format)\n\n if self.activation is not None:\n return self.activation(outputs)\n\n return outputs\n\n @shape_type_conversion\n def compute_output_shape(self, input_shape):\n if self.data_format == 'channels_first':\n rows = input_shape[2]\n cols = input_shape[3]\n out_filters = input_shape[1] * self.depth_multiplier\n elif self.data_format == 'channels_last':\n rows = input_shape[1]\n cols = input_shape[2]\n out_filters = input_shape[3] * self.depth_multiplier\n\n rows = conv_utils.conv_output_length(rows, self.kernel_size[0],\n self.padding, self.strides[0])\n cols = conv_utils.conv_output_length(cols, self.kernel_size[1],\n self.padding, self.strides[1])\n\n if self.data_format == 'channels_first':\n return (input_shape[0], out_filters, rows, cols)\n elif self.data_format == 'channels_last':\n return (input_shape[0], rows, cols, out_filters)\n\n def get_config(self):\n config = super(DepthwiseConv2D, self).get_config()\n config.pop('filters')\n config.pop('kernel_initializer')\n config.pop('kernel_regularizer')\n config.pop('kernel_constraint')\n config['depth_multiplier'] = self.depth_multiplier\n config['depthwise_initializer'] = initializers.serialize(\n self.depthwise_initializer)\n config['depthwise_regularizer'] = regularizers.serialize(\n self.depthwise_regularizer)\n config['depthwise_constraint'] = constraints.serialize(\n self.depthwise_constraint)\n return config\n\n\ndef MobileNet(input_shape=None,\n alpha=1.0,\n depth_multiplier=1,\n dropout=1e-3,\n include_top=True,\n weights='imagenet',\n input_tensor=None,\n pooling=None,\n classes=1000):\n \"\"\"Instantiates the MobileNet architecture.\n\n Note that only TensorFlow is supported for now,\n therefore it only works with the data format\n `image_data_format='channels_last'` in your Keras config\n at `~/.keras/keras.json`.\n\n To load a MobileNet model via `load_model`, import the custom\n objects `relu6` and `DepthwiseConv2D` and pass them to the\n `custom_objects` parameter.\n E.g.\n model = load_model('mobilenet.h5', custom_objects={\n 'relu6': mobilenet.relu6,\n 'DepthwiseConv2D': mobilenet.DepthwiseConv2D})\n\n Arguments:\n input_shape: optional shape tuple, only to be specified\n if `include_top` is False (otherwise the input shape\n has to be `(224, 224, 3)` (with `channels_last` data format)\n or (3, 224, 224) (with `channels_first` data format).\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. 
`(200, 200, 3)` would be one valid value.\n alpha: controls the width of the network.\n - If `alpha` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `alpha` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `alpha` = 1, default number of filters from the paper\n are used at each layer.\n depth_multiplier: depth multiplier for depthwise convolution\n (also called the resolution multiplier)\n dropout: dropout rate\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n 'imagenet' (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n input_tensor: optional Keras tensor (i.e. output of\n `layers.Input()`)\n to use as image input for the model.\n pooling: Optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model\n will be the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a\n 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n\n Returns:\n A Keras model instance.\n\n Raises:\n ValueError: in case of invalid argument for `weights`,\n or invalid input shape.\n RuntimeError: If attempting to run this model with a\n backend that does not support separable convolutions.\n \"\"\"\n\n if K.backend() != 'tensorflow':\n raise RuntimeError('Only TensorFlow backend is currently supported, '\n 'as other backends do not support '\n 'depthwise convolution.')\n\n if not (weights in {'imagenet', None} or os.path.exists(weights)):\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization), `imagenet` '\n '(pre-training on ImageNet), '\n 'or the path to the weights file to be loaded.')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as ImageNet with `include_top` '\n 'as true, `classes` should be 1000')\n\n # Determine proper input shape and default size.\n if input_shape is None:\n default_size = 224\n else:\n if K.image_data_format() == 'channels_first':\n rows = input_shape[1]\n cols = input_shape[2]\n else:\n rows = input_shape[0]\n cols = input_shape[1]\n\n if rows == cols and rows in [128, 160, 192, 224]:\n default_size = rows\n else:\n default_size = 224\n\n input_shape = _obtain_input_shape(\n input_shape,\n default_size=default_size,\n min_size=32,\n data_format=K.image_data_format(),\n require_flatten=include_top,\n weights=weights)\n\n if K.image_data_format() == 'channels_last':\n row_axis, col_axis = (0, 1)\n else:\n row_axis, col_axis = (1, 2)\n rows = input_shape[row_axis]\n cols = input_shape[col_axis]\n\n if weights == 'imagenet':\n if depth_multiplier != 1:\n raise ValueError('If imagenet weights are being loaded, '\n 'depth multiplier must be 1')\n\n if alpha not in [0.25, 0.50, 0.75, 1.0]:\n raise ValueError('If imagenet weights are being loaded, '\n 'alpha can be one of'\n '`0.25`, `0.50`, `0.75` or `1.0` only.')\n\n if rows != cols or rows not in [128, 160, 192, 224]:\n raise ValueError('If imagenet weights are being loaded, '\n 'input must have a static square shape (one of '\n '(128,128), (160,160), (192,192), or (224, 224)).'\n ' Input 
shape provided = %s' % (input_shape,))\n\n if K.image_data_format() != 'channels_last':\n logging.warning('The MobileNet family of models is only available '\n 'for the input data format \"channels_last\" '\n '(width, height, channels). '\n 'However your settings specify the default '\n 'data format \"channels_first\" (channels, width, height).'\n ' You should set `image_data_format=\"channels_last\"` '\n 'in your Keras config located at ~/.keras/keras.json. '\n 'The model being returned right now will expect inputs '\n 'to follow the \"channels_last\" data format.')\n K.set_image_data_format('channels_last')\n old_data_format = 'channels_first'\n else:\n old_data_format = None\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n if not K.is_keras_tensor(input_tensor):\n img_input = Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n\n x = _conv_block(img_input, 32, alpha, strides=(2, 2))\n x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)\n\n x = _depthwise_conv_block(\n x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)\n x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)\n\n x = _depthwise_conv_block(\n x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)\n x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)\n\n x = _depthwise_conv_block(\n x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)\n x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)\n x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)\n x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)\n x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)\n x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)\n\n x = _depthwise_conv_block(\n x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)\n x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)\n\n if include_top:\n if K.image_data_format() == 'channels_first':\n shape = (int(1024 * alpha), 1, 1)\n else:\n shape = (1, 1, int(1024 * alpha))\n\n x = GlobalAveragePooling2D()(x)\n x = Reshape(shape, name='reshape_1')(x)\n x = Dropout(dropout, name='dropout')(x)\n x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)\n x = Activation('softmax', name='act_softmax')(x)\n x = Reshape((classes,), name='reshape_2')(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n\n # Create model.\n model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))\n\n # load weights\n if weights == 'imagenet':\n if K.image_data_format() == 'channels_first':\n raise ValueError('Weights for \"channels_last\" format '\n 'are not available.')\n if alpha == 1.0:\n alpha_text = '1_0'\n elif alpha == 0.75:\n alpha_text = '7_5'\n elif alpha == 0.50:\n alpha_text = '5_0'\n else:\n alpha_text = '2_5'\n\n if include_top:\n model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)\n weigh_path = BASE_WEIGHT_PATH + model_name\n weights_path = get_file(model_name, weigh_path, cache_subdir='models')\n else:\n model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)\n weigh_path = BASE_WEIGHT_PATH + model_name\n weights_path = get_file(model_name, weigh_path, 
cache_subdir='models')\n model.load_weights(weights_path)\n elif weights is not None:\n model.load_weights(weights)\n\n if old_data_format:\n K.set_image_data_format(old_data_format)\n return model\n\n\ndef _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):\n \"\"\"Adds an initial convolution layer (with batch normalization and relu6).\n\n Arguments:\n inputs: Input tensor of shape `(rows, cols, 3)`\n (with `channels_last` data format) or\n (3, rows, cols) (with `channels_first` data format).\n It should have exactly 3 inputs channels,\n and width and height should be no smaller than 32.\n E.g. `(224, 224, 3)` would be one valid value.\n filters: Integer, the dimensionality of the output space\n (i.e. the number output of filters in the convolution).\n alpha: controls the width of the network.\n - If `alpha` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `alpha` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `alpha` = 1, default number of filters from the paper\n are used at each layer.\n kernel: An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n\n Input shape:\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(samples, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to stride.\n\n Returns:\n Output tensor of block.\n \"\"\"\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n filters = int(filters * alpha)\n x = Conv2D(\n filters,\n kernel,\n padding='same',\n use_bias=False,\n strides=strides,\n name='conv1')(\n inputs)\n x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)\n return Activation(relu6, name='conv1_relu')(x)\n\n\ndef _depthwise_conv_block(inputs,\n pointwise_conv_filters,\n alpha,\n depth_multiplier=1,\n strides=(1, 1),\n block_id=1):\n \"\"\"Adds a depthwise convolution block.\n\n A depthwise convolution block consists of a depthwise conv,\n batch normalization, relu6, pointwise convolution,\n batch normalization and relu6 activation.\n\n Arguments:\n inputs: Input tensor of shape `(rows, cols, channels)`\n (with `channels_last` data format) or\n (channels, rows, cols) (with `channels_first` data format).\n pointwise_conv_filters: Integer, the dimensionality of the output space\n (i.e. 
the number output of filters in the pointwise convolution).\n alpha: controls the width of the network.\n - If `alpha` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `alpha` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `alpha` = 1, default number of filters from the paper\n are used at each layer.\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel.\n The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.\n strides: An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n block_id: Integer, a unique identification designating the block number.\n\n Input shape:\n 4D tensor with shape:\n `(batch, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch, rows, cols, channels)` if data_format='channels_last'.\n\n Output shape:\n 4D tensor with shape:\n `(batch, filters, new_rows, new_cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(batch, new_rows, new_cols, filters)` if data_format='channels_last'.\n `rows` and `cols` values might have changed due to stride.\n\n Returns:\n Output tensor of block.\n \"\"\"\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n pointwise_conv_filters = int(pointwise_conv_filters * alpha)\n\n x = DepthwiseConv2D( # pylint: disable=not-callable\n (3, 3),\n padding='same',\n depth_multiplier=depth_multiplier,\n strides=strides,\n use_bias=False,\n name='conv_dw_%d' % block_id)(\n inputs)\n x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)\n x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)\n\n x = Conv2D(\n pointwise_conv_filters, (1, 1),\n padding='same',\n use_bias=False,\n strides=(1, 1),\n name='conv_pw_%d' % block_id)(\n x)\n x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)\n return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)\n"
] |
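The `_conv_block` and `_depthwise_conv_block` helpers in the row above implement MobileNet's core unit: a 3x3 depthwise convolution, batch normalization, relu6, then a 1x1 pointwise convolution, batch normalization, relu6. Below is a minimal self-contained sketch of that unit against the public tf.keras API rather than the internal `_impl` paths the file imports; `depthwise_separable_block` and `width_alpha` are illustrative names, not identifiers from the original file, and channels_last data format is assumed.

import tensorflow as tf
from tensorflow.keras import layers

def relu6(x):
    # MobileNet clips activations at 6, matching the snippet's `relu6`.
    return tf.keras.backend.relu(x, max_value=6.0)

def depthwise_separable_block(x, pointwise_filters, width_alpha=1.0,
                              depth_multiplier=1, strides=(1, 1), block_id=1):
    # Width multiplier (`alpha` in the paper) thins the pointwise output.
    pointwise_filters = int(pointwise_filters * width_alpha)
    # Depthwise 3x3: one spatial filter per input channel (times depth_multiplier).
    x = layers.DepthwiseConv2D((3, 3), padding='same', strides=strides,
                               depth_multiplier=depth_multiplier, use_bias=False,
                               name='conv_dw_%d' % block_id)(x)
    x = layers.BatchNormalization(name='conv_dw_%d_bn' % block_id)(x)
    x = layers.Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
    # Pointwise 1x1: the only place channels are mixed.
    x = layers.Conv2D(pointwise_filters, (1, 1), padding='same', use_bias=False,
                      name='conv_pw_%d' % block_id)(x)
    x = layers.BatchNormalization(name='conv_pw_%d_bn' % block_id)(x)
    return layers.Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)

# Example: one stride-2 block, analogous to block_id=2 in the network above.
inputs = layers.Input(shape=(224, 224, 3))
out = depthwise_separable_block(inputs, 128, width_alpha=0.5, strides=(2, 2))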
[
[
"tensorflow.python.keras._impl.keras.layers.BatchNormalization",
"tensorflow.python.keras._impl.keras.layers.Reshape",
"tensorflow.python.keras._impl.keras.backend.relu",
"tensorflow.python.keras._impl.keras.utils.conv_utils.conv_output_length",
"tensorflow.python.keras._impl.keras.constraints.get",
"tensorflow.python.keras._impl.keras.backend.backend",
"tensorflow.python.keras._impl.keras.backend.is_keras_tensor",
"tensorflow.python.keras._impl.keras.utils.data_utils.get_file",
"tensorflow.python.keras._impl.keras.backend.image_data_format",
"tensorflow.python.keras._impl.keras.layers.Activation",
"tensorflow.python.keras._impl.keras.constraints.serialize",
"tensorflow.python.keras._impl.keras.layers.Dropout",
"tensorflow.python.keras._impl.keras.regularizers.get",
"tensorflow.python.keras._impl.keras.engine.InputSpec",
"tensorflow.python.keras._impl.keras.applications.imagenet_utils.preprocess_input",
"tensorflow.python.keras._impl.keras.regularizers.serialize",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.keras._impl.keras.engine.topology.get_source_inputs",
"tensorflow.python.keras._impl.keras.models.Model",
"tensorflow.python.keras._impl.keras.backend.set_image_data_format",
"tensorflow.python.keras._impl.keras.layers.Conv2D",
"tensorflow.python.keras._impl.keras.layers.Input",
"tensorflow.python.keras._impl.keras.layers.GlobalAveragePooling2D",
"tensorflow.python.keras._impl.keras.backend.bias_add",
"tensorflow.python.keras._impl.keras.backend.depthwise_conv2d",
"tensorflow.python.keras._impl.keras.initializers.get",
"tensorflow.python.keras._impl.keras.initializers.serialize",
"tensorflow.python.keras._impl.keras.layers.GlobalMaxPooling2D"
]
] |
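For context, the function in this row is normally reached through the public `keras.applications` entry point, which mirrors the signature above. A hedged usage sketch follows; the shape and alpha values are illustrative, and pretrained weights exist only for the alpha/resolution combinations the validation code above enforces.

import tensorflow as tf
from tensorflow.keras.applications.mobilenet import MobileNet

# ImageNet weights are published only for alpha in {0.25, 0.50, 0.75, 1.0}
# and square inputs of 128, 160, 192, or 224, per the checks above.
model = MobileNet(input_shape=(160, 160, 3), alpha=0.5,
                  include_top=False, weights='imagenet', pooling='avg')

# With include_top=False and pooling='avg', the output is a 2D tensor.
features = model.predict(tf.zeros((1, 160, 160, 3)))
print(features.shape)  # (1, 512) -- int(1024 * 0.5) channels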
shism2/seqio
|
[
"63f96f1d29f7721af67d79c0265d7f937170ee20"
] |
[
"seqio/dataset_providers_test.py"
] |
[
"# Copyright 2022 The SeqIO Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for seqio.dataset_providers.\"\"\"\n\nimport copy\nimport functools\nimport os\nimport shutil\nfrom typing import Any, Callable, Mapping, Optional, Sequence\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom seqio import dataset_providers\nfrom seqio import feature_converters\nfrom seqio import metrics as metrics_lib\nfrom seqio import preprocessors\nfrom seqio import test_utils\nfrom seqio import utils\nfrom seqio import vocabularies\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\n\ntf.compat.v1.enable_eager_execution()\n\nTaskRegistry = dataset_providers.TaskRegistry\nMixtureRegistry = dataset_providers.MixtureRegistry\nmock = absltest.mock\nassert_dataset = test_utils.assert_dataset\ncreate_default_dataset = test_utils.create_default_dataset\n\n\nclass TasksTest(test_utils.FakeTaskTest):\n\n def test_invalid_name(self):\n with self.assertRaisesRegex(\n ValueError,\n \"Task name 'invalid/name' contains invalid characters. \"\n \"Must match regex: .*\"):\n self.add_task(\"invalid/name\", self.function_source)\n\n def test_repeat_name(self):\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"Attempting to register duplicate provider: text_line_task\"):\n self.add_task(\"text_line_task\", self.text_line_source)\n\n def test_function_source_signature(self):\n # Good signatures.\n def good_fn(split, shuffle_files):\n del split\n del shuffle_files\n dataset_providers.FunctionDataSource(good_fn, splits=(\"train\",))\n\n def default_good_fn(split, shuffle_files=False):\n del split\n del shuffle_files\n dataset_providers.FunctionDataSource(default_good_fn, splits=(\"train\",))\n\n def seed_fn(split, shuffle_files=True, seed=0):\n del split\n del shuffle_files\n del seed\n dataset_providers.FunctionDataSource(seed_fn, splits=(\"train\",))\n\n def extra_kwarg_good_fn(split, shuffle_files, unused_kwarg=True):\n del split\n del shuffle_files\n dataset_providers.FunctionDataSource(extra_kwarg_good_fn, splits=(\"train\",))\n\n # Bad signatures.\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"'missing_shuff' must have positional args ('split', 'shuffle_files'), \"\n \"got: ('split',)\"):\n def missing_shuff(split):\n del split\n dataset_providers.FunctionDataSource(missing_shuff, splits=(\"train\",))\n\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"'missing_split' must have positional args ('split', 'shuffle_files'), \"\n \"got: ('shuffle_files',)\"):\n def missing_split(shuffle_files):\n del shuffle_files\n dataset_providers.FunctionDataSource(missing_split, splits=(\"train\",))\n\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"'extra_pos_arg' may only have positional args ('split', \"\n \"'shuffle_files'), got: ('split', 'shuffle_files', 'unused_arg')\"):\n def extra_pos_arg(split, shuffle_files, unused_arg):\n del split\n del shuffle_files\n 
dataset_providers.FunctionDataSource(extra_pos_arg, splits=(\"train\",))\n\n def test_metric_fn_signature(self):\n # pylint:disable=unused-argument\n\n add_task = functools.partial(self.add_task, source=self.function_source)\n\n def score_metric_fn(targets, scores):\n return {}\n\n def predict_metric_fn(targets, predictions):\n return {}\n\n valid_task = add_task(\n \"valid_metrics\", metric_fns=[score_metric_fn, predict_metric_fn])\n\n self.assertSameElements(\n [score_metric_fn, predict_metric_fn], valid_task.metric_fns)\n self.assertSameElements(\n [score_metric_fn], valid_task.score_metric_fns)\n self.assertSameElements(\n [predict_metric_fn], valid_task.predict_metric_fns)\n\n def extra_arg_metric_fn(targets, predictions, extra_param):\n return {}\n\n expected_error_message_prefix = (\n \"Metric functions must have positional arguments matching either \"\n \"('targets', 'predictions') or ('targets', 'scores'). Got: \")\n\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n expected_error_message_prefix +\n \"('targets', 'predictions', 'extra_param')\"):\n valid_task = add_task(\n \"extra_arg_metric\", metric_fns=[extra_arg_metric_fn])\n\n def bad_order_metric_fn(predictions, targets):\n return {}\n\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n expected_error_message_prefix + \"('predictions', 'targets')\"):\n valid_task = add_task(\n \"bad_order_metric\", metric_fns=[bad_order_metric_fn])\n\n def bad_default_metric_fn(targets, predictions=(0)):\n return {}\n\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n expected_error_message_prefix + \"('targets',)\"):\n valid_task = add_task(\n \"bad_default_metric\", metric_fns=[bad_default_metric_fn])\n\n def ok_default_metric_fn(targets, predictions, extra_param=3):\n return {}\n\n valid_task_2 = add_task(\n \"valid_metrics_2\", metric_fns=[ok_default_metric_fn])\n self.assertSameElements([ok_default_metric_fn], valid_task_2.metric_fns)\n self.assertEmpty(valid_task_2.score_metric_fns)\n self.assertSameElements(\n [ok_default_metric_fn], valid_task_2.predict_metric_fns)\n\n def predict_metric_fn_with_types(\n targets: Sequence[Mapping[str,\n Any]], predictions: Sequence[Mapping[str,\n Any]]\n ) -> Mapping[str, metrics_lib.MetricValue]:\n return {}\n\n valid_task_with_types = TaskRegistry.add(\n \"valid_metrics_with_types\",\n source=self.function_source,\n output_features={\n \"inputs\":\n dataset_providers.Feature(test_utils.sentencepiece_vocab()),\n \"targets\":\n dataset_providers.Feature(test_utils.sentencepiece_vocab())\n },\n metric_fns=[predict_metric_fn_with_types])\n\n self.assertSameElements([predict_metric_fn_with_types],\n valid_task_with_types.metric_fns)\n\n # pylint:enable=unused-argument\n\n def test_no_tfds_version(self):\n with self.assertRaisesWithLiteralMatch(\n ValueError, \"TFDS name must contain a version number, got: fake\"):\n dataset_providers.TfdsDataSource(tfds_name=\"fake\")\n\n def test_tfds_splits(self):\n self.assertSameElements(\n [\"train\", \"validation\"],\n dataset_providers.TfdsDataSource(tfds_name=\"fake:0.0.0\").splits)\n self.assertSameElements(\n [\"validation\"],\n dataset_providers.TfdsDataSource(\n tfds_name=\"fake:0.0.0\", splits=[\"validation\"]).splits)\n self.assertSameElements(\n [\"validation\"],\n dataset_providers.TfdsDataSource(\n tfds_name=\"fake:0.0.0\", splits={\"validation\": \"train\"}).splits)\n\n def test_tfds_task(self):\n self.verify_task_matches_fake_datasets(\n \"tfds_task\", use_cached=False)\n\n def test_function_task(self):\n 
self.verify_task_matches_fake_datasets(\n \"function_task\", use_cached=False)\n\n def test_text_line_task(self):\n self.verify_task_matches_fake_datasets(\n \"text_line_task\", use_cached=False, splits=[\"train\"])\n\n def test_tf_example_task(self):\n self.verify_task_matches_fake_datasets(\n \"tf_example_task\", use_cached=False, splits=[\"train\"])\n\n @mock.patch.object(tf.io.gfile, \"glob\")\n def test_file_data_source_shuffle_buffer_low(self, mock_glob):\n mock_glob.return_value = [f\"{i}\" for i in range(20)]\n fds = dataset_providers.FileDataSource(\n read_file_fn=lambda x: tf.data.Dataset.from_tensor_slices([x]),\n split_to_filepattern={\"train\": \"filepattern\"},\n file_shuffle_buffer_size=2)\n for _ in range(10):\n ds = [\n d.decode() for d in tfds.as_numpy(\n fds.get_dataset(\"train\", shuffle=True, seed=23))\n ]\n self.assertListEqual(\n ds,\n [ # Not a great shuffle.\n \"0\", \"2\", \"1\", \"4\", \"5\", \"3\", \"7\", \"6\", \"9\", \"10\", \"11\", \"8\",\n \"13\", \"14\", \"12\", \"16\", \"15\", \"18\", \"17\", \"19\"\n ])\n\n @mock.patch.object(tf.io.gfile, \"glob\")\n def test_file_data_source_shuffle_buffer_full(self, mock_glob):\n mock_glob.return_value = [f\"{i}\" for i in range(20)]\n fds = dataset_providers.FileDataSource(\n read_file_fn=lambda x: tf.data.Dataset.from_tensor_slices([x]),\n split_to_filepattern={\"train\": \"filepattern\"},\n file_shuffle_buffer_size=None)\n for _ in range(10):\n ds = [\n d.decode() for d in tfds.as_numpy(\n fds.get_dataset(\"train\", shuffle=True, seed=23))\n ]\n self.assertListEqual(\n ds,\n [ # Good shuffle.\n \"2\", \"13\", \"12\", \"19\", \"15\", \"5\", \"9\", \"1\", \"6\", \"8\", \"3\", \"0\",\n \"10\", \"4\", \"14\", \"7\", \"16\", \"17\", \"18\", \"11\"\n ])\n\n def _get_preps_with_cache_placeholder_buffer_size(self, buffer_size):\n preps = list(self.DEFAULT_PREPROCESSORS)\n for i, p in enumerate(preps):\n if isinstance(p, dataset_providers.CacheDatasetPlaceholder):\n preps[i] = dataset_providers.CacheDatasetPlaceholder(\n file_shuffle_buffer_size=buffer_size)\n return preps\n\n def _mock_and_assert_cached_source(self, task_name, buffer_size):\n cached_task = dataset_providers.get_mixture_or_task(task_name)\n cached_task._get_cached_source = mock.MagicMock(\n side_effect=cached_task._get_cached_source)\n _ = cached_task.get_dataset(None, \"train\", use_cached=True)\n cached_task._get_cached_source.assert_called_once_with(\n \"train\", buffer_size)\n\n def test_cached_data_source_shuffle_buffer_default(self):\n self._mock_and_assert_cached_source(\"cached_task\", None)\n\n def test_cached_data_source_shuffle_buffer_set(self):\n self.add_task(\"cached_task_buf_2\", self.tfds_source,\n self._get_preps_with_cache_placeholder_buffer_size(2))\n shutil.copytree(self.cached_task_dir,\n os.path.join(self.test_data_dir, \"cached_task_buf_2\"))\n self._mock_and_assert_cached_source(\"cached_task_buf_2\", 2)\n\n def test_cached_data_source_shuffle_buffer_None(self):\n self.add_task(\"cached_task_buf_None\", self.tfds_source,\n self._get_preps_with_cache_placeholder_buffer_size(None))\n shutil.copytree(self.cached_task_dir,\n os.path.join(self.test_data_dir, \"cached_task_buf_None\"))\n self._mock_and_assert_cached_source(\"cached_task_buf_None\", None)\n\n def test_proto_task(self):\n self.verify_task_matches_fake_datasets(\n \"proto_task\", use_cached=False, splits=[\"train\"])\n\n def test_num_input_examples(self):\n self.assertEqual(30, self.cached_task.num_input_examples(\"train\"))\n self.assertEqual(10, 
self.cached_task.num_input_examples(\"validation\"))\n\n def test_disallow_shuffle(self):\n task = dataset_providers.Task(\n \"no_shuffle\",\n source=self.function_source,\n output_features=self.DEFAULT_OUTPUT_FEATURES,\n preprocessors=self.DEFAULT_PREPROCESSORS,\n shuffle_buffer_size=None)\n\n with self.assertRaisesWithLiteralMatch(\n ValueError, \"Shuffling is disallowed for Task 'no_shuffle' since its \"\n \"`shuffle_buffer_size` was set to `None` on construction.\"):\n task.get_dataset(None, shuffle=True)\n\n with self.assertRaisesWithLiteralMatch(\n ValueError, \"Shuffling is disallowed for Task 'no_shuffle' since its \"\n \"`shuffle_buffer_size` was set to `None` on construction.\"):\n task.get_dataset(None, shuffle=True, shuffle_buffer_size=100)\n\n task.get_dataset(None, shuffle=False)\n\n def test_supports_caching(self):\n self.assertFalse(\n dataset_providers.Task(\n \"nosupports_cache\",\n source=self.function_source,\n output_features=self.DEFAULT_OUTPUT_FEATURES,\n preprocessors=[]).supports_caching)\n\n self.assertFalse(\n dataset_providers.Task(\n \"nosupports_cache\",\n source=self.function_source,\n output_features=self.DEFAULT_OUTPUT_FEATURES,\n preprocessors=[preprocessors.tokenize]).supports_caching)\n\n self.assertTrue(\n dataset_providers.Task(\n \"supports_cache\",\n source=self.function_source,\n output_features=self.DEFAULT_OUTPUT_FEATURES,\n preprocessors=[\n preprocessors.tokenize,\n dataset_providers.CacheDatasetPlaceholder()\n ]).supports_caching)\n\n self.assertTrue(\n dataset_providers.Task(\n \"supports_cache\",\n source=self.function_source,\n output_features=self.DEFAULT_OUTPUT_FEATURES,\n preprocessors=[\n dataset_providers.CacheDatasetPlaceholder(required=True),\n preprocessors.tokenize,\n ]).supports_caching)\n\n self.assertTrue(\n dataset_providers.Task(\n \"supports_cache\",\n source=self.function_source,\n output_features=self.DEFAULT_OUTPUT_FEATURES,\n preprocessors=[\n dataset_providers.CacheDatasetPlaceholder(),\n ]).supports_caching)\n\n def test_requires_caching(self):\n self.assertFalse(\n dataset_providers.Task(\n \"nosupports_cache\",\n output_features=self.DEFAULT_OUTPUT_FEATURES,\n source=self.function_source,\n preprocessors=[preprocessors.tokenize]).requires_caching)\n\n self.assertFalse(\n dataset_providers.Task(\n \"supports_cache\",\n output_features=self.DEFAULT_OUTPUT_FEATURES,\n source=self.function_source,\n preprocessors=[\n preprocessors.tokenize,\n dataset_providers.CacheDatasetPlaceholder()\n ]).requires_caching)\n\n task = dataset_providers.Task(\n \"requires_cache\",\n output_features=self.DEFAULT_OUTPUT_FEATURES,\n source=self.function_source,\n preprocessors=[\n dataset_providers.CacheDatasetPlaceholder(required=True),\n preprocessors.tokenize,\n ])\n\n self.assertTrue(task.requires_caching)\n\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"Task 'requires_cache' requires caching, but was called with \"\n \"`use_cached=False`.\"):\n task.get_dataset({\"inputs\": 512, \"targets\": 512}, use_cached=False)\n\n # We haven't actually cached the task, so it still fails but with a\n # different error.\n with self.assertRaisesWithLiteralMatch(\n AssertionError,\n \"'requires_cache' does not exist in any of the task cache \"\n \"directories.\"):\n task.get_dataset({\"inputs\": 512, \"targets\": 512}, use_cached=True)\n\n def test_datasource_prohibits_caching(self):\n function_source_no_cache = dataset_providers.FunctionDataSource(\n dataset_fn=test_utils.get_fake_dataset,\n splits=[\"train\", \"validation\"],\n 
caching_permitted=False)\n\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"Caching was requested for 'prohibits_cache', but the underlying data \"\n \"source prohibits caching. Please remove `CacheDatasetPlaceholder` and \"\n \"try again.\"\n ):\n dataset_providers.Task(\n \"prohibits_cache\",\n output_features=self.DEFAULT_OUTPUT_FEATURES,\n source=function_source_no_cache,\n preprocessors=[\n dataset_providers.CacheDatasetPlaceholder(required=True),\n preprocessors.tokenize,\n ])\n\n def test_cache_exists(self):\n self.assertTrue(self.cached_task.cache_dir)\n self.cached_task.assert_cached()\n self.assertEqual(\n os.path.join(self.test_data_dir, \"cached_task\"),\n self.cached_task.cache_dir)\n\n self.assertFalse(self.uncached_task.cache_dir)\n with self.assertRaisesWithLiteralMatch(\n AssertionError,\n \"'tfds_task' does not exist in any of the task cache directories.\"):\n TaskRegistry.get(\"tfds_task\").assert_cached()\n\n def test_get_cached_stats(self):\n expected_train_stats = {\n \"examples\": 3,\n \"inputs_tokens\": 36, \"inputs_max_tokens\": 13,\n \"targets_tokens\": 18, \"targets_max_tokens\": 6}\n self.assertEqual(\n expected_train_stats,\n self.cached_task.get_cached_stats(\"train\"))\n # Check repeated call.\n self.assertEqual(\n expected_train_stats,\n self.cached_task.get_cached_stats(\"train\"))\n expected_validation_stats = {\n \"examples\": 2,\n \"inputs_tokens\": 23, \"inputs_max_tokens\": 12,\n \"targets_tokens\": 36, \"targets_max_tokens\": 21}\n self.assertEqual(\n expected_validation_stats,\n self.cached_task.get_cached_stats(\"validation\"))\n with self.assertRaisesWithLiteralMatch(\n ValueError, \"Stats do not exist for 'cached_task' split: fake\"):\n self.cached_task.get_cached_stats(\"fake\")\n with self.assertRaisesWithLiteralMatch(\n AssertionError,\n \"'uncached_task' does not exist in any of the task cache directories.\"):\n self.uncached_task.get_cached_stats(\"train\")\n\n def test_set_global_cache_dirs(self):\n utils.set_global_cache_dirs([])\n self.assertFalse(self.cached_task.cache_dir)\n\n utils.set_global_cache_dirs([self.test_data_dir])\n self.assertTrue(self.cached_task.cache_dir)\n\n def test_get_dataset_cached(self):\n self.verify_task_matches_fake_datasets(\n \"cached_task\", use_cached=True, token_preprocessed=False)\n\n # Test with token preprocessor.\n self.cached_task._preprocessors = self.DEFAULT_PREPROCESSORS + (\n test_utils.test_token_preprocessor,)\n self.verify_task_matches_fake_datasets(\n \"cached_task\", use_cached=True, token_preprocessed=True)\n\n def test_get_dataset_onthefly(self):\n self.verify_task_matches_fake_datasets(\n \"uncached_task\", use_cached=False)\n\n # Test with token preprocessor.\n self.cached_task._preprocessors = self.DEFAULT_PREPROCESSORS + (\n test_utils.test_token_preprocessor,)\n self.verify_task_matches_fake_datasets(\n \"cached_task\", use_cached=False, token_preprocessed=True)\n\n def test_get_dataset_no_truncation(self):\n self.verify_task_matches_fake_datasets(\n \"uncached_task\", use_cached=False, sequence_length=None)\n\n def test_sharding(self):\n for i in range(3):\n self.verify_task_matches_fake_datasets(\n \"cached_task\", use_cached=False, num_shards=i,\n token_preprocessed=False)\n self.verify_task_matches_fake_datasets(\n \"cached_task\", use_cached=True, num_shards=i,\n token_preprocessed=False)\n\n def test_feature_validation(self):\n default_vocab = test_utils.sentencepiece_vocab()\n features = {\n \"inputs\":\n dataset_providers.Feature(vocabulary=default_vocab, 
required=False),\n \"targets\":\n dataset_providers.Feature(vocabulary=default_vocab, required=True),\n \"inputs_rank2\":\n dataset_providers.Feature(\n vocabulary=vocabularies.PassThroughVocabulary(5),\n required=False,\n rank=2),\n \"continuous_features\":\n dataset_providers.ContinuousFeature(\n required=False,\n rank=2)\n }\n\n def _materialize(output):\n task = dataset_providers.Task(\n \"feature_validation_task\",\n self.function_source,\n output_features=features,\n preprocessors=(lambda _: tf.data.Dataset.from_tensors(output),),\n metric_fns=[],\n )\n list(\n task.get_dataset(\n {\"inputs\": 13, \"targets\": 13, \"inputs_rank2\": 13}, \"train\",\n use_cached=False\n ).as_numpy_iterator()\n )\n\n # Missing optional feature: OK\n _materialize({\"targets\": [0]})\n\n # Missing required feature.\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"Task dataset is missing expected output feature after preprocessing: \"\n \"targets\"):\n _materialize({\"inputs\": [0]})\n\n # Wrong type.\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"Task dataset has incorrect type for feature 'targets' after \"\n \"preprocessing: Got string, expected int32\"):\n _materialize({\"targets\": [\"wrong type\"]})\n\n # Wrong rank.\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"Task dataset has incorrect rank for feature 'targets' after \"\n \"preprocessing: Got 0, expected 1\"):\n _materialize({\"targets\": 0})\n\n # Verify rank > 1 works.\n _materialize({\"targets\": [0], \"inputs_rank2\": [[0, 0, 0], [0, 0, 0]]})\n\n # Wrong rank (1 when 2 is expected).\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"Task dataset has incorrect rank for feature 'inputs_rank2' after \"\n \"preprocessing: Got 1, expected 2\"):\n _materialize({\"targets\": [0], \"inputs_rank2\": [0]})\n # Test ContinuousFeature\n _materialize({\n \"targets\": [0],\n \"continuous_features\": [[1, 1], [0, 1]]\n })\n\n def test_value_errors(self):\n dataset_fn = (\n lambda split, shuffle_files: tf.data.Dataset.from_tensors([\"test\"]))\n output_features = {\n \"inputs\": dataset_providers.Feature(test_utils.sentencepiece_vocab())\n }\n\n with self.assertRaisesWithLiteralMatch(\n ValueError, \"`CacheDatasetPlaceholder` can appear at most once in the \"\n \"preprocessing pipeline. Found 2 in 'multiple_cache_placeholders'.\"):\n dataset_providers.Task(\n \"multiple_cache_placeholders\",\n source=dataset_providers.FunctionDataSource(\n dataset_fn=dataset_fn,\n splits=[\"train\", \"validation\"]\n ),\n preprocessors=[\n test_utils.test_text_preprocessor,\n preprocessors.tokenize,\n dataset_providers.CacheDatasetPlaceholder(),\n test_utils.test_token_preprocessor,\n dataset_providers.CacheDatasetPlaceholder()\n ],\n output_features=output_features,\n metric_fns=[])\n\n with self.assertRaisesWithLiteralMatch(\n ValueError,\n \"'test_token_preprocessor' has a `sequence_length` argument but occurs \"\n \"before `CacheDatasetPlaceholder` in 'sequence_length_pre_cache'. 
This \"\n \"is not allowed since the sequence length is specified at run time.\"):\n dataset_providers.Task(\n \"sequence_length_pre_cache\",\n dataset_providers.FunctionDataSource(\n dataset_fn=dataset_fn,\n splits=[\"train\"],\n ),\n preprocessors=[\n test_utils.test_text_preprocessor,\n preprocessors.tokenize,\n test_utils.test_token_preprocessor,\n dataset_providers.CacheDatasetPlaceholder()\n ],\n output_features=output_features,\n metric_fns=[])\n\n def test_tfds_source_splits(self):\n default_splits_src = dataset_providers.TfdsDataSource(\"fake:0.0.0\")\n self.assertSameElements([\"train\", \"validation\"], default_splits_src.splits)\n\n validation_split_src = dataset_providers.TfdsDataSource(\n \"fake:0.0.0\", splits=[\"validation\"])\n self.assertSameElements([\"validation\"], validation_split_src.splits)\n\n sliced_split_src = dataset_providers.TfdsDataSource(\n \"fake:0.0.0\", splits={\"validation\": \"train[0:1%]\"})\n self.assertSameElements([\"validation\"], sliced_split_src.splits)\n\n def test_no_eos(self):\n default_vocab = test_utils.sentencepiece_vocab()\n features = {\n \"inputs\":\n dataset_providers.Feature(add_eos=True, vocabulary=default_vocab),\n \"targets\":\n dataset_providers.Feature(add_eos=False, vocabulary=default_vocab),\n }\n self.add_task(\"task_no_eos\", self.function_source, output_features=features)\n self.verify_task_matches_fake_datasets(\"task_no_eos\", use_cached=False)\n\n def test_dtype(self):\n default_vocab = test_utils.sentencepiece_vocab()\n features = {\n \"inputs\":\n # defaults to int32\n dataset_providers.Feature(vocabulary=default_vocab),\n \"targets\":\n dataset_providers.Feature(dtype=tf.int64, vocabulary=default_vocab),\n }\n\n self.add_task(\n \"task_dtypes\",\n self.function_source,\n preprocessors=self.DEFAULT_PREPROCESSORS + (\n utils.map_over_dataset(\n lambda x: {k: tf.cast(v, tf.int64) if k == \"targets\" else v # pylint:disable=g-long-lambda\n for k, v in x.items()}\n ),\n ),\n output_features=features\n )\n self.verify_task_matches_fake_datasets(\"task_dtypes\", use_cached=False)\n\n def test_num_epochs(self):\n # Try repeating after preprocessing the dataset to verify the outputs are\n # the same.\n epoch1_ds = self.random_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=0)\n # `random_task` has 3 examples per epoch.\n epoch2_ds = self.random_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=0\n ).repeat(2).skip(3)\n test_utils.assert_datasets_eq(epoch1_ds, epoch2_ds)\n\n # Try repeating before preprocessing the dataset to verify the outputs are\n # different.\n epoch1_ds = self.random_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=0)\n # `random_task` has 3 examples per epoch.\n epoch2_ds = self.random_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=0, num_epochs=2\n ).skip(3)\n test_utils.assert_datasets_neq(epoch1_ds, epoch2_ds)\n\n def test_same_seeds_cached_match(self):\n dataset1 = self.cached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=True, shuffle=True, seed=0)\n dataset2 = self.cached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=True, shuffle=True, seed=0)\n test_utils.assert_datasets_eq(dataset1, dataset2)\n\n def test_different_seeds_cached_mismatch(self):\n dataset1 = 
self.cached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=True, shuffle=True, seed=0)\n dataset2 = self.cached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=True, shuffle=True, seed=42)\n test_utils.assert_datasets_neq(dataset1, dataset2)\n\n def test_same_seeds_uncached_match(self):\n dataset1 = self.uncached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=0)\n dataset2 = self.uncached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=0)\n test_utils.assert_datasets_eq(dataset1, dataset2)\n\n def test_different_seeds_uncached_mismatch(self):\n dataset1 = self.uncached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=0)\n dataset2 = self.uncached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=42)\n test_utils.assert_datasets_neq(dataset1, dataset2)\n\n def test_same_seeds_random_tp_uncached_match(self):\n dataset1 = self.random_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=0).repeat(4)\n dataset2 = self.random_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=0).repeat(4)\n test_utils.assert_datasets_eq(dataset1, dataset2)\n\n def test_different_seeds_random_tp_uncached_mismatch(self):\n dataset1 = self.random_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=0)\n dataset2 = self.random_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=True, seed=42)\n test_utils.assert_datasets_neq(dataset1, dataset2)\n\n def test_no_shuffle_with_seed_cached_match(self):\n dataset1 = self.cached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=True, shuffle=False, seed=0)\n dataset2 = self.cached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=True, shuffle=False, seed=42)\n test_utils.assert_datasets_eq(dataset1, dataset2)\n\n def test_no_shuffle_with_seed_uncached_match(self):\n dataset1 = self.uncached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=False, seed=0)\n dataset2 = self.uncached_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=False, seed=42)\n test_utils.assert_datasets_eq(dataset1, dataset2)\n\n def test_no_shuffle_different_seeds_random_tp_uncached_mismatch(self):\n dataset1 = self.random_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=False, seed=0)\n dataset2 = self.random_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=False, shuffle=False, seed=42)\n test_utils.assert_datasets_neq(dataset1, dataset2)\n\n def test_plaintext_to_pretokenized_rename(self):\n ds = self.cached_plaintext_task.get_dataset(\n {\"inputs\": 13, \"targets\": 13},\n split=\"train\", use_cached=True, shuffle=False)\n keys = next(ds.as_numpy_iterator()).keys()\n self.assertSetEqual(\n set(keys),\n set([\"inputs\", \"inputs_pretokenized\",\n \"targets\", \"targets_pretokenized\"]))\n\n def test_list_shards(self):\n\n def _get_formatted_shards_list(task_name, split):\n shards = 
dataset_providers.get_mixture_or_task(\n task_name).source.list_shards(split)\n shards = [s.split(\"/\")[-1] for s in shards]\n return sorted(shards)\n\n self.assertListEqual(\n _get_formatted_shards_list(\"tfds_task\", \"train\"),\n [\"train.tfrecord-00000-of-00002\", \"train.tfrecord-00001-of-00002\"])\n self.assertListEqual(\n _get_formatted_shards_list(\"text_line_task\", \"train\"),\n [\"train.tsv-00000-of-00002\", \"train.tsv-00001-of-00002\"])\n self.assertListEqual(\n _get_formatted_shards_list(\"tf_example_task\", \"train\"),\n [\"train.tfrecord-00000-of-00002\", \"train.tfrecord-00001-of-00002\"])\n self.assertListEqual(\n _get_formatted_shards_list(\"proto_task\", \"train\"),\n [\"train.tfrecord-00000-of-00002\", \"train.tfrecord-00001-of-00002\"])\n self.assertListEqual(\n _get_formatted_shards_list(\"function_task\", \"train\"), [\"train\"])\n self.assertListEqual(\n _get_formatted_shards_list(\"fully_processed_precache\", \"train\"),\n [\"train\"])\n self.assertListEqual(\n _get_formatted_shards_list(\"tokenized_postcache\", \"train\"), [\"train\"])\n self.assertListEqual(\n _get_formatted_shards_list(\"random_task\", \"train\"), [\"train\"])\n self.assertListEqual(\n _get_formatted_shards_list(\"uncached_task\", \"train\"),\n [\"train.tfrecord-00000-of-00002\", \"train.tfrecord-00001-of-00002\"])\n self.assertListEqual(\n _get_formatted_shards_list(\"cached_task\", \"train\"),\n [\"train.tfrecord-00000-of-00002\", \"train.tfrecord-00001-of-00002\"])\n self.assertListEqual(\n _get_formatted_shards_list(\"cached_plaintext_task\", \"train\"),\n [\"train.tfrecord-00000-of-00002\", \"train.tfrecord-00001-of-00002\"])\n\n\nclass MixturesTest(test_utils.FakeTaskTest):\n\n def test_tasks(self):\n self.add_task(\"task1\", self.function_source)\n self.add_task(\"task2\", self.function_source)\n MixtureRegistry.add(\"test_mix1\", [(\"task1\", 1), (\"task2\", 1)])\n mix = MixtureRegistry.get(\"test_mix1\")\n self.assertEqual(len(mix.tasks), 2)\n\n for task in mix.tasks:\n self.verify_task_matches_fake_datasets(task.name, use_cached=False)\n self.assertEqual(mix.get_rate(task), 1)\n\n def test_num_examples(self):\n MixtureRegistry.add(\"test_mix2\", [(self.cached_task.name, 1)])\n mix = MixtureRegistry.get(\"test_mix2\")\n self.assertEqual(mix.num_input_examples(split=\"train\"), 30)\n\n def test_splits(self):\n MixtureRegistry.add(\n \"test_mix\",\n [(self.cached_task.name, 1), (self.uncached_task.name, 1)]\n )\n mix = MixtureRegistry.get(\"test_mix\")\n self.assertSameElements([\"train\", \"validation\"], mix.splits, 30)\n\n def test_get_dataset(self):\n MixtureRegistry.add(\"test_mix3\", [(self.cached_task.name, 1)])\n\n task_ds = TaskRegistry.get_dataset(\n self.cached_task.name, {\n \"inputs\": 13,\n \"targets\": 13\n },\n \"validation\",\n use_cached=False,\n shuffle=False)\n\n mix_ds = MixtureRegistry.get(\"test_mix3\").get_dataset(\n {\n \"inputs\": 13,\n \"targets\": 13\n }, \"validation\", use_cached=False, shuffle=False)\n\n # mix.get_dataset strips non-output features\n task_ds = task_ds.map(lambda x: {k: x[k] for k in [\"inputs\", \"targets\"]})\n\n # limit size since get_dataset repeats the dataset\n test_utils.assert_datasets_eq(task_ds.repeat(2), mix_ds.take(4))\n\n def test_get_dataset_mix(self):\n @utils.map_over_dataset\n def _constant_preprocessor(unused_x, val):\n return {\n \"targets\": tf.constant([val], tf.int32),\n \"inputs\": tf.constant([val], tf.int32),\n }\n\n self.add_task(\n \"two_task\",\n self.function_source,\n 
preprocessors=(functools.partial(_constant_preprocessor, val=2),)\n )\n\n self.add_task(\n \"three_task\",\n self.function_source,\n preprocessors=(functools.partial(_constant_preprocessor, val=3),)\n )\n\n MixtureRegistry.add(\"test_mix\", [(\"two_task\", 1), (\"three_task\", 1)])\n\n sequence_length = {\"inputs\": 2, \"targets\": 2}\n mix_ds = MixtureRegistry.get(\"test_mix\").get_dataset(\n sequence_length, \"train\", seed=13).take(1000)\n\n res = sum(int(item[\"inputs\"][0]) for item in mix_ds.as_numpy_iterator())\n self.assertEqual(res, 2481)\n\n def test_get_dataset_passthrough_features(self):\n\n @utils.map_over_dataset\n def _constant_feature_preprocessor(unused_x, val):\n return {\n \"targets\": tf.constant([val], tf.int32),\n \"inputs\": tf.constant([val], tf.int32),\n \"feature\": tf.constant([val], tf.int32),\n }\n\n self.add_task(\n \"two_task\",\n self.function_source,\n preprocessors=(functools.partial(_constant_feature_preprocessor,\n val=2),))\n\n self.add_task(\n \"three_task\",\n self.function_source,\n preprocessors=(functools.partial(_constant_feature_preprocessor,\n val=3),))\n\n MixtureRegistry.add(\"test_mix\", [(\"two_task\", 1), (\"three_task\", 1)])\n\n sequence_length = {\"inputs\": 2, \"targets\": 2}\n passthrough_features = [\"feature\"]\n mix_ds = MixtureRegistry.get(\"test_mix\").get_dataset(\n sequence_length,\n \"train\",\n seed=13,\n passthrough_features=passthrough_features).take(1000)\n\n # output features are defined as \"inputs\" and \"targets\" by default.\n res = sum(int(item[\"feature\"][0]) for item in mix_ds.as_numpy_iterator())\n self.assertEqual(res, 2481)\n\n def test_copy_pretokenized(self):\n @utils.map_over_dataset\n def _constant_preprocessor(unused_x, val):\n return {\n \"targets\": tf.constant([val], tf.int32),\n \"targets_pretokenized\": tf.constant(f\"targets_{val}\"),\n \"inputs\": tf.constant([val], tf.int32),\n \"inputs_pretokenized\": tf.constant(f\"inputs_{val}\")\n }\n\n self.add_task(\n \"two_task\",\n self.function_source,\n preprocessors=(functools.partial(_constant_preprocessor, val=2),)\n )\n\n self.add_task(\n \"three_task\",\n self.function_source,\n preprocessors=(functools.partial(_constant_preprocessor, val=3),)\n )\n\n MixtureRegistry.add(\"test_mix\", [(\"two_task\", 1), (\"three_task\", 1)])\n\n sequence_length = {\"inputs\": 2, \"targets\": 2}\n\n mix_ds = MixtureRegistry.get(\"test_mix\").get_dataset(\n sequence_length, \"train\", seed=13, copy_pretokenized=True).take(1000)\n inputs_pretokenized = set(\n ex[\"inputs_pretokenized\"] for ex in mix_ds.as_numpy_iterator())\n targets_pretokenized = set(\n ex[\"targets_pretokenized\"] for ex in mix_ds.as_numpy_iterator())\n self.assertCountEqual([b\"inputs_2\", b\"inputs_3\"], inputs_pretokenized)\n self.assertCountEqual([b\"targets_2\", b\"targets_3\"], targets_pretokenized)\n\n mix_ds = MixtureRegistry.get(\"test_mix\").get_dataset(\n sequence_length, \"train\", seed=13, copy_pretokenized=False).take(1000)\n for ex in mix_ds.as_numpy_iterator():\n self.assertNoCommonElements(\n [\"inputs_pretokenized\", \"targets_pretokenized\"], ex.keys())\n\n def test_get_rate_with_callable(self):\n def fn(t):\n self.assertEqual(t.name, \"task4\")\n return 42\n self.add_task(\"task4\", self.function_source)\n task = TaskRegistry.get(\"task4\")\n MixtureRegistry.add(\"test_mix5\", [(\"task4\", fn)])\n mix = MixtureRegistry.get(\"test_mix5\")\n self.assertEqual(mix.get_rate(task), 42)\n\n def test_mixture_of_mixtures(self):\n self.add_task(\"task_a\", self.function_source)\n 
self.add_task(\"task_b\", self.function_source)\n self.add_task(\"task_c\", self.function_source)\n MixtureRegistry.add(\"another_mix\", [(\"task_a\", 1), (\"task_b\", 1)])\n MixtureRegistry.add(\"supermix\", [(\"another_mix\", 1), (\"task_c\", 1)])\n supermix = MixtureRegistry.get(\"supermix\")\n names = [task.name for task in supermix.tasks]\n self.assertEqual(names, [\"task_a\", \"task_b\", \"task_c\"])\n self.assertEqual([supermix.get_rate(t) for t in supermix.tasks],\n [0.5, 0.5, 1])\n\n def test_mixture_of_mixtures_dupe(self):\n self.add_task(\"task2_a\", self.function_source)\n self.add_task(\"task2_b\", self.function_source)\n self.add_task(\"task2_c\", self.function_source)\n MixtureRegistry.add(\"yet_another_mix\", [(\"task2_a\", 1), (\"task2_b\", 1)])\n MixtureRegistry.add(\"supermix_with_dupe\", [(\"yet_another_mix\", 1),\n (\"task2_a\", 1), (\"task2_c\", 1)])\n supermix = MixtureRegistry.get(\"supermix_with_dupe\")\n names = [task.name for task in supermix.tasks]\n self.assertEqual(names, [\"task2_a\", \"task2_b\", \"task2_c\"])\n self.assertEqual([supermix.get_rate(t) for t in supermix.tasks],\n [1.5, 0.5, 1])\n\n def test_mixture_with_sample_fn(self):\n\n def sequential_intereave(datasets: Sequence[tf.data.Dataset],\n rates: Sequence[float],\n sample_seed: Optional[int]) -> tf.data.Dataset:\n \"\"\"Sample function that simply concatenates two datasets.\"\"\"\n del rates, sample_seed\n return datasets[0].concatenate(datasets[1])\n\n def gen_dataset(split,\n shuffle_files=False,\n seed=None,\n val: str = \"\") -> tf.data.Dataset:\n del split, shuffle_files, seed # Need this to pass arg validation.\n return tf.data.Dataset.from_tensor_slices({\n \"inputs\": [[val]] * 3,\n })\n\n # Register two very simple tasks, each with 3 repeated string values.\n vocab = vocabularies.PassThroughVocabulary(0)\n tasks = []\n for task_name in [\"first\", \"second\"]:\n tasks.append(self.add_task(\n task_name,\n dataset_providers.FunctionDataSource(\n dataset_fn=functools.partial(gen_dataset, val=task_name),\n splits=[\"train\"]),\n preprocessors=[],\n output_features={\n \"inputs\": dataset_providers.Feature(vocab, dtype=tf.string)\n }))\n\n # Verify that by default, interleaving of datasets is random.\n MixtureRegistry.add(\"default_mix\", [(\"first\", 1), (\"second\", 1)])\n default_ds = MixtureRegistry.get(\"default_mix\").get_dataset(\n None, \"train\", shuffle=False, seed=2, num_epochs=1)\n expected = [b\"second\", b\"first\", b\"second\", b\"first\", b\"second\", b\"first\"]\n actual = [x[\"inputs\"] for x in default_ds.as_numpy_iterator()]\n self.assertEqual(expected, actual)\n\n # Verify that we can modify sampling function correctly.\n MixtureRegistry.add(\n \"sequential_mix\", [(\"first\", 1), (\"second\", 1)],\n sample_fn=sequential_intereave)\n sequential_ds = MixtureRegistry.get(\"sequential_mix\").get_dataset(\n None, \"train\", shuffle=False, seed=2, num_epochs=1)\n expected = [b\"first\"] * 3 + [b\"second\"] * 3\n actual = [x[\"inputs\"] for x in sequential_ds.as_numpy_iterator()]\n self.assertEqual(expected, actual)\n\n\nclass GetDatasetTest(parameterized.TestCase, tf.test.TestCase):\n\n def test_get_dataset_enc_dec_unpacked(self):\n mixture_or_task_name = \"enc_dec_unpacked\"\n x = [{\"inputs\": [7, 8, 5, 6, 9, 4, 3], \"targets\": [3, 9]},\n {\"inputs\": [8, 4], \"targets\": [4]},\n {\"inputs\": [5, 6, 7], \"targets\": [6, 5]}]\n ds = create_default_dataset(x)\n dataset_fn = lambda split, shuffle_files: ds\n register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)\n\n 
task_feature_lengths = {\"inputs\": 7, \"targets\": 5}\n converter = feature_converters.EncDecFeatureConverter(pack=False)\n output_ds = dataset_providers.get_dataset(\n mixture_or_task_name=mixture_or_task_name,\n task_feature_lengths=task_feature_lengths,\n dataset_split=\"train\",\n shuffle=False,\n feature_converter=converter)\n\n expected = [{\n \"encoder_input_tokens\": [7, 8, 5, 6, 9, 4, 1],\n \"decoder_target_tokens\": [3, 9, 1, 0, 0],\n \"decoder_input_tokens\": [0, 3, 9, 1, 0],\n \"decoder_loss_weights\": [1, 1, 1, 0, 0],\n }, {\n \"encoder_input_tokens\": [8, 4, 1, 0, 0, 0, 0],\n \"decoder_target_tokens\": [4, 1, 0, 0, 0],\n \"decoder_input_tokens\": [0, 4, 1, 0, 0],\n \"decoder_loss_weights\": [1, 1, 0, 0, 0],\n }, {\n \"encoder_input_tokens\": [5, 6, 7, 1, 0, 0, 0],\n \"decoder_target_tokens\": [6, 5, 1, 0, 0],\n \"decoder_input_tokens\": [0, 6, 5, 1, 0],\n \"decoder_loss_weights\": [1, 1, 1, 0, 0],\n }]\n expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}\n assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)\n\n @parameterized.parameters(\n dict(\n task_name=\"enc_dec_partial_trim_both\",\n task_feature_lengths={\n \"inputs\": 7,\n \"targets\": 2\n },\n expect_trim_inputs=True,\n expect_trim_targets=True),\n dict(\n task_name=\"enc_dec_partial_trim_targets\",\n task_feature_lengths={\n \"inputs\": None,\n \"targets\": 2\n },\n expect_trim_inputs=False,\n expect_trim_targets=True),\n dict(\n task_name=\"enc_dec_partial_trim_inputs\",\n task_feature_lengths={\n \"inputs\": 7,\n \"targets\": None\n },\n expect_trim_inputs=True,\n expect_trim_targets=False),\n dict(\n task_name=\"enc_dec_partial_trim_neither\",\n task_feature_lengths={\n \"inputs\": None,\n \"targets\": None\n },\n expect_trim_inputs=False,\n expect_trim_targets=False),\n dict(\n task_name=\"enc_dec_partial_trim_nothing\",\n task_feature_lengths=None,\n expect_trim_inputs=False,\n expect_trim_targets=False))\n def test_partial_sequence_length(self, task_name, task_feature_lengths,\n expect_trim_inputs, expect_trim_targets):\n x = [{\"inputs\": [7, 8, 5, 6, 9, 4, 3], \"targets\": [3, 9]},\n {\"inputs\": [8, 4], \"targets\": [4]},\n {\"inputs\": [5, 6, 7], \"targets\": [6, 5]}]\n ds = create_default_dataset(x)\n dataset_fn = lambda split, shuffle_files: ds\n register_dummy_task(task_name, dataset_fn=dataset_fn)\n # Unlike the other tests, don't use a feature converter. 
Instead, test the\n # task.get_dataset method directly, which is similar to how evaluation.py\n # infers feature lengths w/trimming.\n task = dataset_providers.get_mixture_or_task(task_name)\n output_ds = task.get_dataset(\n sequence_length=task_feature_lengths,\n shuffle=False)\n\n expected = [{\n \"inputs\": [7, 8, 5, 6, 9, 4, 3, 1],\n \"targets\": [3, 9, 1],\n }, {\n \"inputs\": [8, 4, 1],\n \"targets\": [4, 1],\n }, {\n \"inputs\": [5, 6, 7, 1],\n \"targets\": [6, 5, 1],\n }]\n if expect_trim_inputs:\n expected[0][\"inputs\"] = [7, 8, 5, 6, 9, 4, 1]\n if expect_trim_targets:\n expected[0][\"targets\"] = [3, 1]\n expected[2][\"targets\"] = [6, 1]\n expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}\n assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)\n\n @parameterized.parameters(\n dict(\n task_name=\"enc_dec_multidim_trim_both\",\n task_feature_lengths={\n \"inputs\": (2, 5),\n \"targets\": 2\n },\n expect_trim_inputs=True,\n expect_trim_targets=True,\n ),\n dict(\n task_name=\"enc_dec_multidim_trim_inputs\",\n task_feature_lengths={\n \"inputs\": (2, 5),\n \"targets\": None\n },\n expect_trim_inputs=True,\n expect_trim_targets=False,\n ),\n dict(\n task_name=\"enc_dec_multidim_trim_targets\",\n task_feature_lengths={\n \"inputs\": None,\n \"targets\": 2\n },\n expect_trim_inputs=False,\n expect_trim_targets=True,\n ),\n dict(\n task_name=\"enc_dec_no_multidim_trim\",\n task_feature_lengths={\n \"inputs\": None,\n \"targets\": None\n },\n expect_trim_inputs=False,\n expect_trim_targets=False\n )\n )\n def test_multidimension_sequence_length(self,\n task_name,\n task_feature_lengths,\n expect_trim_inputs,\n expect_trim_targets):\n x = [{\"inputs\": [[7, 8, 5, 6, 9, 4, 3],\n [2, 3, 4, 5, 0, 0, 0],\n [6, 7, 1, 0, 0, 0, 0]],\n \"targets\": [3, 9]},\n {\"inputs\": [[8, 4],\n [1, 0],\n [2, 3]],\n \"targets\": [4]},\n {\"inputs\": [[5, 6, 7]],\n \"targets\": [6, 5, 1]},\n {\"inputs\": [[7, 8, 9, 1, 2, 3, 4, 5, 6]],\n \"targets\": [10, 11, 1]}]\n ds = tf.data.Dataset.from_generator(\n lambda: x,\n output_types={\"inputs\": tf.int32, \"targets\": tf.int32},\n output_shapes={\"inputs\": (None, None), \"targets\": (None,)})\n dataset_fn = lambda split, shuffle_files: ds\n dataset_providers.TaskRegistry.add(\n task_name,\n source=dataset_providers.FunctionDataSource(\n dataset_fn=dataset_fn, splits=[\"train\", \"validation\"]),\n preprocessors=[\n dataset_providers.CacheDatasetPlaceholder(),\n ],\n output_features={\n \"inputs\": dataset_providers.Feature(\n test_utils.sentencepiece_vocab(), rank=2),\n \"targets\": dataset_providers.Feature(\n test_utils.sentencepiece_vocab())\n },\n metric_fns=[])\n # Unlike the other tests, don't use a feature converter. 
Instead, test the\n # task.get_dataset method directly, which is similar to how evaluation.py\n # infers feature lengths w/trimming.\n task = dataset_providers.get_mixture_or_task(task_name)\n output_ds = task.get_dataset(\n sequence_length=task_feature_lengths,\n shuffle=False)\n\n expected = copy.deepcopy(x)\n if expect_trim_inputs:\n expected[0][\"inputs\"] = [[7, 8, 5, 6, 9],\n [2, 3, 4, 5, 0]]\n expected[1][\"inputs\"] = [[8, 4],\n [1, 0]]\n expected[3][\"inputs\"] = [[7, 8, 9, 1, 2]]\n if expect_trim_targets:\n expected[2][\"targets\"] = [6, 5]\n expected[3][\"targets\"] = [10, 11]\n expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}\n assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)\n\n def test_get_dataset_enc_dec_packed(self):\n mixture_or_task_name = \"enc_dec_packed\"\n x = [{\"inputs\": [7, 8, 5, 6, 9, 4, 3], \"targets\": [3, 9]},\n {\"inputs\": [8, 4], \"targets\": [4]},\n {\"inputs\": [5, 6, 7], \"targets\": [6, 5]}]\n ds = create_default_dataset(x)\n dataset_fn = lambda split, shuffle_files: ds\n register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)\n\n task_feature_lengths = {\"inputs\": 7, \"targets\": 5}\n converter = feature_converters.EncDecFeatureConverter(pack=True)\n output_ds = dataset_providers.get_dataset(\n mixture_or_task_name=mixture_or_task_name,\n task_feature_lengths=task_feature_lengths,\n dataset_split=\"train\",\n shuffle=False,\n feature_converter=converter)\n\n expected = [{\n # Example 1 is trimmed\n \"encoder_input_tokens\": [7, 8, 5, 6, 9, 4, 1],\n \"encoder_segment_ids\": [1, 1, 1, 1, 1, 1, 1],\n \"encoder_positions\": [0, 1, 2, 3, 4, 5, 6],\n \"decoder_target_tokens\": [3, 9, 1, 0, 0],\n \"decoder_input_tokens\": [0, 3, 9, 0, 0],\n \"decoder_loss_weights\": [1, 1, 1, 0, 0],\n \"decoder_segment_ids\": [1, 1, 1, 0, 0],\n \"decoder_positions\": [0, 1, 2, 0, 0],\n }, {\n # Example 2 and 3 are packed together\n \"encoder_input_tokens\": [8, 4, 1, 5, 6, 7, 1],\n \"encoder_segment_ids\": [1, 1, 1, 2, 2, 2, 2],\n \"encoder_positions\": [0, 1, 2, 0, 1, 2, 3],\n \"decoder_target_tokens\": [4, 1, 6, 5, 1],\n \"decoder_input_tokens\": [0, 4, 0, 6, 5],\n \"decoder_loss_weights\": [1, 1, 1, 1, 1],\n \"decoder_segment_ids\": [1, 1, 2, 2, 2],\n \"decoder_positions\": [0, 1, 0, 1, 2],\n }]\n expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}\n assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)\n\n def test_get_dataset_both_train_and_validation_splits(self):\n mixture_or_task_name = \"both_train_and_validation_splits\"\n x_train = [{\"inputs\": [7, 8, 5, 6, 9, 4, 3], \"targets\": [3, 9]}]\n x_val = [{\"inputs\": [8, 4], \"targets\": [4]}]\n datasets = {\n \"train\": create_default_dataset(x_train),\n \"validation\": create_default_dataset(x_val)\n }\n dataset_fn = lambda split, shuffle_files: datasets[split]\n register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)\n\n task_feature_lengths = {\"inputs\": 7, \"targets\": 5}\n output_ds = {}\n for split in [\"train\", \"validation\"]:\n converter = feature_converters.EncDecFeatureConverter(pack=False)\n output_ds[split] = dataset_providers.get_dataset(\n mixture_or_task_name=mixture_or_task_name,\n task_feature_lengths=task_feature_lengths,\n dataset_split=split,\n shuffle=False,\n feature_converter=converter)\n\n expected_train = {\n \"encoder_input_tokens\": [7, 8, 5, 6, 9, 4, 1],\n \"decoder_target_tokens\": [3, 9, 1, 0, 0],\n \"decoder_input_tokens\": [0, 3, 9, 1, 0],\n \"decoder_loss_weights\": [1, 1, 1, 0, 0],\n }\n 
expected_val = {\n \"encoder_input_tokens\": [8, 4, 1, 0, 0, 0, 0],\n \"decoder_target_tokens\": [4, 1, 0, 0, 0],\n \"decoder_input_tokens\": [0, 4, 1, 0, 0],\n \"decoder_loss_weights\": [1, 1, 0, 0, 0],\n }\n expected_dtypes = {feat: tf.int32 for feat in expected_train.keys()}\n assert_dataset(\n output_ds[\"train\"], expected_train, expected_dtypes=expected_dtypes)\n assert_dataset(\n output_ds[\"validation\"], expected_val, expected_dtypes=expected_dtypes)\n\n def test_get_dataset_enc_dec_sharded(self):\n mixture_or_task_name = \"enc_dec_sharded\"\n x = [{\"inputs\": [7, 8, 5, 6, 9, 4, 3], \"targets\": [3, 9]},\n {\"inputs\": [8, 4], \"targets\": [4]},\n {\"inputs\": [5, 6, 7], \"targets\": [6, 5]}]\n ds = create_default_dataset(x)\n dataset_fn = lambda split, shuffle_files: ds\n register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)\n\n task_feature_lengths = {\"inputs\": 7, \"targets\": 5}\n converter = feature_converters.EncDecFeatureConverter(pack=False)\n shard_info = dataset_providers.ShardInfo(index=0, num_shards=2)\n output_ds = dataset_providers.get_dataset(\n mixture_or_task_name=mixture_or_task_name,\n task_feature_lengths=task_feature_lengths,\n dataset_split=\"train\",\n shuffle=False,\n feature_converter=converter,\n shard_info=shard_info)\n\n # Example index 1 should not be present in the sharded dataset.\n expected = [{\n \"encoder_input_tokens\": [7, 8, 5, 6, 9, 4, 1],\n \"decoder_target_tokens\": [3, 9, 1, 0, 0],\n \"decoder_input_tokens\": [0, 3, 9, 1, 0],\n \"decoder_loss_weights\": [1, 1, 1, 0, 0],\n }, {\n \"encoder_input_tokens\": [5, 6, 7, 1, 0, 0, 0],\n \"decoder_target_tokens\": [6, 5, 1, 0, 0],\n \"decoder_input_tokens\": [0, 6, 5, 1, 0],\n \"decoder_loss_weights\": [1, 1, 1, 0, 0],\n }]\n expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}\n assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)\n\n def test_get_dataset_enc_dec_sharded_and_packed(self):\n mixture_or_task_name = \"enc_dec_sharded_and_packed\"\n x = [{\"inputs\": [7, 8], \"targets\": [3, 9]},\n {\"inputs\": [8, 4], \"targets\": [4]},\n {\"inputs\": [5, 6, 7], \"targets\": [6]}]\n ds = create_default_dataset(x)\n dataset_fn = lambda split, shuffle_files: ds\n register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)\n\n task_feature_lengths = {\"inputs\": 7, \"targets\": 5}\n converter = feature_converters.EncDecFeatureConverter(pack=True)\n shard_info = dataset_providers.ShardInfo(index=0, num_shards=2)\n output_ds = dataset_providers.get_dataset(\n mixture_or_task_name=mixture_or_task_name,\n task_feature_lengths=task_feature_lengths,\n dataset_split=\"train\",\n shuffle=False,\n feature_converter=converter,\n shard_info=shard_info)\n\n # Packing should be done after the sharding.\n expected = {\n \"encoder_input_tokens\": [7, 8, 1, 5, 6, 7, 1],\n \"encoder_segment_ids\": [1, 1, 1, 2, 2, 2, 2],\n \"encoder_positions\": [0, 1, 2, 0, 1, 2, 3],\n \"decoder_target_tokens\": [3, 9, 1, 6, 1],\n \"decoder_input_tokens\": [0, 3, 9, 0, 6],\n \"decoder_loss_weights\": [1, 1, 1, 1, 1],\n \"decoder_segment_ids\": [1, 1, 1, 2, 2],\n \"decoder_positions\": [0, 1, 2, 0, 1],\n }\n expected_dtypes = {feat: tf.int32 for feat in expected.keys()}\n assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)\n\n\ndef register_dummy_task(\n task_name: str,\n dataset_fn: Callable[[str, str], tf.data.Dataset],\n output_feature_names: Sequence[str] = (\"inputs\", \"targets\")) -> None:\n \"\"\"Register a dummy task for GetDatasetTest.\"\"\"\n 
dataset_providers.TaskRegistry.add(\n task_name,\n source=dataset_providers.FunctionDataSource(\n dataset_fn=dataset_fn, splits=[\"train\", \"validation\"]),\n preprocessors=[\n dataset_providers.CacheDatasetPlaceholder(),\n preprocessors.append_eos_after_trim,\n ],\n output_features={\n feat: dataset_providers.Feature(test_utils.sentencepiece_vocab())\n for feat in output_feature_names\n },\n metric_fns=[])\n\n\nif __name__ == \"__main__\":\n absltest.main()\n"
] |
[
[
"tensorflow.compat.v2.compat.v1.enable_eager_execution",
"tensorflow.compat.v2.data.Dataset.from_generator",
"tensorflow.compat.v2.data.Dataset.from_tensor_slices",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.data.Dataset.from_tensors",
"tensorflow.compat.v2.constant"
]
] |
motokimura/solaris
|
[
"6a9c3962d987d985384d0d41a187f5fbfadac82c"
] |
[
"solaris/tile/vector_tile.py"
] |
[
"import os\nimport numpy as np\nfrom shapely.geometry import box, Polygon\nimport geopandas as gpd\nfrom ..utils.core import _check_gdf_load, _check_crs\nfrom ..utils.tile import save_empty_geojson\nfrom ..utils.geo import gdf_get_projection_unit, split_multi_geometries\nfrom ..utils.geo import reproject_geometry\nfrom tqdm import tqdm\n\n\nclass VectorTiler(object):\n \"\"\"An object to tile geospatial vector data into smaller pieces.\n\n Arguments\n ---------\n\n\n Attributes\n ----------\n \"\"\"\n\n def __init__(self, dest_dir=None, dest_crs=None, output_format='GeoJSON',\n verbose=False, super_verbose=False):\n if verbose or super_verbose:\n print('Preparing the tiler...')\n self.dest_dir = dest_dir\n if not os.path.isdir(self.dest_dir):\n os.makedirs(self.dest_dir)\n if dest_crs is not None:\n self.dest_crs = _check_crs(dest_crs)\n self.output_format = output_format\n self.verbose = verbose\n self.super_verbose = super_verbose\n self.tile_paths = [] # retains the paths of the last call to .tile()\n if self.verbose or self.super_verbose:\n print('Initialization done.')\n\n def tile(self, src, tile_bounds, tile_bounds_crs=None, geom_type='Polygon',\n split_multi_geoms=True, min_partial_perc=0.0,\n dest_fname_base='geoms', obj_id_col=None,\n output_ext='.geojson'):\n \"\"\"Tile `src` into vector data tiles bounded by `tile_bounds`.\n\n Arguments\n ---------\n src : `str` or :class:`geopandas.GeoDataFrame`\n The source vector data to tile. Must either be a path to a GeoJSON\n or a :class:`geopandas.GeoDataFrame`.\n tile_bounds : list\n A :class:`list` made up of ``[left, top, right, bottom] `` sublists\n (this can be extracted from\n :class:`solaris.tile.raster_tile.RasterTiler` after tiling imagery)\n tile_bounds_crs : int, optional\n The EPSG code or rasterio.crs.CRS object for the CRS that the tile\n bounds are in. RasterTiler.tile returns the CRS of the raster tiles\n and can be used here. If not provided, it's assumed that the CRS is the\n same as in `src`. This argument must be provided if the bound\n coordinates and `src` are not in the same CRS, otherwise tiling will\n not occur correctly.\n geom_type : str, optional (default: \"Polygon\")\n The type of geometries contained within `src`. Defaults to\n ``\"Polygon\"``, can also be ``\"LineString\"``.\n split_multi_geoms : bool, optional (default: True)\n Should multi-polygons or multi-linestrings generated by clipping\n a geometry into discontinuous pieces be separated? Defaults to yes\n (``True``).\n min_partial_perc : float, optional (default: 0.0)\n The minimum percentage of a :class:`shapely.geometry.Polygon` 's\n area or :class:`shapely.geometry.LineString` 's length that must\n be retained within a tile's bounds to be included in the output.\n Defaults to ``0.0``, meaning that the contained portion of a\n clipped geometry will be included, no matter how small.\n dest_fname_base : str, optional (default: 'geoms')\n The base filename to use when creating outputs. The lower left\n corner coordinates of the tile's bounding box will be appended\n when saving.\n obj_id_col : str, optional (default: None)\n If ``split_multi_geoms=True``, the name of a column that specifies\n a unique identifier for each geometry (e.g. the ``\"BuildingId\"``\n column in many SpaceNet datasets.) 
See\n :func:`solaris.utils.geo.split_multi_geometries` for more.\n output_ext : str, optional, (default: geojson)\n Extension of output files, can be 'geojson' or 'json'.\n \"\"\"\n tile_gen = self.tile_generator(src, tile_bounds, tile_bounds_crs,\n geom_type, split_multi_geoms,\n min_partial_perc,\n obj_id_col=obj_id_col)\n self.tile_paths = []\n for tile_gdf, tb in tqdm(tile_gen):\n if self.proj_unit not in ['meter', 'metre']:\n dest_path = os.path.join(\n self.dest_dir, '{}_{}_{}{}'.format(dest_fname_base,\n np.round(tb[0], 3),\n np.round(tb[3], 3),\n output_ext))\n else:\n dest_path = os.path.join(\n self.dest_dir, '{}_{}_{}{}'.format(dest_fname_base,\n int(tb[0]),\n int(tb[3]),\n output_ext))\n self.tile_paths.append(dest_path)\n if len(tile_gdf) > 0:\n tile_gdf.to_file(dest_path, driver='GeoJSON')\n else:\n save_empty_geojson(dest_path, self.dest_crs)\n\n def tile_generator(self, src, tile_bounds, tile_bounds_crs=None,\n geom_type='Polygon', split_multi_geoms=True,\n min_partial_perc=0.0, obj_id_col=None):\n \"\"\"Generate `src` vector data tiles bounded by `tile_bounds`.\n\n Arguments\n ---------\n src : `str` or :class:`geopandas.GeoDataFrame`\n The source vector data to tile. Must either be a path to a GeoJSON\n or a :class:`geopandas.GeoDataFrame`.\n tile_bounds : list\n A :class:`list` made up of ``[left, top, right, bottom] `` sublists\n (this can be extracted from\n :class:`solaris.tile.raster_tile.RasterTiler` after tiling imagery)\n tile_bounds_crs : int, optional\n The EPSG code for the CRS that the tile bounds are in. If not\n provided, it's assumed that the CRS is the same as in `src`. This\n argument must be provided if the bound coordinates and `src` are\n not in the same CRS, otherwise tiling will not occur correctly.\n geom_type : str, optional (default: \"Polygon\")\n The type of geometries contained within `src`. Defaults to\n ``\"Polygon\"``, can also be ``\"LineString\"``.\n split_multi_geoms : bool, optional (default: True)\n Should multi-polygons or multi-linestrings generated by clipping\n a geometry into discontinuous pieces be separated? Defaults to yes\n (``True``).\n min_partial_perc : float, optional (default: 0.0)\n The minimum percentage of a :class:`shapely.geometry.Polygon` 's\n area or :class:`shapely.geometry.LineString` 's length that must\n be retained within a tile's bounds to be included in the output.\n Defaults to ``0.0``, meaning that the contained portion of a\n clipped geometry will be included, no matter how small.\n obj_id_col : str, optional (default: None)\n If ``split_multi_geoms=True``, the name of a column that specifies\n a unique identifier for each geometry (e.g. the ``\"BuildingId\"``\n column in many SpaceNet datasets.) 
See\n :func:`solaris.utils.geo.split_multi_geometries` for more.\n\n Yields\n ------\n tile_gdf : :class:`geopandas.GeoDataFrame`\n A tile geodataframe.\n tb : list\n A list with ``[left, top, right, bottom] `` coordinates for the\n boundaries contained by `tile_gdf`.\n \"\"\"\n self.src = _check_gdf_load(src)\n if self.verbose:\n print(\"Num tiles:\", len(tile_bounds))\n\n self.src_crs = _check_crs(self.src.crs)\n # check if the tile bounds and vector are in the same crs\n if tile_bounds_crs is not None:\n tile_bounds_crs = _check_crs(tile_bounds_crs)\n else:\n tile_bounds_crs = self.src_crs\n if self.src_crs != tile_bounds_crs:\n reproject_bounds = True # used to transform tb for clip_gdf()\n else:\n reproject_bounds = False\n\n self.proj_unit = self.src_crs.linear_units\n if getattr(self, 'dest_crs', None) is None:\n self.dest_crs = self.src_crs\n for i, tb in enumerate(tile_bounds):\n if self.super_verbose:\n print(\"\\n\", i, \"/\", len(tile_bounds))\n if reproject_bounds:\n tile_gdf = clip_gdf(self.src,\n reproject_geometry(box(*tb),\n tile_bounds_crs,\n self.src_crs),\n min_partial_perc,\n geom_type, verbose=self.super_verbose)\n else:\n tile_gdf = clip_gdf(self.src, tb, min_partial_perc, geom_type,\n verbose=self.super_verbose)\n if self.src_crs != self.dest_crs:\n tile_gdf = tile_gdf.to_crs(crs=self.dest_crs.to_wkt())\n if split_multi_geoms:\n split_multi_geometries(tile_gdf, obj_id_col=obj_id_col)\n yield tile_gdf, tb\n\n\ndef search_gdf_polygon(gdf, tile_polygon):\n \"\"\"Find polygons in a GeoDataFrame that overlap with `tile_polygon` .\n\n Arguments\n ---------\n gdf : :py:class:`geopandas.GeoDataFrame`\n A :py:class:`geopandas.GeoDataFrame` of polygons to search.\n tile_polygon : :py:class:`shapely.geometry.Polygon`\n A :py:class:`shapely.geometry.Polygon` denoting a tile's bounds.\n\n Returns\n -------\n precise_matches : :py:class:`geopandas.GeoDataFrame`\n The subset of `gdf` that overlaps with `tile_polygon` . If\n there are no overlaps, this will return an empty\n :py:class:`geopandas.GeoDataFrame`.\n\n \"\"\"\n\n sindex = gdf.sindex\n possible_matches_index = list(sindex.intersection(tile_polygon.bounds))\n possible_matches = gdf.iloc[possible_matches_index]\n precise_matches = possible_matches[\n possible_matches.intersects(tile_polygon)\n ]\n if precise_matches.empty:\n precise_matches = gpd.GeoDataFrame(geometry=[])\n return precise_matches\n\n\ndef clip_gdf(gdf, tile_bounds, min_partial_perc=0.0, geom_type=\"Polygon\",\n use_sindex=True, verbose=False):\n \"\"\"Clip GDF to a provided polygon.\n\n Clips objects within `gdf` to the region defined by\n `tile_bounds`. Also adds several columns to the output::\n\n `origarea`\n The original area of the polygons (only used if `geom_type` ==\n ``\"Polygon\"``).\n `origlen`\n The original length of the objects (only used if `geom_type` ==\n ``\"LineString\"``).\n `partialDec`\n The fraction of the object that remains after clipping\n (fraction of area for Polygons, fraction of length for\n LineStrings.) Can filter based on this by using `min_partial_perc`.\n `truncated`\n Boolean indicator of whether or not an object was clipped.\n\n Arguments\n ---------\n gdf : :py:class:`geopandas.GeoDataFrame`\n A :py:class:`geopandas.GeoDataFrame` of polygons to clip.\n tile_bounds : `list` or :class:`shapely.geometry.Polygon`\n The geometry to clip objects in `gdf` to. 
This can either be a\n ``[left, top, right, bottom] `` bounds list or a\n :class:`shapely.geometry.Polygon` object defining the area to keep.\n min_partial_perc : float, optional\n The minimum fraction of an object in `gdf` that must be\n preserved. Defaults to 0.0 (include any object if any part remains\n following clipping).\n geom_type : str, optional\n Type of objects in `gdf`. Can be one of\n ``[\"Polygon\", \"LineString\"]`` . Defaults to ``\"Polygon\"`` .\n use_sindex : bool, optional\n Whether the `gdf` sindex should be used for searching. Improves\n efficiency but requires `libspatialindex <http://libspatialindex.github.io/>`__ .\n verbose : bool, optional\n Switch to print relevant values.\n\n Returns\n -------\n cut_gdf : :py:class:`geopandas.GeoDataFrame`\n `gdf` with all contained objects clipped to `tile_bounds` .\n See notes above for details on additional clipping columns added.\n\n \"\"\"\n if isinstance(tile_bounds, tuple):\n tb = box(*tile_bounds)\n elif isinstance(tile_bounds, list):\n tb = box(*tile_bounds)\n elif isinstance(tile_bounds, Polygon):\n tb = tile_bounds\n if use_sindex and (geom_type == \"Polygon\"):\n gdf = search_gdf_polygon(gdf, tb)\n\n # if geom_type == \"LineString\":\n if 'origarea' in gdf.columns:\n pass\n else:\n if geom_type == \"LineString\":\n gdf['origarea'] = 0\n else:\n gdf['origarea'] = gdf.area\n\n if 'origlen' in gdf.columns:\n pass\n else:\n if geom_type == \"LineString\":\n gdf['origlen'] = gdf.length\n else:\n gdf['origlen'] = 0\n # TODO must implement different case for lines and for spatialIndex\n # (Assume RTree is already performed)\n\n cut_gdf = gdf.copy()\n cut_gdf.geometry = gdf.intersection(tb)\n\n if geom_type == 'Polygon':\n cut_gdf['partialDec'] = cut_gdf.area / cut_gdf['origarea']\n cut_gdf = cut_gdf.loc[cut_gdf['partialDec'] > min_partial_perc, :]\n cut_gdf['truncated'] = (cut_gdf['partialDec'] != 1.0).astype(int)\n else:\n # assume linestrings\n # remove null\n cut_gdf = cut_gdf[cut_gdf['geometry'].notnull()]\n cut_gdf['partialDec'] = 1\n cut_gdf['truncated'] = 0\n # cut_gdf = cut_gdf[cut_gdf.geom_type != \"GeometryCollection\"]\n if len(cut_gdf) > 0 and verbose:\n print(\"clip_gdf() - gdf.iloc[0]:\", gdf.iloc[0])\n print(\"clip_gdf() - tb:\", tb)\n print(\"clip_gdf() - gdf_cut:\", cut_gdf)\n\n # TODO: IMPLEMENT TRUNCATION MEASUREMENT FOR LINESTRINGS\n\n return cut_gdf\n"
] |
[
[
"numpy.round"
]
] |
MMV-Lab/cell_movie_analysis
|
[
"26ac844a79ee4978db26aea8fc1fc9bd6a19a2c0"
] |
[
"run_tracking.py"
] |
[
"import os\nimport numpy as np\nfrom glob import glob\nfrom scipy import optimize, spatial, ndimage\nfrom tifffile import imread, imsave\nfrom skimage.segmentation import find_boundaries\nfrom skimage.morphology import remove_small_objects\nfrom skimage.draw import line\nfrom utils import random_colormap\nimport pdb\n\n# define binarization function\ndef prepare_binary(fn):\n # generate binary segmentaiton result\n seg = np.squeeze(imread(fn)) > bw_th\n seg = remove_small_objects(seg>0, min_size=min_obj_size)\n return seg\n\n# params\nmax_matching_dist = 45\napprox_inf = 65535\ntrack_display_legnth = 20\nmin_obj_size = 20\nbw_th = -0.5\n\nparent_path = \"/mnt/data/\"\nall_movies = glob(parent_path + \"timelapse/*.tiff\")\nfor M_idx, movies in enumerate(all_movies):\n movie_basename = os.path.basename(movies)\n well_name = movie_basename[:-5]\n\n seg_path = f\"{parent_path}timelapse_seg/{well_name}/\"\n # vis_path = f\"{parent_path}timelapse_track/{well_name}\"\n # os.makedirs(vis_path, exist_ok=True)\n raw_path = f\"{parent_path}timelapse/{well_name}\"\n track_result = f\"{parent_path}timelapse_track/{well_name}_result.npy\"\n\n\n total_time = len(glob(raw_path + \"/*.tiff\"))\n traj = dict()\n lineage = dict()\n for tt in range(total_time):\n seg_fn = seg_path + f\"img_{tt}_segmentation.tiff\"\n\n seg = prepare_binary(seg_fn)\n\n # get label image\n seg_label, num_cells = ndimage.label(seg)\n\n # calculate center of mass\n centroid = ndimage.center_of_mass(seg, labels=seg_label, index=np.arange(1, num_cells + 1))\n\n # generate cell information of this frame\n traj.update({\n tt : {\"centroid\": centroid, \"parent\": [], \"child\": [], \"ID\": []}\n })\n\n \n # initialize trajectory ID, parent node, track pts for the first frame\n max_cell_id = len(traj[0].get(\"centroid\"))\n traj[0].update(\n {\"ID\": np.arange(0, max_cell_id, 1)}\n )\n traj[0].update(\n {\"parent\": -1 * np.ones(max_cell_id, dtype=int)}\n )\n centers = traj[0].get(\"centroid\")\n pts = []\n for ii in range(max_cell_id):\n pts.append([centers[ii]])\n lineage.update({ii: [centers[ii]]})\n traj[0].update({\"track_pts\": pts})\n\n for tt in np.arange(1, total_time):\n p_prev = traj[tt-1].get(\"centroid\")\n p_next = traj[tt].get(\"centroid\")\n\n ###########################################################\n # simple LAP tracking\n ###########################################################\n num_cell_prev = len(p_prev)\n num_cell_next = len(p_next)\n\n # calculate distance between each pair of cells\n cost_mat = spatial.distance.cdist(p_prev, p_next)\n\n # if the distance is too far, change to approx. 
Inf.\n cost_mat[cost_mat > max_matching_dist] = approx_inf\n\n # add edges from cells in previous frame to auxiliary vertices\n # in order to accommodate segmentation errors and leaving cells\n cost_mat_aug = max_matching_dist * 1.2 * np.ones(\n (num_cell_prev, num_cell_next + num_cell_prev), dtype=float\n )\n cost_mat_aug[:num_cell_prev, :num_cell_next] = cost_mat[:, :]\n\n # solve the optimization problem\n row_ind, col_ind = optimize.linear_sum_assignment(cost_mat_aug)\n\n #########################################################\n # parse the matching result\n #########################################################\n prev_child = np.ones(num_cell_prev, dtype=int)\n next_parent = np.ones(num_cell_next, dtype=int)\n next_ID = np.zeros(num_cell_next, dtype=int)\n next_track_pts = []\n\n # assign child for cells in previous frame\n for ii in range(num_cell_prev):\n if col_ind[ii] >= num_cell_next:\n prev_child[ii] = -1\n else:\n prev_child[ii] = col_ind[ii]\n\n # assign parent for cells in next frame, update ID and track pts\n prev_pt = traj[tt-1].get(\"track_pts\")\n prev_id = traj[tt-1].get(\"ID\")\n for ii in range(num_cell_next):\n if ii in col_ind:\n # a matched cell is found\n next_parent[ii] = np.where(col_ind == ii)[0][0]\n next_ID[ii] = prev_id[next_parent[ii]]\n \n current_pts = prev_pt[next_parent[ii]].copy()\n current_pts.append(p_next[ii])\n if len(current_pts) > track_display_length:\n current_pts.pop(0)\n next_track_pts.append(current_pts)\n # attach this point to the lineage\n single_lineage = lineage.get(next_ID[ii])\n try:\n single_lineage.append(p_next[ii])\n except Exception:\n pdb.set_trace()\n lineage.update({next_ID[ii]: single_lineage})\n else:\n # a new cell\n next_parent[ii] = -1\n next_ID[ii] = max_cell_id\n next_track_pts.append([p_next[ii]])\n lineage.update({max_cell_id: [p_next[ii]]})\n max_cell_id += 1\n\n # update record\n traj[tt-1].update({\"child\": prev_child})\n traj[tt].update({\"parent\": next_parent})\n traj[tt].update({\"ID\": next_ID})\n traj[tt].update({\"track_pts\": next_track_pts})\n\n np.save(track_result, [traj, lineage])\n\n\"\"\"\n######################################################\n# generate track visualization\n######################################################\ncmap = random_colormap()\nfor tt in range(total_time):\n # print(traj[tt].get(\"ID\"))\n\n # load segmentation and extract contours\n seg_fn = seg_path + f\"img_{tt}_segmentation.tiff\"\n seg = prepare_binary(seg_fn)\n seg_label, num_cells = ndimage.label(seg)\n cell_contours = find_boundaries(seg, mode='inner').astype(np.uint16)\n cell_contours[cell_contours > 0] = 1\n cell_contours = cell_contours * seg_label.astype(np.uint16)\n cell_contours = cell_contours - 1 # to make the first object have label 0, to match index\n\n # load raw image and create visualization in RGB\n # TODO: use real raw images\n # raw = seg.astype(np.uint8)\n raw = np.squeeze(imread(raw_path + f\"img_{tt}.tiff\")).astype(np.float32)\n raw = (raw - raw.min())/ (raw.max() - raw.min())\n raw = raw * 255\n raw = raw.astype(np.uint8)\n vis = np.zeros((raw.shape[0], raw.shape[1], 3), dtype=np.uint8)\n for cc in range(3):\n vis[:, :, cc] = raw\n\n # loop through all cells, for each cell, we do the following\n # 1- find ID, 2- load the color, 3- draw contour 4- draw track\n cell_id = traj[tt].get(\"ID\")\n pts = traj[tt].get(\"track_pts\")\n for cid in range(num_cells):\n # find ID\n this_id = cell_id[cid]\n\n # load the color\n this_color = 255 * cmap.colors[this_id]\n this_color = 
this_color.astype(np.uint8)\n\n # draw contour\n for cc in range(3):\n vis_c = vis[:, :, cc]\n vis_c[cell_contours == cid] = this_color[cc]\n vis[:, :, cc] = vis_c # TODO: check if we need this line\n\n # draw track\n this_track = pts[cid]\n\n if len(this_track) < 2:\n continue\n else:\n for pid in range(len(this_track) - 1):\n p1 = this_track[pid]\n p2 = this_track[pid + 1]\n rr, cc = line(int(round(p1[0])), int(round(p1[1])), int(round(p2[0])), int(round(p2[1])))\n for ch in range(3):\n vis[rr, cc ,ch] = this_color[ch]\n\n imsave(vis_path + f\"img_{tt+1}.tiff\", vis)\n\"\"\"\n"
] |
[
[
"numpy.zeros",
"scipy.ndimage.label",
"numpy.ones",
"numpy.save",
"scipy.optimize.linear_sum_assignment",
"numpy.where",
"numpy.arange",
"scipy.spatial.distance.cdist"
]
] |
RadiotherapyAI/platipy
|
[
"53294789a3805ea088c9953027f4ab09a614f052",
"53294789a3805ea088c9953027f4ab09a614f052"
] |
[
"platipy/dicom/io/crawl.py",
"platipy/imaging/projects/cardiac/run.py"
] |
[
"# Copyright 2020 University of New South Wales, University of Sydney\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport re\nimport sys\n\nimport pathlib\nimport pydicom\nimport numpy as np\nimport SimpleITK as sitk\n\nfrom skimage.draw import polygon\nfrom loguru import logger\n\nfrom datetime import datetime\n\n\ndef flatten(itr):\n if type(itr) in (str, bytes, sitk.Image):\n yield itr\n else:\n for x in itr:\n try:\n yield from flatten(x)\n except TypeError:\n yield x\n\n\ndef get_suv_bw_scale_factor(ds):\n # Modified from\n # https://qibawiki.rsna.org/images/6/62/SUV_vendorneutral_pseudocode_happypathonly_20180626_DAC.pdf\n\n if ds.Units == \"CNTS\":\n # Try to find the Philips private scale factor\")\n return float(ds[0x7053, 0x1000].value)\n\n assert ds.Modality == \"PT\"\n assert \"DECY\" in ds.CorrectedImage\n assert \"ATTN\" in ds.CorrectedImage\n assert \"START\" in ds.DecayCorrection\n assert ds.Units == \"BQML\"\n\n half_life = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideHalfLife)\n\n if \"SeriesTime\" in ds:\n series_date_time = ds.SeriesDate + \"_\" + ds.SeriesTime\n if \".\" in series_date_time:\n series_date_time = series_date_time[\n : -(len(series_date_time) - series_date_time.index(\".\"))\n ]\n series_date_time = datetime.strptime(series_date_time, \"%Y%m%d_%H%M%S\")\n\n if \"SeriesTime\" in ds:\n start_time = (\n ds.SeriesDate\n + \"_\"\n + ds.RadiopharmaceuticalInformationSequence[0].RadiopharmaceuticalStartTime\n )\n if \".\" in start_time:\n start_time = start_time[: -(len(start_time) - start_time.index(\".\"))]\n start_time = datetime.strptime(start_time, \"%Y%m%d_%H%M%S\")\n\n decay_time = (series_date_time - start_time).seconds\n injected_dose = float(ds.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)\n decayed_dose = injected_dose * pow(2, -decay_time / half_life)\n patient_weight = float(ds.PatientWeight)\n suv_bw_scale_factor = patient_weight * 1000 / decayed_dose\n\n return suv_bw_scale_factor\n\n\ndef get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name=\"UNKNOWN\"):\n \"\"\"\n Attempts to return some information from a DICOM\n This is typically used for naming converted NIFTI files\n\n Args:\n dicom_object (pydicom.dataset.FileDataset): The DICOM object\n return_extra (bool, optional): return information that is usually not required\n\n Returns:\n info (str): Some extracted information\n \"\"\"\n try:\n dicom_sop_class_name = dicom_object.SOPClassUID.name\n except AttributeError:\n logger.warning(f\"Could not find DICOM SOP Class UID, using {sop_class_name}.\")\n dicom_sop_class_name = sop_class_name\n\n if \"Image\" in dicom_sop_class_name:\n # Get the modality\n image_modality = dicom_object.Modality\n logger.info(f\" Image modality: {image_modality}\")\n\n if image_modality == \"CT\":\n # There is typically not much extra information\n # At the moment, we do not return anything for CT imaging\n if return_extra:\n try:\n protocol_name = dicom_object.ProtocolName\n\n if protocol_name != 
\"\":\n return re.sub(r\"[^\\w]\", \"_\", protocol_name).upper()\n except AttributeError:\n logger.warning(\" Could not find ProtocolName\")\n\n return \"\"\n\n elif image_modality == \"MR\":\n # Not much consistency, but we can get the protocol name\n try:\n protocol_name = re.sub(r\"[^\\w]\", \"_\", dicom_object.ProtocolName).upper()\n except AttributeError:\n logger.warning(\" Could not find ProtocolName\")\n protocol_name = \"\"\n\n try:\n sequence_name = re.sub(r\"[^\\w]\", \"_\", dicom_object.SequenceName).upper()\n except AttributeError:\n logger.warning(\" Could not find SequenceName\")\n sequence_name = \"\"\n\n try:\n series_description = re.sub(r\"[^\\w]\", \"_\", dicom_object.SeriesDescription).upper()\n except AttributeError:\n logger.warning(\" Could not find SequenceName\")\n series_description = \"\"\n\n combined_name = \"_\".join([protocol_name, sequence_name, series_description])\n\n while \"__\" in combined_name:\n combined_name = combined_name.replace(\"__\", \"_\")\n\n if protocol_name != \"\" and not return_extra:\n return protocol_name\n\n else:\n return combined_name\n\n elif image_modality == \"PT\":\n # Not much experience with this\n # We can search through the corrections applied\n # Return whether or not attentuation is applied\n\n try:\n corrections = dicom_object.CorrectedImage\n except AttributeError:\n corrections = \"NONE\"\n\n if \"ATTN\" in corrections:\n return \"AC\"\n else:\n return \"NAC\"\n\n\ndef safe_sort_dicom_image_list(dicom_image_list):\n \"\"\"\n Sorts a list of DICOM image files based on a DICOM tag value.\n This is a much safer method than reading SliceLocation.\n It takes mandatory DICOM fields (Image Position [Patient]) and (Image Orientation [Patient]).\n The list of DICOM files is sorted by projecting the image position onto the axis normal to the\n place defined by the image orientation.\n\n This accounts for differences in patient position (e.g. 
HFS/FFS).\n\n Args:\n dicom_image_list (list): [description]\n \"\"\"\n sorted_dict = {}\n for dicom_file in dicom_image_list:\n dcm = pydicom.read_file(dicom_file, force=True)\n\n image_position = np.array(dcm.ImagePositionPatient, dtype=float)\n image_orientation = np.array(dcm.ImageOrientationPatient, dtype=float)\n\n image_plane_normal = np.cross(image_orientation[:3], image_orientation[3:])\n\n slice_location = (image_position * image_plane_normal)[2]\n\n sorted_dict[dicom_file] = slice_location\n\n sorter_safe = lambda dcm_file: sorted_dict[dcm_file]\n\n return sorted(dicom_image_list, key=sorter_safe)\n\n\ndef fix_missing_data(contour_data_list):\n \"\"\"\n Fixes missing points in contouring using simple linear interpolation\n\n\n Args:\n contour_data_list (list): The contour data for each slice\n\n Returns:\n contour_data (numpy array): Interpolated contour data\n \"\"\"\n contour_data = np.array(contour_data_list)\n if contour_data.any() == \"\":\n logger.warning(\" Missing values detected.\")\n missing_values = np.where(contour_data == \"\")[0]\n if missing_values.shape[0] > 1:\n logger.warning(\" More than one value missing, fixing this isn't implemented yet...\")\n else:\n logger.warning(\" Only one value missing.\")\n missing_index = missing_values[0]\n missing_axis = missing_index % 3\n if missing_axis == 0:\n logger.warning(\" Missing value in x axis: interpolating.\")\n if missing_index > len(contour_data) - 3:\n lower_val = contour_data[missing_index - 3]\n upper_val = contour_data[0]\n elif missing_index == 0:\n lower_val = contour_data[-3]\n upper_val = contour_data[3]\n else:\n lower_val = contour_data[missing_index - 3]\n upper_val = contour_data[missing_index + 3]\n contour_data[missing_index] = 0.5 * (lower_val + upper_val)\n elif missing_axis == 1:\n logger.warning(\" Missing value in y axis: interpolating.\")\n if missing_index > len(contour_data) - 2:\n lower_val = contour_data[missing_index - 3]\n upper_val = contour_data[1]\n elif missing_index == 0:\n lower_val = contour_data[-2]\n upper_val = contour_data[4]\n else:\n lower_val = contour_data[missing_index - 3]\n upper_val = contour_data[missing_index + 3]\n contour_data[missing_index] = 0.5 * (lower_val + upper_val)\n else:\n logger.warning(\" Missing value in z axis: taking slice value\")\n temp = contour_data[2::3].tolist()\n temp.remove(\"\")\n contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))\n return contour_data\n\n\ndef transform_point_set_from_dicom_struct(image, dicom_struct, spacing_override=False):\n \"\"\"\n This function is used to generate a binary mask from a set of vertices.\n This allows us to convert from DICOM-RTStruct format to any imaging format.\n\n Args:\n image ([SimpleITK.Image]): The image, used to copy imaging information\n (e.g. resolution, spacing)\n dicom_struct ([pydicom.Dataset]): The DICOM-RTStruct file\n spacing_override (bool | tuple, optional): Overwrite the spacing.\n Set with (axial_spacing, coronal_spacing, sagittal spacing). 
Defaults to False.\n\n Returns:\n list, list : final_struct_name_sequence, structure_list\n \"\"\"\n if spacing_override:\n current_spacing = list(image.GetSpacing())\n new_spacing = tuple(\n [\n current_spacing[k] if spacing_override[k] == 0 else spacing_override[k]\n for k in range(3)\n ]\n )\n image.SetSpacing(new_spacing)\n\n struct_point_sequence = dicom_struct.ROIContourSequence\n struct_name_sequence = [\n \"_\".join(i.ROIName.split()) for i in dicom_struct.StructureSetROISequence\n ]\n\n structure_list = []\n final_struct_name_sequence = []\n\n for structIndex, structure_name in enumerate(struct_name_sequence):\n image_blank = np.zeros(image.GetSize()[::-1], dtype=np.uint8)\n logger.info(\n \" Converting structure {0} with name: {1}\".format(structIndex, structure_name)\n )\n\n if structIndex >= len(struct_point_sequence):\n logger.warning(\" Contour sequence is missing, skipping.\")\n continue\n\n if not hasattr(struct_point_sequence[structIndex], \"ContourSequence\"):\n logger.warning(\" No contour sequence found for this structure, skipping.\")\n continue\n\n if len(struct_point_sequence[structIndex].ContourSequence) == 0:\n logger.warning(\" Contour sequence is empty, skipping.\")\n continue\n\n if (\n not struct_point_sequence[structIndex].ContourSequence[0].ContourGeometricType\n == \"CLOSED_PLANAR\"\n ):\n logger.warning(\" This is not a closed planar structure, skipping.\")\n continue\n\n for sl in range(len(struct_point_sequence[structIndex].ContourSequence)):\n\n contour_data = fix_missing_data(\n struct_point_sequence[structIndex].ContourSequence[sl].ContourData\n )\n\n struct_slice_contour_data = np.array(contour_data, dtype=np.double)\n vertexArr_physical = struct_slice_contour_data.reshape(\n struct_slice_contour_data.shape[0] // 3, 3\n )\n\n point_arr = np.array(\n [image.TransformPhysicalPointToIndex(i) for i in vertexArr_physical]\n ).T\n\n [xVertexArr_image, yVertexArr_image] = point_arr[[0, 1]]\n zIndex = point_arr[2][0]\n\n if np.any(point_arr[2] != zIndex):\n logger.error(\" Axial slice index varies in contour. Quitting now.\")\n logger.error(\" Structure: {0}\".format(structure_name))\n logger.error(\" Slice index: {0}\".format(zIndex))\n quit()\n\n if zIndex >= image.GetSize()[2]:\n logger.warning(\" Slice index greater than image size. 
Skipping slice.\")\n logger.warning(\" Structure: {0}\".format(structure_name))\n logger.warning(\" Slice index: {0}\".format(zIndex))\n continue\n\n sliceArr = np.zeros(image.GetSize()[:2], dtype=np.uint8)\n filledIndicesX, filledIndicesY = polygon(\n xVertexArr_image, yVertexArr_image, shape=sliceArr.shape\n )\n sliceArr[filledIndicesX, filledIndicesY] = 1\n image_blank[zIndex] += sliceArr.T\n\n struct_image = sitk.GetImageFromArray(1 * (image_blank > 0))\n struct_image.CopyInformation(image)\n structure_list.append(sitk.Cast(struct_image, sitk.sitkUInt8))\n structure_name_clean = re.sub(r\"[^\\w]\", \"_\", structure_name).upper()\n while \"__\" in structure_name_clean:\n structure_name_clean = structure_name_clean.replace(\"__\", \"_\")\n final_struct_name_sequence.append(structure_name_clean)\n\n return final_struct_name_sequence, structure_list\n\n\ndef process_dicom_file_list(dicom_file_list, parent_sorting_field=\"PatientName\", verbose=False):\n\n \"\"\"\n Organise the DICOM files by the series UID\n \"\"\"\n dicom_series_dict_parent = {}\n\n for i, dicom_file in enumerate(sorted(dicom_file_list)):\n if verbose is True:\n logger.debug(f\" Sorting file {i}\")\n\n dicom_file = dicom_file.as_posix()\n\n if \"dicomdir\" in dicom_file.lower():\n logger.warning(\n \"DICOMDIR is not supported in this tool, images are read directly. Skipping.\"\n )\n continue\n\n dicom_object = pydicom.read_file(dicom_file, force=True)\n\n parent_sorting_field_data = dicom_object[parent_sorting_field].value\n\n if parent_sorting_field_data not in dicom_series_dict_parent.keys():\n dicom_series_dict_parent[parent_sorting_field_data] = {}\n\n series_uid = dicom_object.SeriesInstanceUID\n\n if series_uid not in dicom_series_dict_parent[parent_sorting_field_data].keys():\n dicom_series_dict_parent[parent_sorting_field_data][series_uid] = [dicom_file]\n\n else:\n dicom_series_dict_parent[parent_sorting_field_data][series_uid].append(dicom_file)\n\n return dicom_series_dict_parent\n\n\ndef process_dicom_series(\n dicom_series_dict,\n series_uid,\n parent_sorting_field=\"PatientName\",\n return_extra=True,\n individual_file=False,\n initial_sop_class_name_default=\"UNKNOWN\",\n):\n if not individual_file:\n logger.info(f\" Processing series UID: {series_uid}\")\n dicom_file_list = dicom_series_dict[series_uid]\n else:\n logger.info(f\" Processing individual file: {individual_file}\")\n dicom_file_list = [individual_file]\n\n logger.info(f\" Number of DICOM files: {len(dicom_file_list)}\")\n\n initial_dicom = pydicom.read_file(dicom_file_list[0])\n\n # Get the data in the parent sorting field, clean with RegEx\n parent_sorting_data = re.sub(\n r\"[^\\w]\", \"_\", str(initial_dicom[parent_sorting_field].value)\n ).upper()\n\n if parent_sorting_data == \"\":\n logger.error(\n f\"Could not find any data in {parent_sorting_field}. This is very bad, the data cannot be sorted properly.\"\n )\n \"\"\"\n ! TO DO\n Implement a routine to let a user correlate a root directory with a name\n \"\"\"\n parent_sorting_data = \"TEMP\"\n\n try:\n initial_dicom_sop_class_name = initial_dicom.SOPClassUID.name\n except AttributeError:\n logger.warning(\n f\"Could not find DICOM SOP Class UID, using {initial_sop_class_name_default}.\"\n )\n initial_dicom_sop_class_name = initial_sop_class_name_default\n\n try:\n study_uid = initial_dicom.StudyInstanceUID\n except AttributeError:\n study_uid = \"00001\"\n\n \"\"\"\n ! 
TO DO\n Need to check for secondary capture image storage\n This can include JPEGs with written information on them\n This is typically not very useful\n We can dump it to file\n Or just save the DICOM file in the folder of interest\n\n Not a big problem, sort out another day\n \"\"\"\n\n # Check the potential types of DICOM files\n if (\n \"Image\" in initial_dicom_sop_class_name\n and initial_dicom_sop_class_name != \"Secondary Capture Image Storage\"\n ):\n # Load as an primary image\n\n sorted_file_list = safe_sort_dicom_image_list(dicom_file_list)\n\n try:\n image = sitk.ReadImage(sorted_file_list)\n except RuntimeError:\n logger.warning(\" Could not read image into SimpleITK.\")\n logger.info(\" Processing files individually.\")\n\n for dicom_file in dicom_file_list:\n return process_dicom_series(\n dicom_series_dict,\n series_uid,\n parent_sorting_field=parent_sorting_field,\n return_extra=return_extra,\n individual_file=dicom_file,\n initial_sop_class_name_default=initial_sop_class_name_default,\n )\n\n dicom_file_metadata = {\n \"parent_sorting_data\": parent_sorting_data,\n \"study_uid\": study_uid,\n }\n\n \"\"\"\n ! TO DO - integrity check\n Read in all the files here, check the slice location and determine if any are missing\n \"\"\"\n if initial_dicom.Modality == \"PT\":\n\n # scaling_factor = get_suv_bw_scale_factor(initial_dicom)\n # image *= scaling_factor\n\n # !TO DO\n # Work on PET SUV conversion\n None\n\n \"\"\"\n ! CHECKPOINT\n Some DCE MRI sequences have the same series UID\n Here we check the sequence name, and split if necessary\n \"\"\"\n\n if initial_dicom.Modality == \"MR\":\n\n try:\n sequence_names = np.unique(\n [pydicom.read_file(x).SequenceName for x in dicom_file_list]\n )\n\n sequence_dict = {}\n for dcm_name in dicom_file_list:\n dcm_obj = pydicom.read_file(dcm_name)\n var = dcm_obj.SequenceName\n if var not in sequence_dict.keys():\n sequence_dict[var] = [dcm_name]\n else:\n sequence_dict[var].append(dcm_name)\n\n except AttributeError:\n try:\n logger.warning(\n \" MRI sequence name not found. The SeriesDescription will be used instead.\"\n )\n\n sequence_names = np.unique(\n [pydicom.read_file(x).SeriesDescription for x in dicom_file_list]\n )\n\n sequence_dict = {}\n for dcm_name in dicom_file_list:\n dcm_obj = pydicom.read_file(dcm_name)\n var = dcm_obj.SeriesDescription\n if var not in sequence_dict.keys():\n sequence_dict[var] = [dcm_name]\n else:\n sequence_dict[var].append(dcm_name)\n\n except AttributeError:\n logger.warning(\n \" MRI SeriesDescription not found. 
The AcquisitionComments will be used instead.\"\n )\n\n sequence_names = np.unique(\n [pydicom.read_file(x).AcquisitionComments for x in dicom_file_list]\n )\n\n sequence_dict = {}\n for dcm_name in dicom_file_list:\n dcm_obj = pydicom.read_file(dcm_name)\n var = dcm_obj.AcquisitionComments\n if var not in sequence_dict.keys():\n sequence_dict[var] = [dcm_name]\n else:\n sequence_dict[var].append(dcm_name)\n\n if initial_dicom.Manufacturer == \"GE MEDICAL SYSTEMS\":\n # GE use the DICOM tag (0019, 10a2) [Raw data run number]\n # in Diffusion weighted MRI sequences\n # We need to separate this out to get the difference sequences\n\n if initial_dicom.SeriesDescription == \"Diffusion Weighted\":\n\n # num_sequences = int( (initial_dicom[(0x0025, 0x1007)]) / (initial_dicom[(0x0021, 0x104f)]) )\n # number_of_images / images_per_seq\n num_images_per_seq = initial_dicom[(0x0021, 0x104F)].value\n\n sequence_names = np.unique(\n [\n f\"DWI_{str( ( pydicom.read_file(x)['InstanceNumber'].value - 1) // num_images_per_seq )}\"\n for x in dicom_file_list\n ]\n )\n\n sequence_name_index_dict = {\n name: index for index, name in enumerate(sequence_names)\n }\n\n sequence_dict = {}\n for dcm_name in dicom_file_list:\n dcm_obj = pydicom.read_file(dcm_name)\n var = f\"DWI_{str( ( dcm_obj['InstanceNumber'].value - 1) // num_images_per_seq )}\"\n var_to_index = sequence_name_index_dict[var]\n\n if var_to_index not in sequence_dict.keys():\n sequence_dict[var_to_index] = [dcm_name]\n else:\n sequence_dict[var_to_index].append(dcm_name)\n\n sequence_names = sorted(sequence_dict.keys())\n\n if np.alen(sequence_names) > 1:\n logger.warning(\" Two MR sequences were found under a single series UID.\")\n logger.warning(\" These will be split into separate images.\")\n\n # Split up the DICOM file list by sequence name\n for sequence_name in sequence_names:\n\n dicom_file_list_by_sequence = sequence_dict[sequence_name]\n\n logger.info(sequence_name)\n logger.info(len(dicom_file_list_by_sequence))\n\n sorted_file_list = safe_sort_dicom_image_list(dicom_file_list_by_sequence)\n\n initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)\n\n image_by_sequence = sitk.ReadImage(sorted_file_list)\n\n dicom_file_metadata_by_sequence = {\n \"parent_sorting_data\": parent_sorting_data,\n \"study_uid\": study_uid,\n }\n\n yield \"IMAGES\", dicom_file_metadata_by_sequence, initial_dicom, image_by_sequence\n return # Stop iteration\n\n yield \"IMAGES\", dicom_file_metadata, initial_dicom, image\n\n if \"Structure\" in initial_dicom_sop_class_name:\n # Load as an RT structure set\n # This should be done individually for each file\n\n logger.info(f\" Number of files: {len(dicom_file_list)}\")\n for index, dicom_file in enumerate(dicom_file_list):\n dicom_object = pydicom.read_file(dicom_file, force=True)\n\n # We must also read in the corresponding DICOM image\n # This can be found by matching the references series UID to the series UID\n\n \"\"\"\n ! 
TO DO\n What happens if there is an RT structure set with different referenced sequences?\n \"\"\"\n\n # Get the \"ReferencedFrameOfReferenceSequence\", first item\n referenced_frame_of_reference_item = dicom_object.ReferencedFrameOfReferenceSequence[0]\n\n # Get the \"RTReferencedStudySequence\", first item\n # This retrieves the study UID\n # This might be useful, but would typically match the actual StudyInstanceUID in the\n # DICOM object\n rt_referenced_series_item = (\n referenced_frame_of_reference_item.RTReferencedStudySequence[0]\n )\n\n # Get the \"RTReferencedSeriesSequence\", first item\n # This retrieves the actual referenced series UID, which we need to match imaging\n # parameters\n rt_referenced_series_again_item = rt_referenced_series_item.RTReferencedSeriesSequence[\n 0\n ]\n\n # Get the appropriate series instance UID\n image_series_uid = rt_referenced_series_again_item.SeriesInstanceUID\n logger.info(f\" Item {index}: Matched SeriesInstanceUID = {image_series_uid}\")\n\n # Read in the corresponding image\n sorted_file_list = safe_sort_dicom_image_list(dicom_series_dict[image_series_uid])\n image = sitk.ReadImage(sorted_file_list)\n\n initial_dicom = pydicom.read_file(sorted_file_list[0], force=True)\n\n (\n structure_name_list,\n structure_image_list,\n ) = transform_point_set_from_dicom_struct(image, dicom_object)\n\n dicom_file_metadata = {\n \"parent_sorting_data\": parent_sorting_data,\n \"study_uid\": study_uid,\n \"structure_name_list\": structure_name_list,\n }\n\n yield \"STRUCTURES\", dicom_file_metadata, dicom_object, structure_image_list\n\n if \"Dose\" in initial_dicom_sop_class_name:\n # Load as an RT Dose distribution\n # This should be done individually for each file\n\n logger.info(f\" Number of files: {len(dicom_file_list)}\")\n for index, dicom_file in enumerate(dicom_file_list):\n dicom_object = pydicom.read_file(dicom_file, force=True)\n\n \"\"\"\n ! CHECKPOINT\n There should only be a single RT dose file (with each series UID)\n If there are more, yield each\n \"\"\"\n\n initial_dicom = pydicom.read_file(dicom_file, force=True)\n\n dicom_file_metadata = {\n \"parent_sorting_data\": parent_sorting_data,\n \"study_uid\": study_uid,\n }\n\n # We must read in as a float otherwise when we multiply by one later it will not work!\n raw_dose_image = sitk.ReadImage(dicom_file, sitk.sitkFloat32)\n\n dose_grid_scaling = dicom_object.DoseGridScaling\n\n logger.debug(f\" Dose grid scaling: {dose_grid_scaling} Gy\")\n\n scaled_dose_image = raw_dose_image * dose_grid_scaling\n\n yield \"DOSES\", dicom_file_metadata, dicom_object, scaled_dose_image\n\n \"\"\"\n ! TO DO\n 1. (DONE) Implement conversion of dose files (to NIFTI images)\n 2. Implement conversion of RT plan files to text dump\n 3. Do something with other files (e.g. Deformable Image Registration stuff)\n \"\"\"\n\n return\n\n\ndef write_output_data_to_disk(\n output_data_dict,\n output_directory=\"./\",\n output_file_suffix=\".nii.gz\",\n overwrite_existing_files=False,\n):\n \"\"\"\n Write output to disk\n \"\"\"\n if output_data_dict is None:\n return\n\n filename_fields = [i for i in output_data_dict.keys() if i != \"parent_sorting_data\"]\n parent_sorting_data = output_data_dict[\"parent_sorting_data\"]\n\n files_written = {}\n\n \"\"\"\n Write the converted images to disk\n\n ! 
CONSIDER\n We could simply write as we go?\n Pro: save memory, important if processing very large files\n Con: Reading as we go allows proper indexing\n\n \"\"\"\n\n for field in filename_fields:\n logger.info(f\" Writing files for field: {field}\")\n p = pathlib.Path(output_directory) / parent_sorting_data / field\n p.mkdir(parents=True, exist_ok=True)\n files_written[field] = []\n\n for field_filename_base, field_list in output_data_dict[field].items():\n # Check if there is a list of images with matching names\n # This will depend on the name format chosen\n # If there is a list, we append an index as we write to disk\n\n if isinstance(field_list, (tuple, list)):\n # Flatten\n field_list_flat = list(flatten(field_list))\n\n # Iterate\n for suffix, file_to_write in enumerate(field_list_flat):\n field_filename = field_filename_base + f\"_{suffix}\"\n\n # Some cleaning\n while \"__\" in field_filename:\n field_filename = field_filename.replace(\"__\", \"_\")\n\n while field_filename[-1] == \"_\":\n field_filename = field_filename[:-1]\n\n # Save image!\n output_name = (\n pathlib.Path(output_directory)\n / parent_sorting_data\n / field\n / (field_filename + output_file_suffix)\n )\n files_written[field].append(output_name)\n\n if output_name.is_file():\n logger.warning(f\" File exists: {output_name}\")\n\n if overwrite_existing_files:\n logger.warning(\" You have selected to overwrite existing files.\")\n\n else:\n logger.info(\n \" You have selected to NOT overwrite existing files. Continuing.\"\n )\n continue\n\n sitk.WriteImage(file_to_write, output_name.as_posix())\n\n else:\n field_filename = field_filename_base\n file_to_write = field_list\n\n # Some cleaning\n while \"__\" in field_filename:\n field_filename = field_filename.replace(\"__\", \"_\")\n\n while field_filename[-1] == \"_\":\n field_filename = field_filename[:-1]\n\n # Save image!\n \"\"\"\n ! TO DO\n Use pathlib, and perform some checks so we don\"t overwrite anything!\n \"\"\"\n output_name = (\n pathlib.Path(output_directory)\n / parent_sorting_data\n / field\n / (field_filename + output_file_suffix)\n )\n files_written[field].append(output_name)\n\n if output_name.is_file():\n logger.warning(f\" File exists: {output_name}\")\n\n if overwrite_existing_files:\n logger.warning(\" You have selected to overwrite existing files.\")\n\n else:\n logger.info(\n \" You have selected to NOT overwrite existing files. 
Continuing.\"\n )\n continue\n\n sitk.WriteImage(file_to_write, output_name.as_posix())\n\n return files_written\n\n\ndef process_dicom_directory(\n dicom_directory,\n parent_sorting_field=\"PatientName\",\n output_image_name_format=\"{parent_sorting_data}_{study_uid_index}_{Modality}_{image_desc}_{SeriesNumber}\",\n output_structure_name_format=\"{parent_sorting_data}_{study_uid_index}_{Modality}_{structure_name}\",\n output_dose_name_format=\"{parent_sorting_data}_{study_uid_index}_{DoseSummationType}\",\n return_extra=True,\n output_directory=\"./\",\n output_file_suffix=\".nii.gz\",\n overwrite_existing_files=False,\n write_to_disk=True,\n verbose=False,\n initial_sop_class_name_default=\"UNKNOWN\",\n):\n\n # Check dicom_directory type\n if isinstance(dicom_directory, str) or isinstance(dicom_directory, pathlib.Path):\n # Get all the DICOM files in the given directory\n root_path = pathlib.Path(dicom_directory)\n # Find files ending with .dcm, .dc3\n dicom_file_list = [\n p\n for p in root_path.glob(\"**/*\")\n if p.name.lower().endswith(\".dcm\") or p.name.lower().endswith(\".dc3\")\n ]\n\n elif hasattr(dicom_directory, \"__iter__\"):\n dicom_file_list = []\n for dicom_dir in dicom_directory:\n # Get all the DICOM files in each directory\n root_path = pathlib.Path(dicom_dir)\n # Find files ending with .dcm, .dc3\n dicom_file_list += [\n p\n for p in root_path.glob(\"**/*\")\n if p.name.lower().endswith(\".dcm\") or p.name.lower().endswith(\".dc3\")\n ]\n\n if len(dicom_file_list) == 0:\n logger.info(\"No DICOM files found in input directory. Exiting now.\")\n return\n\n # Process the DICOM files\n # This returns a dictionary (of dictionaries):\n # {parent_data (e.g. PatientName): {series_UID_1: [list_of_DICOM_files],\n # {series_UID_2: [list_of_DICOM_files], ...\n # parent_data_2 : {series_UID_1: [list_of_DICOM_files],\n # {series_UID_2: [list_of_DICOM_files], ...\n # ... }\n dicom_series_dict_parent = process_dicom_file_list(\n dicom_file_list, parent_sorting_field=parent_sorting_field, verbose=verbose\n )\n\n if dicom_series_dict_parent is None:\n logger.info(\"No valid DICOM files found. Ending.\")\n return None\n\n output = {}\n\n for parent_data, dicom_series_dict in dicom_series_dict_parent.items():\n logger.info(f\"Processing data for {parent_sorting_field} = {parent_data}.\")\n logger.info(f\" Number of DICOM series = {len(dicom_series_dict.keys())}\")\n\n # Set up the output data\n # This stores the SimpleITK images and file names\n output_data_dict = {}\n\n # Set up the study UID dict\n # This helps match structure sets to relevant images\n # And paired images to each other (e.g. PET/CT)\n study_uid_dict = {}\n\n # Give some user feedback\n logger.debug(f\" Output image name format: {output_image_name_format}\")\n logger.debug(f\" Output structure name format: {output_structure_name_format}\")\n logger.debug(f\" Output dose name format: {output_dose_name_format}\")\n\n # For each unique series UID, process the DICOM files\n for series_uid in dicom_series_dict.keys():\n\n # This function returns four values\n # 1. dicom_type: This is IMAGES, STRUCTURES, DOSES, etc\n # 2. dicom_file_metadata: Some special metadata extracted from the DICOM header\n # 3. initial_dicom: The first DICOM in the series. For doses and structures there is\n # (usually) only one DICOM anyway\n # 4. 
dicom_file_data: The actual SimpleITK image data\n\n for (\n dicom_type,\n dicom_file_metadata,\n initial_dicom,\n dicom_file_data,\n ) in process_dicom_series(\n dicom_series_dict=dicom_series_dict,\n series_uid=series_uid,\n parent_sorting_field=parent_sorting_field,\n return_extra=return_extra,\n initial_sop_class_name_default=initial_sop_class_name_default,\n ):\n\n # Step 1\n # Check the parent sorting field is consistent\n # This would usually be the PatientName, PatientID, or similar\n # Occasionally these will both be blank\n\n parent_sorting_data = dicom_file_metadata[\"parent_sorting_data\"]\n\n if \"parent_sorting_data\" not in output_data_dict.keys():\n output_data_dict[\"parent_sorting_data\"] = parent_sorting_data\n\n else:\n if parent_sorting_data != output_data_dict[\"parent_sorting_data\"]:\n logger.error(\n f\"A conflict was found for the parent sorting field \"\n f\"({parent_sorting_field}): {parent_sorting_data}\"\n )\n logger.error(\"Quitting now.\")\n print(dicom_series_dict_parent.keys())\n sys.exit()\n else:\n logger.info(\n f\" Parent sorting field ({parent_sorting_field}) match found: \"\n f\"{parent_sorting_data}\"\n )\n\n # Step 2\n # Get the study UID\n # Used for indexing DICOM series\n\n study_uid = dicom_file_metadata[\"study_uid\"]\n\n if study_uid not in study_uid_dict.keys():\n try:\n study_uid_index = max(study_uid_dict.values()) + 1\n except AttributeError:\n study_uid_index = 0 # Study UID dict might not exist\n except ValueError:\n study_uid_index = 0 # Study UID dict might be empty\n\n logger.info(f\" Setting study instance UID index: {study_uid_index}\")\n\n study_uid_dict[study_uid] = study_uid_index\n\n else:\n logger.info(\n f\" Study instance UID index already exists: {study_uid_dict[study_uid]}\"\n )\n\n # Step 3\n # Generate names for output files\n\n # Special names\n # ! This can be defined once at the start of the function\n special_name_fields = [\n \"parent_sorting_data\",\n \"study_uid_index\",\n \"image_desc\",\n \"structure_name\",\n ]\n\n # Get the image description (other special names are already defined above)\n image_desc = get_dicom_info_from_description(\n initial_dicom, return_extra=return_extra\n )\n\n # Get all the fields from the user-given name format\n if dicom_type == \"IMAGES\":\n all_naming_fields = [\n i[i.find(\"{\") + 1 :]\n for i in output_image_name_format.split(\"}\")\n if len(i) > 0\n ]\n elif dicom_type == \"STRUCTURES\":\n all_naming_fields = [\n i[i.find(\"{\") + 1 :]\n for i in output_structure_name_format.split(\"}\")\n if len(i) > 0\n ]\n elif dicom_type == \"DOSES\":\n all_naming_fields = [\n i[i.find(\"{\") + 1 :]\n for i in output_dose_name_format.split(\"}\")\n if len(i) > 0\n ]\n\n # Now exclude those that aren't derived from the DICOM header\n dicom_header_tags = [i for i in all_naming_fields if i not in special_name_fields]\n\n naming_info_dict = {}\n for dicom_field in dicom_header_tags:\n try:\n dicom_field_value = initial_dicom[dicom_field].value\n except (AttributeError, KeyError):\n logger.warning(\n f\" Could not find DICOM header {dicom_field}. 
Setting as 0 to \"\n f\"preserve naming convention.\"\n )\n dicom_field_value = 0\n naming_info_dict[dicom_field] = dicom_field_value\n\n if dicom_type == \"IMAGES\":\n\n output_name = output_image_name_format.format(\n parent_sorting_data=parent_sorting_data,\n study_uid_index=study_uid_dict[study_uid],\n image_desc=image_desc,\n **naming_info_dict,\n )\n\n if \"IMAGES\" not in output_data_dict.keys():\n # Make a new entry\n output_data_dict[\"IMAGES\"] = {output_name: dicom_file_data}\n\n else:\n # First check if there is another image of the same name\n\n if output_name not in output_data_dict[\"IMAGES\"].keys():\n output_data_dict[\"IMAGES\"][output_name] = dicom_file_data\n\n else:\n logger.info(\" An image with this name exists, appending.\")\n\n if hasattr(output_data_dict[\"IMAGES\"][output_name], \"__iter__\"):\n output_data_dict[\"IMAGES\"][output_name] = list(\n [output_data_dict[\"IMAGES\"][output_name]]\n )\n\n output_data_dict[\"IMAGES\"][output_name].append(dicom_file_data)\n\n elif dicom_type == \"STRUCTURES\":\n\n for structure_name, structure_image in zip(\n dicom_file_metadata[\"structure_name_list\"], dicom_file_data\n ):\n\n output_name = output_structure_name_format.format(\n parent_sorting_data=parent_sorting_data,\n study_uid_index=study_uid_dict[study_uid],\n image_desc=image_desc,\n structure_name=structure_name,\n **naming_info_dict,\n )\n\n if \"STRUCTURES\" not in output_data_dict.keys():\n # Make a new entry\n output_data_dict[\"STRUCTURES\"] = {output_name: structure_image}\n\n else:\n # First check if there is another structure of the same name\n\n if output_name not in output_data_dict[\"STRUCTURES\"].keys():\n output_data_dict[\"STRUCTURES\"][output_name] = structure_image\n\n else:\n logger.info(\" A structure with this name exists, appending.\")\n if hasattr(\n output_data_dict[\"STRUCTURES\"][output_name], \"__iter__\"\n ):\n output_data_dict[\"STRUCTURES\"][output_name] = list(\n [output_data_dict[\"STRUCTURES\"][output_name]]\n )\n\n output_data_dict[\"STRUCTURES\"][output_name].append(structure_image)\n\n elif dicom_type == \"DOSES\":\n\n output_name = output_dose_name_format.format(\n parent_sorting_data=parent_sorting_data,\n study_uid_index=study_uid_dict[study_uid],\n **naming_info_dict,\n )\n\n if \"DOSES\" not in output_data_dict.keys():\n # Make a new entry\n output_data_dict[\"DOSES\"] = {output_name: dicom_file_data}\n\n else:\n # First check if there is another image of the same name\n\n if output_name not in output_data_dict[\"DOSES\"].keys():\n output_data_dict[\"DOSES\"][output_name] = dicom_file_data\n\n else:\n logger.info(\" An image with this name exists, appending.\")\n\n if isinstance(output_data_dict[\"DOSES\"][output_name], sitk.Image):\n output_data_dict[\"DOSES\"][output_name] = list(\n [output_data_dict[\"DOSES\"][output_name]]\n )\n\n output_data_dict[\"DOSES\"][output_name].append(dicom_file_data)\n\n if write_to_disk:\n output[str(parent_data)] = write_output_data_to_disk(\n output_data_dict=output_data_dict,\n output_directory=output_directory,\n output_file_suffix=output_file_suffix,\n overwrite_existing_files=overwrite_existing_files,\n )\n else:\n output[str(parent_data)] = output_data_dict\n\n \"\"\"\n TO DO!\n Memory issue with output_data_dict\n Use in inner loop, reset output_data_dict\n \"\"\"\n\n return output\n",
"# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport SimpleITK as sitk\nimport numpy as np\n\nfrom loguru import logger\n\nfrom platipy.imaging.registration.utils import apply_transform, convert_mask_to_reg_structure\n\nfrom platipy.imaging.registration.linear import (\n linear_registration,\n)\n\nfrom platipy.imaging.registration.deformable import (\n fast_symmetric_forces_demons_registration,\n)\n\nfrom platipy.imaging.label.fusion import (\n process_probability_image,\n compute_weight_map,\n combine_labels,\n)\nfrom platipy.imaging.label.iar import run_iar\n\nfrom platipy.imaging.utils.vessel import vessel_spline_generation\n\nfrom platipy.imaging.utils.valve import (\n generate_valve_from_great_vessel,\n generate_valve_using_cylinder,\n)\n\nfrom platipy.imaging.utils.conduction import (\n geometric_sinoatrialnode,\n geometric_atrioventricularnode,\n)\n\nfrom platipy.imaging.utils.crop import label_to_roi, crop_to_roi\n\nfrom platipy.imaging.generation.mask import extend_mask\n\nfrom platipy.imaging.label.utils import binary_encode_structure_list, correct_volume_overlap\n\nATLAS_PATH = \"/atlas\"\nif \"ATLAS_PATH\" in os.environ:\n ATLAS_PATH = os.environ[\"ATLAS_PATH\"]\n\nCARDIAC_SETTINGS_DEFAULTS = {\n \"atlas_settings\": {\n \"atlas_id_list\": [\n \"03\",\n \"05\",\n \"08\",\n \"10\",\n \"11\",\n \"12\",\n \"13\",\n \"16\",\n \"24\",\n \"35\",\n ],\n \"atlas_structure_list\": [\n \"AORTICVALVE\",\n \"ASCENDINGAORTA\",\n \"LANTDESCARTERY\",\n \"LCIRCUMFLEXARTERY\",\n \"LCORONARYARTERY\",\n \"LEFTATRIUM\",\n \"LEFTVENTRICLE\",\n \"MITRALVALVE\",\n \"PULMONARYARTERY\",\n \"PULMONICVALVE\",\n \"RCORONARYARTERY\",\n \"RIGHTATRIUM\",\n \"RIGHTVENTRICLE\",\n \"SVC\",\n \"TRICUSPIDVALVE\",\n \"WHOLEHEART\",\n ],\n \"atlas_path\": ATLAS_PATH,\n \"atlas_image_format\": \"Case_{0}/Images/Case_{0}_CROP.nii.gz\",\n \"atlas_label_format\": \"Case_{0}/Structures/Case_{0}_{1}_CROP.nii.gz\",\n \"crop_atlas_to_structures\": False,\n \"crop_atlas_expansion_mm\": (20, 20, 40),\n \"guide_structure_name\": \"WHOLEHEART\",\n \"superior_extension\": 30,\n },\n \"auto_crop_target_image_settings\": {\n \"expansion_mm\": [20, 20, 40],\n },\n \"linear_registration_settings\": {\n \"reg_method\": \"affine\",\n \"shrink_factors\": [16, 8, 4],\n \"smooth_sigmas\": [0, 0, 0],\n \"sampling_rate\": 0.75,\n \"default_value\": -1000,\n \"number_of_iterations\": 50,\n \"metric\": \"mean_squares\",\n \"optimiser\": \"gradient_descent_line_search\",\n \"verbose\": False,\n },\n \"structure_guided_registration_settings\": {\n \"isotropic_resample\": True,\n \"resolution_staging\": [\n 16,\n 8,\n 2,\n ], # specify voxel size (mm) since isotropic_resample is set\n \"iteration_staging\": [50, 50, 50],\n \"smoothing_sigmas\": [0, 0, 0],\n \"ncores\": 8,\n \"default_value\": 0,\n \"verbose\": False,\n },\n \"deformable_registration_settings\": {\n \"isotropic_resample\": True,\n \"resolution_staging\": [\n 6,\n 3,\n 1.5,\n ], 
# specify voxel size (mm) since isotropic_resample is set\n \"iteration_staging\": [200, 150, 100],\n \"smoothing_sigmas\": [0, 0, 0],\n \"ncores\": 8,\n \"default_value\": 0,\n \"verbose\": False,\n },\n \"iar_settings\": {\n \"reference_structure\": False,\n \"smooth_distance_maps\": True,\n \"smooth_sigma\": 1,\n \"z_score_statistic\": \"mad\",\n \"outlier_method\": \"iqr\",\n \"outlier_factor\": 1.5,\n \"min_best_atlases\": 5,\n \"project_on_sphere\": False,\n },\n \"label_fusion_settings\": {\n \"vote_type\": \"unweighted\",\n \"vote_params\": None,\n \"optimal_threshold\": {\n \"AORTICVALVE\": 0.5,\n \"ASCENDINGAORTA\": 0.44,\n \"LEFTATRIUM\": 0.40,\n \"LEFTVENTRICLE\": 0.45,\n \"MITRALVALVE\": 0.5,\n \"PULMONARYARTERY\": 0.46,\n \"PULMONICVALVE\": 0.5,\n \"RIGHTATRIUM\": 0.38,\n \"RIGHTVENTRICLE\": 0.42,\n \"SVC\": 0.44,\n \"TRICUSPIDVALVE\": 0.5,\n \"WHOLEHEART\": 0.5,\n },\n },\n \"vessel_spline_settings\": {\n \"vessel_name_list\": [\n \"LANTDESCARTERY\",\n \"LCIRCUMFLEXARTERY\",\n \"LCORONARYARTERY\",\n \"RCORONARYARTERY\",\n ],\n \"vessel_radius_mm_dict\": {\n \"LANTDESCARTERY\": 2,\n \"LCIRCUMFLEXARTERY\": 2,\n \"LCORONARYARTERY\": 2,\n \"RCORONARYARTERY\": 2,\n },\n \"scan_direction_dict\": {\n \"LANTDESCARTERY\": \"z\",\n \"LCIRCUMFLEXARTERY\": \"z\",\n \"LCORONARYARTERY\": \"x\",\n \"RCORONARYARTERY\": \"z\",\n },\n \"stop_condition_type_dict\": {\n \"LANTDESCARTERY\": \"count\",\n \"LCIRCUMFLEXARTERY\": \"count\",\n \"LCORONARYARTERY\": \"count\",\n \"RCORONARYARTERY\": \"count\",\n },\n \"stop_condition_value_dict\": {\n \"LANTDESCARTERY\": 2,\n \"LCIRCUMFLEXARTERY\": 2,\n \"LCORONARYARTERY\": 2,\n \"RCORONARYARTERY\": 2,\n },\n },\n \"geometric_segmentation_settings\": {\n \"run_geometric_algorithms\": True,\n \"geometric_name_suffix\": \"_GEOMETRIC\",\n \"atlas_structure_names\": {\n \"atlas_left_ventricle\": \"LEFTVENTRICLE\",\n \"atlas_right_ventricle\": \"RIGHTVENTRICLE\",\n \"atlas_left_atrium\": \"LEFTATRIUM\",\n \"atlas_right_atrium\": \"RIGHTATRIUM\",\n \"atlas_ascending_aorta\": \"ASCENDINGAORTA\",\n \"atlas_pulmonary_artery\": \"PULMONARYARTERY\",\n \"atlas_superior_vena_cava\": \"SVC\",\n \"atlas_whole_heart\": \"WHOLEHEART\",\n },\n \"valve_definitions\": {\n \"mitral_valve_thickness_mm\": 10,\n \"mitral_valve_radius_mm\": 15,\n \"tricuspid_valve_thickness_mm\": 10,\n \"tricuspid_valve_radius_mm\": 15,\n \"pulmonic_valve_thickness_mm\": 10,\n \"aortic_valve_thickness_mm\": 10,\n },\n \"conduction_system_definitions\": {\n \"sinoatrial_node_radius_mm\": 10,\n \"atrioventricular_node_radius_mm\": 10,\n },\n },\n \"postprocessing_settings\": {\n \"run_postprocessing\": True,\n \"binaryfillhole_mm\": 3,\n \"structures_for_binaryfillhole\": [\n \"ASCENDINGAORTA\",\n \"LEFTATRIUM\",\n \"LEFTVENTRICLE\",\n \"RIGHTATRIUM\",\n \"RIGHTVENTRICLE\",\n \"SVC\",\n \"AORTICVALVE\",\n \"MITRALVALVE\",\n \"PULMONICVALVE\",\n \"TRICUSPIDVALVE\",\n \"WHOLEHEART\",\n ],\n \"structures_for_overlap_correction\": [\n \"ASCENDINGAORTA\",\n \"LEFTATRIUM\",\n \"LEFTVENTRICLE\",\n \"RIGHTATRIUM\",\n \"RIGHTVENTRICLE\",\n \"PULMONARYARTERY\",\n \"SVC\",\n ],\n },\n \"return_atlas_guide_structure\": False,\n \"return_as_cropped\": False,\n \"return_proba_as_contours\": False,\n}\n\n\ndef run_cardiac_segmentation(img, guide_structure=None, settings=CARDIAC_SETTINGS_DEFAULTS):\n \"\"\"Runs the atlas-based cardiac segmentation\n\n Args:\n img (sitk.Image):\n settings (dict, optional): Dictionary containing settings for algorithm.\n Defaults to default_settings.\n\n Returns:\n dict: 
Dictionary containing output of segmentation\n \"\"\"\n\n results = {}\n results_prob = {}\n\n return_as_cropped = settings[\"return_as_cropped\"]\n\n \"\"\"\n Initialisation - Read in atlases\n - image files\n - structure files\n\n Atlas structure:\n 'ID': 'Original': 'CT Image' : sitk.Image\n 'Struct A' : sitk.Image\n 'Struct B' : sitk.Image\n 'RIR' : 'CT Image' : sitk.Image\n 'Transform' : transform parameter map\n 'Struct A' : sitk.Image\n 'Struct B' : sitk.Image\n 'DIR' : 'CT Image' : sitk.Image\n 'Transform' : displacement field transform\n 'Weight Map' : sitk.Image\n 'Struct A' : sitk.Image\n 'Struct B' : sitk.Image\n\n\n \"\"\"\n\n logger.info(\"\")\n # Settings\n atlas_path = settings[\"atlas_settings\"][\"atlas_path\"]\n atlas_id_list = settings[\"atlas_settings\"][\"atlas_id_list\"]\n atlas_structure_list = settings[\"atlas_settings\"][\"atlas_structure_list\"]\n\n atlas_image_format = settings[\"atlas_settings\"][\"atlas_image_format\"]\n atlas_label_format = settings[\"atlas_settings\"][\"atlas_label_format\"]\n\n crop_atlas_to_structures = settings[\"atlas_settings\"][\"crop_atlas_to_structures\"]\n crop_atlas_expansion_mm = settings[\"atlas_settings\"][\"crop_atlas_expansion_mm\"]\n\n atlas_set = {}\n for atlas_id in atlas_id_list:\n atlas_set[atlas_id] = {}\n atlas_set[atlas_id][\"Original\"] = {}\n\n image = sitk.ReadImage(f\"{atlas_path}/{atlas_image_format.format(atlas_id)}\")\n\n structures = {\n struct: sitk.ReadImage(f\"{atlas_path}/{atlas_label_format.format(atlas_id, struct)}\")\n for struct in atlas_structure_list\n }\n\n if crop_atlas_to_structures:\n logger.info(f\"Automatically cropping atlas: {atlas_id}\")\n\n original_volume = np.product(image.GetSize())\n\n crop_box_size, crop_box_index = label_to_roi(\n structures.values(), expansion_mm=crop_atlas_expansion_mm\n )\n\n image = crop_to_roi(image, size=crop_box_size, index=crop_box_index)\n\n final_volume = np.product(image.GetSize())\n\n logger.info(f\" > Volume reduced by factor {original_volume/final_volume:.2f}\")\n\n for struct in atlas_structure_list:\n structures[struct] = crop_to_roi(\n structures[struct], size=crop_box_size, index=crop_box_index\n )\n\n atlas_set[atlas_id][\"Original\"][\"CT Image\"] = image\n\n for struct in atlas_structure_list:\n atlas_set[atlas_id][\"Original\"][struct] = structures[struct]\n\n \"\"\"\n Step 1 - Automatic cropping\n If we have a guide structure:\n - use structure to crop target image\n\n Otherwise:\n - using a quick registration to register each atlas\n - expansion of the bounding box to ensure entire volume of interest is enclosed\n - target image is cropped\n \"\"\"\n\n expansion_mm = settings[\"auto_crop_target_image_settings\"][\"expansion_mm\"]\n\n if guide_structure:\n\n crop_box_size, crop_box_index = label_to_roi(guide_structure, expansion_mm=expansion_mm)\n img_crop = crop_to_roi(img, crop_box_size, crop_box_index)\n\n guide_structure = crop_to_roi(guide_structure, crop_box_size, crop_box_index)\n target_reg_structure = convert_mask_to_reg_structure(guide_structure, expansion=2)\n\n else:\n quick_reg_settings = {\n \"reg_method\": \"similarity\",\n \"shrink_factors\": [8],\n \"smooth_sigmas\": [0],\n \"sampling_rate\": 0.75,\n \"default_value\": -1000,\n \"number_of_iterations\": 25,\n \"final_interp\": sitk.sitkLinear,\n \"metric\": \"mean_squares\",\n \"optimiser\": \"gradient_descent_line_search\",\n }\n\n registered_crop_images = []\n\n logger.info(\"Running initial Translation tranform to crop image volume\")\n\n for atlas_id in atlas_id_list[: 
min([8, len(atlas_id_list)])]:\n\n logger.info(f\" > atlas {atlas_id}\")\n\n # Register the atlases\n atlas_set[atlas_id][\"RIR\"] = {}\n atlas_image = atlas_set[atlas_id][\"Original\"][\"CT Image\"]\n\n reg_image, _ = linear_registration(\n img,\n atlas_image,\n **quick_reg_settings,\n )\n\n registered_crop_images.append(sitk.Cast(reg_image, sitk.sitkFloat32))\n\n del reg_image\n\n combined_image = sum(registered_crop_images) / len(registered_crop_images) > -1000\n\n crop_box_size, crop_box_index = label_to_roi(combined_image, expansion_mm=expansion_mm)\n\n img_crop = crop_to_roi(img, crop_box_size, crop_box_index)\n\n logger.info(\"Calculated crop box:\")\n logger.info(f\" > {crop_box_index}\")\n logger.info(f\" > {crop_box_size}\")\n logger.info(f\" > Vol reduction = {np.product(img.GetSize())/np.product(crop_box_size):.2f}\")\n\n \"\"\"\n Step 2 - Rigid registration of target images\n - Individual atlas images are registered to the target\n - The transformation is used to propagate the labels onto the target\n \"\"\"\n linear_registration_settings = settings[\"linear_registration_settings\"]\n\n logger.info(\n f\"Running {linear_registration_settings['reg_method']} tranform to align atlas images\"\n )\n\n for atlas_id in atlas_id_list:\n # Register the atlases\n\n logger.info(f\" > atlas {atlas_id}\")\n\n atlas_set[atlas_id][\"RIR\"] = {}\n\n if guide_structure:\n guide_structure_name = settings[\"atlas_settings\"][\"guide_structure_name\"]\n target_reg_image = target_reg_structure\n atlas_reg_image = convert_mask_to_reg_structure(\n atlas_set[atlas_id][\"Original\"][guide_structure_name], expansion=2\n )\n\n else:\n target_reg_image = img_crop\n atlas_reg_image = atlas_set[atlas_id][\"Original\"][\"CT Image\"]\n\n _, initial_tfm = linear_registration(\n target_reg_image,\n atlas_reg_image,\n **linear_registration_settings,\n )\n\n # Save in the atlas dict\n atlas_set[atlas_id][\"RIR\"][\"Transform\"] = initial_tfm\n\n if guide_structure:\n atlas_set[atlas_id][\"RIR\"][\"Reg Mask\"] = apply_transform(\n input_image=atlas_reg_image,\n reference_image=img_crop,\n transform=initial_tfm,\n default_value=0,\n interpolator=sitk.sitkLinear,\n )\n\n expanded_atlas_guide_structure = extend_mask(\n atlas_set[atlas_id][\"Original\"][guide_structure_name],\n direction=(\"ax\", \"sup\"),\n extension_mm=settings[\"atlas_settings\"][\"superior_extension\"],\n interior_mm_shape=settings[\"atlas_settings\"][\"superior_extension\"] / 2,\n )\n\n atlas_set[atlas_id][\"RIR\"][guide_structure_name + \"EXPANDED\"] = apply_transform(\n input_image=expanded_atlas_guide_structure,\n reference_image=img_crop,\n transform=initial_tfm,\n default_value=0,\n interpolator=sitk.sitkNearestNeighbor,\n )\n\n atlas_set[atlas_id][\"RIR\"][\"CT Image\"] = apply_transform(\n input_image=atlas_set[atlas_id][\"Original\"][\"CT Image\"],\n reference_image=img_crop,\n transform=initial_tfm,\n default_value=-1000,\n interpolator=sitk.sitkLinear,\n )\n\n # sitk.WriteImage(rigid_image, f\"./RR_{atlas_id}.nii.gz\")\n\n for struct in atlas_structure_list:\n input_struct = atlas_set[atlas_id][\"Original\"][struct]\n atlas_set[atlas_id][\"RIR\"][struct] = apply_transform(\n input_image=input_struct,\n reference_image=img_crop,\n transform=initial_tfm,\n default_value=0,\n interpolator=sitk.sitkNearestNeighbor,\n )\n\n atlas_set[atlas_id][\"Original\"] = None\n\n \"\"\"\n Step 3 - Deformable image registration\n - Using Fast Symmetric Diffeomorphic Demons\n \"\"\"\n if guide_structure:\n structure_guided_registration_settings = 
settings[\"structure_guided_registration_settings\"]\n\n logger.info(\"Running structure-guided deformable registration on atlas labels\")\n\n for atlas_id in atlas_id_list:\n\n logger.info(f\" > atlas {atlas_id}\")\n\n # Register the atlases\n atlas_set[atlas_id][\"DIR_STRUCT\"] = {}\n\n deform_image, struct_guided_tfm, _ = fast_symmetric_forces_demons_registration(\n target_reg_structure,\n atlas_set[atlas_id][\"RIR\"][\"Reg Mask\"],\n **structure_guided_registration_settings,\n )\n\n # Save in the atlas dict\n atlas_set[atlas_id][\"DIR_STRUCT\"][\"Reg Mask\"] = deform_image\n atlas_set[atlas_id][\"DIR_STRUCT\"][\"Transform\"] = struct_guided_tfm\n\n atlas_set[atlas_id][\"DIR_STRUCT\"][\"CT Image\"] = apply_transform(\n input_image=atlas_set[atlas_id][\"RIR\"][\"CT Image\"],\n transform=struct_guided_tfm,\n default_value=-1000,\n interpolator=sitk.sitkLinear,\n )\n\n atlas_set[atlas_id][\"DIR_STRUCT\"][guide_structure_name + \"EXPANDED\"] = apply_transform(\n input_image=atlas_set[atlas_id][\"RIR\"][guide_structure_name + \"EXPANDED\"],\n reference_image=img_crop,\n transform=struct_guided_tfm,\n default_value=0,\n interpolator=sitk.sitkNearestNeighbor,\n )\n\n # sitk.WriteImage(deform_image, f\"./DIR_STRUCT_{atlas_id}.nii.gz\")\n\n for struct in atlas_structure_list:\n input_struct = atlas_set[atlas_id][\"RIR\"][struct]\n atlas_set[atlas_id][\"DIR_STRUCT\"][struct] = apply_transform(\n input_image=input_struct,\n transform=struct_guided_tfm,\n default_value=0,\n interpolator=sitk.sitkNearestNeighbor,\n )\n\n atlas_set[atlas_id][\"RIR\"] = None\n\n # Settings\n deformable_registration_settings = settings[\"deformable_registration_settings\"]\n\n logger.info(\"Running DIR to refine atlas image registration\")\n\n for atlas_id in atlas_id_list:\n\n logger.info(f\" > atlas {atlas_id}\")\n\n # Register the atlases\n atlas_set[atlas_id][\"DIR\"] = {}\n\n if guide_structure:\n label = \"DIR_STRUCT\"\n else:\n label = \"RIR\"\n\n atlas_reg_image = atlas_set[atlas_id][label][\"CT Image\"]\n target_reg_image = img_crop\n\n if guide_structure:\n expanded_atlas_mask = atlas_set[atlas_id][\"DIR_STRUCT\"][\n guide_structure_name + \"EXPANDED\"\n ]\n expanded_target_mask = extend_mask(\n guide_structure,\n direction=(\"ax\", \"sup\"),\n extension_mm=settings[\"atlas_settings\"][\"superior_extension\"],\n interior_mm_shape=settings[\"atlas_settings\"][\"superior_extension\"] / 2,\n )\n\n combined_mask = sitk.Maximum(expanded_atlas_mask, expanded_target_mask)\n\n atlas_reg_image = sitk.Mask(atlas_reg_image, combined_mask, outsideValue=-1000)\n atlas_reg_image = sitk.Mask(\n atlas_reg_image, atlas_reg_image > -400, outsideValue=-1000\n )\n\n target_reg_image = sitk.Mask(target_reg_image, combined_mask, outsideValue=-1000)\n target_reg_image = sitk.Mask(\n target_reg_image, atlas_reg_image > -400, outsideValue=-1000\n )\n\n deform_image, dir_tfm, _ = fast_symmetric_forces_demons_registration(\n target_reg_image,\n atlas_reg_image,\n **deformable_registration_settings,\n )\n\n # Save in the atlas dict\n atlas_set[atlas_id][\"DIR\"][\"Transform\"] = dir_tfm\n\n atlas_set[atlas_id][\"DIR\"][\"CT Image\"] = apply_transform(\n input_image=atlas_set[atlas_id][label][\"CT Image\"],\n transform=dir_tfm,\n default_value=-1000,\n interpolator=sitk.sitkLinear,\n )\n\n for struct in atlas_structure_list:\n input_struct = atlas_set[atlas_id][label][struct]\n atlas_set[atlas_id][\"DIR\"][struct] = apply_transform(\n input_image=input_struct,\n transform=dir_tfm,\n default_value=0,\n 
interpolator=sitk.sitkNearestNeighbor,\n )\n\n atlas_set[atlas_id][label] = None\n\n \"\"\"\n Step 4 - Iterative atlas removal\n - This is an automatic process that will attempt to remove inconsistent atlases from the entire set\n\n \"\"\"\n # Compute weight maps\n # Here we use simple GWV as this minises the potentially negative influence of mis-registered\n # atlases\n iar_settings = settings[\"iar_settings\"]\n\n if iar_settings[\"reference_structure\"]:\n\n for atlas_id in atlas_id_list:\n atlas_image = atlas_set[atlas_id][\"DIR\"][\"CT Image\"]\n weight_map = compute_weight_map(img_crop, atlas_image, vote_type=\"global\")\n atlas_set[atlas_id][\"DIR\"][\"Weight Map\"] = weight_map\n\n atlas_set = run_iar(atlas_set=atlas_set, **iar_settings)\n\n else:\n logger.info(\"IAR: No reference structure, skipping iterative atlas removal.\")\n\n \"\"\"\n Step 4 - Vessel Splining\n\n \"\"\"\n vessel_spline_settings = settings[\"vessel_spline_settings\"]\n\n if len(vessel_spline_settings[\"vessel_name_list\"]) > 0:\n\n segmented_vessel_dict = vessel_spline_generation(\n img_crop, atlas_set, **vessel_spline_settings\n )\n else:\n logger.info(\"No vessel splining required, continue.\")\n\n \"\"\"\n Step 5 - Label Fusion\n \"\"\"\n # Compute weight maps\n vote_type = settings[\"label_fusion_settings\"][\"vote_type\"]\n vote_params = settings[\"label_fusion_settings\"][\"vote_params\"]\n\n # Compute weight maps\n for atlas_id in list(atlas_set.keys()):\n atlas_image = atlas_set[atlas_id][\"DIR\"][\"CT Image\"]\n weight_map = compute_weight_map(\n img_crop, atlas_image, vote_type=vote_type, vote_params=vote_params\n )\n atlas_set[atlas_id][\"DIR\"][\"Weight Map\"] = weight_map\n\n combined_label_dict = combine_labels(atlas_set, atlas_structure_list)\n\n \"\"\"\n Step 6 - Paste the cropped structure into the original image space\n \"\"\"\n logger.info(\"Generating binary segmentations.\")\n template_img_binary = sitk.Cast((img * 0), sitk.sitkUInt8)\n template_img_prob = sitk.Cast((img * 0), sitk.sitkFloat64)\n\n vote_structures = settings[\"label_fusion_settings\"][\"optimal_threshold\"].keys()\n vote_structures = [i for i in vote_structures if i in atlas_structure_list]\n\n for structure_name in vote_structures:\n\n probability_map = combined_label_dict[structure_name]\n\n optimal_threshold = settings[\"label_fusion_settings\"][\"optimal_threshold\"][structure_name]\n\n binary_struct = process_probability_image(probability_map, optimal_threshold)\n\n if return_as_cropped:\n results[structure_name] = binary_struct\n\n if settings[\"return_proba_as_contours\"]:\n atlas_contours = [\n atlas_set[atlas_id][\"DIR\"][structure_name] >= 2 for atlas_id in atlas_id_list\n ]\n results_prob[structure_name] = binary_encode_structure_list(atlas_contours)\n\n else:\n results_prob[structure_name] = probability_map\n\n # We also generate another version of the guide_structure using the atlas contours\n # We *can* return this, but probably don't want to\n # Here this check is performed\n if (not settings[\"return_atlas_guide_structure\"]) and (guide_structure is not None):\n results[guide_structure_name] = guide_structure\n results_prob[guide_structure_name] = guide_structure\n\n else:\n if settings[\"return_proba_as_contours\"]:\n atlas_contours = [\n atlas_set[atlas_id][\"DIR\"][structure_name] >= 2 for atlas_id in atlas_id_list\n ]\n probability_img = binary_encode_structure_list(atlas_contours)\n template_img_prob = sitk.Cast((img * 0), sitk.sitkUInt32)\n\n else:\n probability_img = probability_map\n\n # Un-crop 
binary structure\n paste_img_binary = sitk.Paste(\n template_img_binary,\n binary_struct,\n binary_struct.GetSize(),\n (0, 0, 0),\n crop_box_index,\n )\n results[structure_name] = paste_img_binary\n\n # Un-crop probability map\n paste_prob_img = sitk.Paste(\n template_img_prob,\n probability_img,\n probability_img.GetSize(),\n (0, 0, 0),\n crop_box_index,\n )\n results_prob[structure_name] = paste_prob_img\n\n # Un-crop the guide structure\n if (not settings[\"return_atlas_guide_structure\"]) and (guide_structure is not None):\n new_guide_structure = sitk.Paste(\n template_img_binary,\n guide_structure,\n guide_structure.GetSize(),\n (0, 0, 0),\n crop_box_index,\n )\n results[guide_structure_name] = new_guide_structure\n results_prob[guide_structure_name] = new_guide_structure\n\n for structure_name in vessel_spline_settings[\"vessel_name_list\"]:\n binary_struct = segmented_vessel_dict[structure_name]\n\n if return_as_cropped:\n results[structure_name] = binary_struct\n\n vessel_list = [\n atlas_set[atlas_id][\"DIR\"][structure_name] for atlas_id in list(atlas_set.keys())\n ]\n\n else:\n # Un-crop binary vessel\n paste_img_binary = sitk.Paste(\n template_img_binary,\n binary_struct,\n binary_struct.GetSize(),\n (0, 0, 0),\n crop_box_index,\n )\n results[structure_name] = paste_img_binary\n\n vessel_list = []\n for atlas_id in list(atlas_set.keys()):\n paste_img_binary = sitk.Paste(\n template_img_binary,\n atlas_set[atlas_id][\"DIR\"][structure_name],\n atlas_set[atlas_id][\"DIR\"][structure_name].GetSize(),\n (0, 0, 0),\n crop_box_index,\n )\n vessel_list.append(paste_img_binary)\n\n # Encode list of vessels\n encoded_vessels = binary_encode_structure_list(vessel_list)\n results_prob[structure_name] = encoded_vessels\n\n \"\"\"\n Step 7 - Geometric definitions of cardiac valves and conduction system nodes\n \"\"\"\n geometric_segmentation_settings = settings[\"geometric_segmentation_settings\"]\n\n if geometric_segmentation_settings[\"run_geometric_algorithms\"]:\n\n logger.info(\"Computing geometric definitions for valves and conduction system.\")\n\n geom_atlas_names = geometric_segmentation_settings[\"atlas_structure_names\"]\n geom_valve_defs = geometric_segmentation_settings[\"valve_definitions\"]\n geom_conduction_defs = geometric_segmentation_settings[\"conduction_system_definitions\"]\n\n # 1 - MITRAL VALVE\n mv_name = \"MITRALVALVE\" + geometric_segmentation_settings[\"geometric_name_suffix\"]\n results[mv_name] = generate_valve_using_cylinder(\n label_atrium=results[geom_atlas_names[\"atlas_left_atrium\"]],\n label_ventricle=results[geom_atlas_names[\"atlas_left_ventricle\"]],\n radius_mm=geom_valve_defs[\"mitral_valve_radius_mm\"],\n height_mm=geom_valve_defs[\"mitral_valve_thickness_mm\"],\n )\n\n # 2 - TRICUSPID VALVE\n tv_name = \"TRICUSPIDVALVE\" + geometric_segmentation_settings[\"geometric_name_suffix\"]\n results[tv_name] = generate_valve_using_cylinder(\n label_atrium=results[geom_atlas_names[\"atlas_right_atrium\"]],\n label_ventricle=results[geom_atlas_names[\"atlas_right_ventricle\"]],\n radius_mm=geom_valve_defs[\"tricuspid_valve_radius_mm\"],\n height_mm=geom_valve_defs[\"tricuspid_valve_thickness_mm\"],\n )\n\n # 3 - AORTIC VALVE\n av_name = \"AORTICVALVE\" + geometric_segmentation_settings[\"geometric_name_suffix\"]\n results[av_name] = generate_valve_from_great_vessel(\n label_great_vessel=results[geom_atlas_names[\"atlas_ascending_aorta\"]],\n label_ventricle=results[geom_atlas_names[\"atlas_left_ventricle\"]],\n 
valve_thickness_mm=geom_valve_defs[\"aortic_valve_thickness_mm\"],\n )\n\n # 4 - PULMONIC VALVE\n pv_name = \"PULMONICVALVE\" + geometric_segmentation_settings[\"geometric_name_suffix\"]\n results[pv_name] = generate_valve_from_great_vessel(\n label_great_vessel=results[geom_atlas_names[\"atlas_pulmonary_artery\"]],\n label_ventricle=results[geom_atlas_names[\"atlas_right_ventricle\"]],\n valve_thickness_mm=geom_valve_defs[\"pulmonic_valve_thickness_mm\"],\n )\n\n # 5 - SINOATRIAL NODE\n san_name = \"SAN\" + geometric_segmentation_settings[\"geometric_name_suffix\"]\n results[san_name] = geometric_sinoatrialnode(\n label_svc=results[geom_atlas_names[\"atlas_superior_vena_cava\"]],\n label_ra=results[geom_atlas_names[\"atlas_right_atrium\"]],\n label_wholeheart=results[geom_atlas_names[\"atlas_whole_heart\"]],\n radius_mm=geom_conduction_defs[\"sinoatrial_node_radius_mm\"],\n )\n\n # 6 - ATRIOVENTRICULAR NODE\n avn_name = \"AVN\" + geometric_segmentation_settings[\"geometric_name_suffix\"]\n results[avn_name] = geometric_atrioventricularnode(\n label_la=results[geom_atlas_names[\"atlas_left_atrium\"]],\n label_lv=results[geom_atlas_names[\"atlas_left_ventricle\"]],\n label_ra=results[geom_atlas_names[\"atlas_right_atrium\"]],\n label_rv=results[geom_atlas_names[\"atlas_right_ventricle\"]],\n radius_mm=geom_conduction_defs[\"atrioventricular_node_radius_mm\"],\n )\n\n \"\"\"\n Step 8 - Post-processing\n \"\"\"\n postprocessing_settings = settings[\"postprocessing_settings\"]\n\n if postprocessing_settings[\"run_postprocessing\"]:\n logger.info(\"Running post-processing.\")\n\n # Remove any smaller components and perform morphological closing (hole filling)\n binaryfillhole_img = [\n int(postprocessing_settings[\"binaryfillhole_mm\"] / sp) for sp in img.GetSpacing()\n ]\n\n for structure_name in postprocessing_settings[\"structures_for_binaryfillhole\"]:\n\n if structure_name not in results.keys():\n continue\n\n contour_s = results[structure_name]\n contour_s = sitk.RelabelComponent(sitk.ConnectedComponent(contour_s)) == 1\n contour_s = sitk.BinaryMorphologicalClosing(contour_s, binaryfillhole_img)\n results[structure_name] = contour_s\n\n # Remove any overlaps\n input_overlap = {\n s: results[s] for s in postprocessing_settings[\"structures_for_overlap_correction\"]\n }\n output_overlap = correct_volume_overlap(input_overlap)\n\n for s in postprocessing_settings[\"structures_for_overlap_correction\"]:\n results[s] = output_overlap[s]\n\n if return_as_cropped:\n results[\"CROP_IMAGE\"] = img_crop\n\n logger.info(\"Done!\")\n\n return results, results_prob\n"
] |
[
[
"numpy.array",
"numpy.any",
"numpy.where",
"numpy.alen",
"numpy.cross"
],
[
"numpy.product"
]
] |
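Editor's note on the `numpy.product` call flagged for the platipy row above: `np.product` is a deprecated alias of `np.prod` and was removed in NumPy 2.0. A minimal standalone sketch of the same crop-volume bookkeeping with the supported spelling; the image sizes here are made-up stand-ins for what `sitk.Image.GetSize()` returns before and after cropping:

import numpy as np

# Made-up sizes standing in for sitk.Image.GetSize() before and after cropping.
original_size = (512, 512, 180)
cropped_size = (230, 210, 120)

original_volume = np.prod(original_size)  # np.prod is the maintained spelling of np.product
final_volume = np.prod(cropped_size)

print(f" > Volume reduced by factor {original_volume / final_volume:.2f}")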
zouning68/nlp_transfer_learning
|
[
"e5010c5022c6cb0944cdbaaee402fd6d918fad3f"
] |
[
"xlnet-master/train_gpu.py"
] |
[
"\"\"\"Pretraining on GPUs.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os, sys\nimport math\nimport json\nimport time\nimport numpy as np\n\nfrom absl import flags\nimport absl.logging as _logging # pylint: disable=unused-import\n\nimport tensorflow as tf\n\nimport data_utils\nimport model_utils\nfrom gpu_utils import assign_to_gpu, average_grads_and_vars\nimport function_builder\n\n\n# GPU config\nflags.DEFINE_integer(\"num_hosts\", default=1,\n help=\"Number of hosts\")\nflags.DEFINE_integer(\"num_core_per_host\", default=8,\n help=\"Number of cores per host\")\nflags.DEFINE_bool(\"use_tpu\", default=False,\n help=\"Whether to use TPUs for training.\")\n\n# Experiment (data/checkpoint/directory) config\nflags.DEFINE_integer(\"num_passes\", default=1,\n help=\"Number of passed used for training.\")\nflags.DEFINE_string(\"record_info_dir\", default=None,\n help=\"Path to local directory containing `record_info-lm.json`.\")\nflags.DEFINE_string(\"model_dir\", default=None,\n help=\"Estimator model_dir.\")\nflags.DEFINE_string(\"init_checkpoint\", default=None,\n help=\"checkpoint path for initializing the model.\")\n\n# Optimization config\nflags.DEFINE_float(\"learning_rate\", default=1e-4,\n help=\"Maximum learning rate.\")\nflags.DEFINE_float(\"clip\", default=1.0,\n help=\"Gradient clipping value.\")\n# for cosine decay\nflags.DEFINE_float(\"min_lr_ratio\", default=0.001,\n help=\"Minimum ratio learning rate.\")\nflags.DEFINE_integer(\"warmup_steps\", default=0,\n help=\"Number of steps for linear lr warmup.\")\nflags.DEFINE_float(\"adam_epsilon\", default=1e-8,\n help=\"Adam epsilon\")\nflags.DEFINE_string(\"decay_method\", default=\"poly\",\n help=\"poly or cos\")\nflags.DEFINE_float(\"weight_decay\", default=0.0,\n help=\"weight decay\")\n\n# Training config\nflags.DEFINE_integer(\"train_batch_size\", default=16,\n help=\"Size of train batch.\")\nflags.DEFINE_integer(\"train_steps\", default=100000,\n help=\"Total number of training steps.\")\nflags.DEFINE_integer(\"iterations\", default=1000,\n help=\"Number of iterations per repeat loop.\")\nflags.DEFINE_integer(\"save_steps\", default=None,\n help=\"number of steps for model checkpointing.\")\n\n# Data config\nflags.DEFINE_integer('seq_len', default=0,\n help='Sequence length for pretraining.')\nflags.DEFINE_integer('reuse_len', default=0,\n help=\"How many tokens to be reused in the next batch. 
\"\n \"Could be half of seq_len\")\nflags.DEFINE_bool(\"bi_data\", default=True,\n help=\"Use bidirectional data streams, i.e., forward & backward.\")\nflags.DEFINE_integer(\"mask_alpha\", default=6,\n help=\"How many tokens to form a group.\")\nflags.DEFINE_integer(\"mask_beta\", default=1,\n help=\"How many tokens to mask within each group.\")\nflags.DEFINE_integer(\"num_predict\", default=None,\n help=\"Number of tokens to predict in partial prediction.\")\nflags.DEFINE_integer('perm_size', default=None,\n help='perm size.')\nflags.DEFINE_bool(\"uncased\", False,\n help=\"Use uncased inputs or not.\")\nflags.DEFINE_integer(\"n_token\", 32000, help=\"Vocab size\")\n\n# Model config\nflags.DEFINE_integer(\"mem_len\", default=0,\n help=\"Number of steps to cache\")\nflags.DEFINE_bool(\"same_length\", default=False,\n help=\"Same length attention\")\nflags.DEFINE_integer(\"clamp_len\", default=-1,\n help=\"Clamp length\")\n\nflags.DEFINE_integer(\"n_layer\", default=6,\n help=\"Number of layers.\")\nflags.DEFINE_integer(\"d_model\", default=32,\n help=\"Dimension of the model.\")\nflags.DEFINE_integer(\"d_embed\", default=32,\n help=\"Dimension of the embeddings.\")\nflags.DEFINE_integer(\"n_head\", default=4,\n help=\"Number of attention heads.\")\nflags.DEFINE_integer(\"d_head\", default=8,\n help=\"Dimension of each attention head.\")\nflags.DEFINE_integer(\"d_inner\", default=32,\n help=\"Dimension of inner hidden size in positionwise feed-forward.\")\nflags.DEFINE_float(\"dropout\", default=0.0,\n help=\"Dropout rate.\")\nflags.DEFINE_float(\"dropatt\", default=0.0,\n help=\"Attention dropout rate.\")\nflags.DEFINE_bool(\"untie_r\", default=False,\n help=\"Untie r_w_bias and r_r_bias\")\nflags.DEFINE_string(\"summary_type\", default=\"last\",\n help=\"Method used to summarize a sequence into a compact vector.\")\nflags.DEFINE_string(\"ff_activation\", default=\"relu\",\n help=\"Activation type used in position-wise feed-forward.\")\nflags.DEFINE_bool(\"use_bfloat16\", False,\n help=\"Whether to use bfloat16.\")\n\n# Parameter initialization\nflags.DEFINE_enum(\"init\", default=\"normal\",\n enum_values=[\"normal\", \"uniform\"],\n help=\"Initialization method.\")\nflags.DEFINE_float(\"init_std\", default=0.02,\n help=\"Initialization std when init is normal.\")\nflags.DEFINE_float(\"init_range\", default=0.1,\n help=\"Initialization std when init is uniform.\")\n\n\nFLAGS = flags.FLAGS\n\n\ndef get_model_fn():\n def model_fn(features, labels, mems, is_training):\n #### Get loss from inputs\n total_loss, new_mems, monitor_dict = function_builder.get_loss(\n FLAGS, features, labels, mems, is_training)\n\n #### Check model parameters\n num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])\n tf.logging.info('#params: {}'.format(num_params))\n\n # GPU\n assert is_training\n all_vars = tf.trainable_variables()\n grads = tf.gradients(total_loss, all_vars)\n grads_and_vars = list(zip(grads, all_vars))\n\n return total_loss, new_mems, grads_and_vars\n\n return model_fn\n\n\ndef single_core_graph(is_training, features, mems):\n model_fn = get_model_fn()\n\n model_ret = model_fn(\n features=features,\n labels=None,\n mems=mems,\n is_training=is_training)\n\n return model_ret\n\n\ndef create_mems_tf(bsz_per_core):\n mems = [tf.placeholder(dtype=tf.float32,\n shape=[FLAGS.mem_len, bsz_per_core, FLAGS.d_model])\n for layer in range(FLAGS.n_layer)]\n\n return mems\n\n\ndef initialize_mems_np(bsz_per_core):\n mems_np = [np.zeros(shape=[FLAGS.mem_len, bsz_per_core, FLAGS.d_model],\n 
dtype=np.float32)\n for layer in range(FLAGS.n_layer)]\n\n return mems_np\n\n\ndef train(ps_device):\n ##### Get input function and model function\n\n train_input_fn, record_info_dict = data_utils.get_input_fn(\n tfrecord_dir=FLAGS.record_info_dir,\n split=\"train\",\n bsz_per_host=FLAGS.train_batch_size,\n seq_len=FLAGS.seq_len,\n reuse_len=FLAGS.reuse_len,\n bi_data=FLAGS.bi_data,\n num_hosts=1,\n num_core_per_host=1, # set to one no matter how many GPUs\n perm_size=FLAGS.perm_size,\n mask_alpha=FLAGS.mask_alpha,\n mask_beta=FLAGS.mask_beta,\n uncased=FLAGS.uncased,\n num_passes=FLAGS.num_passes,\n use_bfloat16=FLAGS.use_bfloat16,\n num_predict=FLAGS.num_predict)\n\n # for key, info in record_info_dict.items():\n tf.logging.info(\"num of batches {}\".format(record_info_dict[\"num_batch\"]))\n\n ##### Create input tensors / placeholders\n bsz_per_core = FLAGS.train_batch_size // FLAGS.num_core_per_host\n\n params = {\n \"batch_size\": FLAGS.train_batch_size # the whole batch\n }\n train_set = train_input_fn(params)\n\n example = train_set.make_one_shot_iterator().get_next()\n\n if FLAGS.num_core_per_host > 1:\n examples = [{} for _ in range(FLAGS.num_core_per_host)]\n for key in example.keys():\n vals = tf.split(example[key], FLAGS.num_core_per_host, 0)\n for device_id in range(FLAGS.num_core_per_host):\n examples[device_id][key] = vals[device_id]\n else:\n examples = [example]\n\n ##### Create computational graph\n tower_mems, tower_losses, tower_new_mems, tower_grads_and_vars = [], [], [], []\n\n for i in range(FLAGS.num_core_per_host):\n reuse = True if i > 0 else None\n with tf.device(assign_to_gpu(i, ps_device)), \\\n tf.variable_scope(tf.get_variable_scope(), reuse=reuse):\n\n # The mems for each tower is a dictionary\n mems_i = {}\n if FLAGS.mem_len:\n mems_i[\"mems\"] = create_mems_tf(bsz_per_core)\n\n loss_i, new_mems_i, grads_and_vars_i = single_core_graph(\n is_training=True,\n features=examples[i],\n mems=mems_i)\n\n tower_mems.append(mems_i)\n tower_losses.append(loss_i)\n tower_new_mems.append(new_mems_i)\n tower_grads_and_vars.append(grads_and_vars_i)\n\n ## average losses and gradients across towers\n if len(tower_losses) > 1:\n loss = tf.add_n(tower_losses) / len(tower_losses)\n grads_and_vars = average_grads_and_vars(tower_grads_and_vars)\n else:\n loss = tower_losses[0]\n grads_and_vars = tower_grads_and_vars[0]\n\n ## get train op\n train_op, learning_rate, gnorm = model_utils.get_train_op(FLAGS, None,\n grads_and_vars=grads_and_vars)\n global_step = tf.train.get_global_step()\n\n ##### Training loop\n # initialize mems\n tower_mems_np = []\n for i in range(FLAGS.num_core_per_host):\n mems_i_np = {}\n for key in tower_mems[i].keys():\n mems_i_np[key] = initialize_mems_np(bsz_per_core)\n tower_mems_np.append(mems_i_np)\n\n saver = tf.train.Saver()\n\n gpu_options = tf.GPUOptions(allow_growth=True)\n\n model_utils.init_from_checkpoint(FLAGS, global_vars=True)\n\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,\n gpu_options=gpu_options)) as sess:\n sess.run(tf.global_variables_initializer())\n\n fetches = [loss, tower_new_mems, global_step, gnorm, learning_rate, train_op]\n\n total_loss, prev_step = 0., -1\n while True:\n feed_dict = {}\n for i in range(FLAGS.num_core_per_host):\n for key in tower_mems_np[i].keys():\n for m, m_np in zip(tower_mems[i][key], tower_mems_np[i][key]):\n feed_dict[m] = m_np\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n loss_np, tower_mems_np, curr_step = fetched[:3]\n total_loss += loss_np\n\n if curr_step > 0 and 
curr_step % FLAGS.iterations == 0:\n curr_loss = total_loss / (curr_step - prev_step)\n tf.logging.info(\"[{}] | gnorm {:.2f} lr {:8.6f} \"\n \"| loss {:.2f} | pplx {:>7.2f}, bpc {:>7.4f}\".format(\n curr_step, fetched[-3], fetched[-2],\n curr_loss, math.exp(curr_loss), curr_loss / math.log(2)))\n total_loss, prev_step = 0., curr_step\n\n if curr_step > 0 and curr_step % FLAGS.save_steps == 0:\n save_path = os.path.join(FLAGS.model_dir, \"model.ckpt\")\n saver.save(sess, save_path)\n tf.logging.info(\"Model saved in path: {}\".format(save_path))\n\n if curr_step >= FLAGS.train_steps:\n break\n\n\ndef main(unused_argv):\n del unused_argv # Unused\n\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # Get corpus info\n FLAGS.n_token = data_utils.VOCAB_SIZE\n tf.logging.info(\"n_token {}\".format(FLAGS.n_token))\n\n if not tf.gfile.Exists(FLAGS.model_dir):\n tf.gfile.MakeDirs(FLAGS.model_dir)\n\n train(\"/gpu:0\")\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] |
[
[
"tensorflow.trainable_variables",
"tensorflow.logging.set_verbosity",
"numpy.zeros",
"tensorflow.gfile.Exists",
"tensorflow.train.Saver",
"tensorflow.gradients",
"tensorflow.add_n",
"tensorflow.ConfigProto",
"tensorflow.gfile.MakeDirs",
"numpy.prod",
"tensorflow.placeholder",
"tensorflow.get_variable_scope",
"tensorflow.split",
"tensorflow.app.run",
"tensorflow.train.get_global_step",
"tensorflow.global_variables_initializer",
"tensorflow.GPUOptions"
]
] |
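Editor's note on the xlnet row above: its multi-GPU loop splits one global batch into per-tower feeds with `tf.split` before building a graph per device. A framework-free sketch of that even-split bookkeeping, assuming a toy feature dict in place of the TFRecord iterator output:

import numpy as np

# Toy stand-in for the per-GPU split done with tf.split in the script above:
# the global batch is divided evenly across num_core_per_host towers.
train_batch_size, num_core_per_host = 16, 4
bsz_per_core = train_batch_size // num_core_per_host  # matches the script's bookkeeping

example = {"input": np.arange(train_batch_size * 8).reshape(train_batch_size, 8)}
examples = [{} for _ in range(num_core_per_host)]
for key, val in example.items():
    for device_id, chunk in enumerate(np.split(val, num_core_per_host, axis=0)):
        examples[device_id][key] = chunk

assert all(e["input"].shape == (bsz_per_core, 8) for e in examples)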
onurerkin/prohack
|
[
"51665841de04de4a7d44a3aeacec8e9142110cea"
] |
[
"src/models/dnn_regressor.py"
] |
[
"from typing import Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom src.models.dnn_regressor_funcs import (\n _compile_model,\n _create_keras_model,\n _fit_model,\n _to_input_list,\n)\n\n\ndef predict(model: tf.keras.Model, X_test: pd.DataFrame, cate_cols: list) -> np.array:\n \"\"\"\n predict function\n Args:\n model: keras model fit by fit_model\n X_test: Test features\n cate_cols: categorical columns list\n\n Returns: y_pred\n\n \"\"\"\n X_test_list = _to_input_list(df=X_test, cate_cols=cate_cols)\n y_pred = model.predict(X_test_list)\n return y_pred\n\n\ndef train(\n X_train: pd.DataFrame,\n y_train: Union[pd.Series, np.array],\n X_val: pd.DataFrame,\n y_val: Union[pd.Series, np.array],\n layers: list,\n num_classes: int,\n cate_cols: list,\n learning_rate: float,\n epochs: int,\n batch_size: int,\n dropout_rate: float = 0.3,\n) -> Tuple[tf.keras.callbacks.History, tf.keras.Model]:\n\n \"\"\"\n Training main function that takes dataset and parameters as input and returns the trained model with history\n Args:\n X_train: Train features\n y_train: train labels\n X_val: Validation labels\n y_val: validation labels\n layers: List of nodes in hidden layers\n num_classes: Number of classes in target variable\n cate_cols: categorical columns list\n learning_rate: learning rate\n epochs: number of epochs\n batch_size: batch size\n dropout_rate: dropout rate\n\n Returns: history of training, trained model\n\n \"\"\"\n\n X_train_list = _to_input_list(df=X_train, cate_cols=cate_cols)\n X_val_list = _to_input_list(df=X_val, cate_cols=cate_cols)\n\n # if len(y_train.shape) == 1:\n # y_train_categorical = tf.keras.utils.to_categorical(\n # y_train, num_classes=num_classes, dtype=\"float32\"\n # )\n #\n # y_val_categorical = tf.keras.utils.to_categorical(\n # y_val, num_classes=num_classes, dtype=\"float32\"\n # )\n y_train = np.array(y_train)\n y_val = np.array(y_val)\n\n model = _create_keras_model(\n X_train=X_train,\n layers=layers,\n num_classes=num_classes,\n dropout_rate=dropout_rate,\n cate_cols=cate_cols,\n )\n\n _compile_model(model=model, num_classes=num_classes, learning_rate=learning_rate)\n history = _fit_model(\n model=model,\n X_train_list=X_train_list,\n y_train=y_train,\n X_val_list=X_val_list,\n y_val=y_val,\n epochs=epochs,\n batch_size=batch_size,\n )\n\n return history, model\n"
] |
[
[
"numpy.array"
]
] |
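Editor's note on the prohack row above: the imported `_to_input_list` helper is not shown in this cell. The sketch below is a purely hypothetical reconstruction (the column split and dtype choice are assumptions, not the project's actual implementation) of what such a helper plausibly does: turn a DataFrame into the list-of-arrays input that a multi-input Keras model with one embedding per categorical column expects.

import numpy as np
import pandas as pd

# Hypothetical stand-in for _to_input_list: one array per categorical column,
# plus one float block holding the remaining numeric features.
def to_input_list(df: pd.DataFrame, cate_cols: list) -> list:
    inputs = [df[col].to_numpy() for col in cate_cols]
    inputs.append(df.drop(columns=cate_cols).to_numpy(dtype="float32"))
    return inputs

df = pd.DataFrame({"cat_a": [0, 1, 2], "x1": [0.1, 0.2, 0.3], "x2": [1.0, 2.0, 3.0]})
print([a.shape for a in to_input_list(df, ["cat_a"])])  # [(3,), (3, 2)]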
Vantoine2019/PCBS_experience_subitizing
|
[
"55bdbfdce4d53f71572ad4afc3942f0e8f84dd66"
] |
[
"create_pictures.py"
] |
[
    "\n\"\"\"\n\nCreation of the images for the numerical determination task\n(to assess the impact of configuration on subitizing)\nVictor ANTOINE - victor.antoine@ens.fr \n\n\"\"\"\n\nimport pygame\nfrom random import sample\nfrom numpy import random, sort\nfrom os import path\nfrom itertools import product\n\nW, H = 960, 540\n\npygame.init()\nscreen = pygame.display.set_mode((W, H), pygame.DOUBLEBUF)\nscreen.fill((0, 0, 0))\n\n#creation of the images in random layout\norigin_x, origin_y = random.randint(50, 910), random.randint(50, 490)\nlist_coord_random_x = list_coords_random_y = []\n\ndef create_liste_coord_random(axe, origin):\n    coord1 = coord2 = origin\n    liste = []\n    liste.append(origin)\n    while coord1 <= axe - 160:\n        coord1 += 80\n        liste.append(coord1) \n    while coord2 >= 110:\n        coord2 -= 80\n        liste.append(coord2) \n    liste = list(sort(liste))\n    return liste\n\nlist_coord_random_x = create_liste_coord_random(W, origin_x)\nlist_coord_random_y = create_liste_coord_random(H, origin_y)\nsystem_coord_random = list(product(list_coord_random_x, list_coord_random_y))\n\nfor version in list(range(1, 11)):\n    for points_number in list(range(1, 11)):\n        screen.fill((0, 0, 0))\n        for (x, y) in sample(system_coord_random, points_number):\n            pygame.draw.circle(screen, (255, 255, 255), (x, y), 30, 0) \n        pygame.image.save(screen, path.join(\"pictures\", \"random\", \\\n            str(points_number) + \"_\" + str(version) + \".png\")) \n\n#creation of the images in configurational layout\ndef create_slot_coord_config(top, left):\n    liste_coord = []\n    for position in [(1, 1), (3, 1), (2, 2), (1, 3), (3, 3)]:\n        liste_coord.append((top + position[0] * ((W - 270)/8),\\\n            left + position[1] * ((H - 270)/4)))\n    return liste_coord\n\ncoord_left_side = create_slot_coord_config(130, 130)\ncoord_mid_side = create_slot_coord_config(303, 130)\ncoord_right_side = create_slot_coord_config(475, 130)\n\nsystem_coord_config = []\nposition = [[2], [1, 3], [1, 2, 3], [0, 1, 3, 4], [0, 1, 2, 3, 4]]\n\nfor number in range(1, 11):\n    list_coord = []\n    \n    if number <= 5:\n        positions = position[number-1]\n        for circle in positions:\n            list_coord.append(coord_mid_side[circle])\n        system_coord_config.append(list_coord)\n    \n    else:\n        for circle in position[4]:\n            list_coord.append(coord_left_side[circle])\n        positions = position[number-6]\n        for circle in positions:\n            list_coord.append(coord_right_side[circle])\n        system_coord_config.append(list_coord)\n\nnumber_index = 1\nfor number in system_coord_config:\n    screen.fill((0, 0, 0))\n    for (x, y) in number:\n        pygame.draw.circle(screen, (255, 255, 255), (int(x), int(y)), 30, 0)\n    pygame.image.save(screen, path.join(\"pictures\", \"config\", \\\n        str(number_index) + \".png\"))\n    number_index += 1\n"
] |
[
[
"numpy.random.randint",
"numpy.sort"
]
] |
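Editor's note on the subitizing row above: the heart of the random layout is `create_liste_coord_random`, which walks outward from a random origin in 80 px steps until it hits the frame margins, then sorts the coordinates. A standalone rendition under the same margins as the script (the demo origin is arbitrary):

import numpy as np

def coords_along_axis(axis_len, origin, step=80):
    # Walk outward from the origin in both directions, keeping the same
    # margins as the script (160 px from the far edge, 110 px from the near one).
    coords = [origin]
    c1 = c2 = origin
    while c1 <= axis_len - 160:
        c1 += step
        coords.append(c1)
    while c2 >= 110:
        c2 -= step
        coords.append(c2)
    return list(np.sort(coords))

print(coords_along_axis(960, np.random.randint(50, 910)))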
MayurJ20/bigmart
|
[
"be0aab9908f9f4f3701b57936bfc2fe91c10eaca"
] |
[
"bigmart.py"
] |
[
"import pickle\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\ndf = pd.read_csv('Train.csv')\r\n\r\n# check for categorical attributes\r\ncat_col = []\r\nfor x in df.dtypes.index:\r\n if df.dtypes[x] == 'object':\r\n cat_col.append(x)\r\ncat_col.remove('Item_Identifier')\r\ncat_col.remove('Outlet_Identifier')\r\n\r\nitem_weight_mean = df.pivot_table(values = \"Item_Weight\", index = 'Item_Identifier')\r\nmiss_bool = df['Item_Weight'].isnull()\r\nfor i, item in enumerate(df['Item_Identifier']):\r\n if miss_bool[i]:\r\n if item in item_weight_mean:\r\n df['Item_Weight'][i] = item_weight_mean.loc[item]['Item_Weight']\r\n else:\r\n df['Item_Weight'][i] = np.mean(df['Item_Weight'])\r\n\r\noutlet_size_mode = df.pivot_table(values='Outlet_Size', columns='Outlet_Type', aggfunc=(lambda x: x.mode()[0]))\r\nmiss_bool = df['Outlet_Size'].isnull()\r\ndf.loc[miss_bool, 'Outlet_Size'] = df.loc[miss_bool, 'Outlet_Type'].apply(lambda x: outlet_size_mode[x])\r\n# replace zeros with mean\r\ndf.loc[:, 'Item_Visibility'].replace([0], [df['Item_Visibility'].mean()], inplace=True)\r\n\r\n# combine item fat content\r\ndf['Item_Fat_Content'] = df['Item_Fat_Content'].replace({'LF':'Low Fat', 'reg':'Regular', 'low fat':'Low Fat'})\r\ndf['Item_Fat_Content'].value_counts()\r\n\r\n#Creation of New Attributes\r\ndf['New_Item_Type'] = df['Item_Identifier'].apply(lambda x: x[:2])\r\ndf['New_Item_Type'] = df['New_Item_Type'].map({'FD':'Food', 'NC':'Non-Consumable', 'DR':'Drinks'})\r\ndf.loc[df['New_Item_Type']=='Non-Consumable', 'Item_Fat_Content'] = 'Non-Edible'\r\n\r\n# create small values for establishment year\r\ndf['Outlet_Years'] = 2013 - df['Outlet_Establishment_Year']\r\n\r\nfrom sklearn.preprocessing import LabelEncoder\r\nle = LabelEncoder()\r\ndf['Outlet'] = le.fit_transform(df['Outlet_Identifier'])\r\ncat_col = ['Item_Fat_Content', 'Item_Type', 'Outlet_Size', 'Outlet_Location_Type', 'Outlet_Type', 'New_Item_Type']\r\nfor col in cat_col:\r\n df[col] = le.fit_transform(df[col])\r\n\r\n#Input Split\r\nX = df.drop(columns=['Outlet_Establishment_Year', 'Item_Identifier', 'Outlet_Identifier', 'Item_Outlet_Sales'])\r\nY = df['Item_Outlet_Sales']\r\n\r\n#Model Training\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.metrics import mean_squared_error\r\ndef train(model, X, Y):\r\n # train the model\r\n model.fit(X, Y) \r\n # predict the training set\r\n pred = model.predict(X) \r\n # perform cross-validation\r\n cv_score = cross_val_score(model, X, Y, scoring='neg_mean_squared_error', cv=5)\r\n cv_score = np.abs(np.mean(cv_score))\r\n\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nmodel = RandomForestRegressor()\r\ntrain(model, X, Y)\r\ncoef = pd.Series(model.feature_importances_, X.columns).sort_values(ascending=False)\r\n\r\nfile = open('model.pkl','wb')\r\n#dump information to that file\r\npickle.dump(model, file)"
] |
[
[
"sklearn.preprocessing.LabelEncoder",
"numpy.mean",
"sklearn.ensemble.RandomForestRegressor",
"pandas.Series",
"pandas.read_csv",
"sklearn.model_selection.cross_val_score"
]
] |
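Editor's note on the bigmart row above: its imputation loop writes through chained indexing (`df['Item_Weight'][i] = ...`), which pandas flags with `SettingWithCopyWarning` and which can silently fail to update the original frame. The same per-item mean fill expressed with `.loc` on a toy frame (the column values here are invented for the demo):

import numpy as np
import pandas as pd

df = pd.DataFrame({"Item_Identifier": ["FDA15", "DRC01", "FDA15"],
                   "Item_Weight": [9.3, np.nan, np.nan]})
item_weight_mean = df.pivot_table(values="Item_Weight", index="Item_Identifier")

# Vectorised fill: map each missing row to its per-item mean, then fall back
# to the overall mean for items with no recorded weight at all.
miss_bool = df["Item_Weight"].isnull()
df.loc[miss_bool, "Item_Weight"] = (
    df.loc[miss_bool, "Item_Identifier"]
    .map(item_weight_mean["Item_Weight"])
    .fillna(df["Item_Weight"].mean())
)
print(df)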
vutriancode/mfea_autoscaling
|
[
"9672ce16c8a4353e8234d536e35e0eb8d1b72673"
] |
[
"lib/includes/utility.py"
] |
[
"\"\"\"\n Author: thangbk2209\n Project: Autoscaling\n Created: 3/15/19 16:48\n Purpose:\n\"\"\"\n\nimport random\nimport os\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nimport tensorflow as tf\n\nfrom config import *\n\n\ndef draw_time_series(data, title, x_label, y_label, file_name):\n plt.plot(data)\n plt.title(title)\n plt.ylabel(y_label)\n plt.xlabel(x_label)\n # plt.legend([/], loc='upper left')\n plt.savefig(file_name + '.png')\n plt.show()\n plt.close()\n\n\ndef get_scaler(scaler_method):\n if scaler_method == 'min_max_scaler':\n return MinMaxScaler(feature_range=(0, 1))\n if scaler_method == 'standard_scaler':\n return StandardScaler()\n else:\n print(f'|-> ERROR: Not support {scaler_method}')\n\n\ndef get_activation(activation_name):\n if activation_name == 'sigmoid':\n return tf.nn.sigmoid\n elif activation_name == 'relu':\n return tf.nn.relu\n elif activation_name == 'tanh':\n return tf.nn.tanh\n elif activation_name == 'elu':\n return tf.nn.elu\n else:\n print(\">>> Can not apply your activation <<<\")\n\n\ndef get_optimizer(optimizer_name, lr):\n if optimizer_name == 'momentum':\n return tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)\n elif optimizer_name == 'adam':\n return tf.train.AdamOptimizer(learning_rate=lr)\n elif optimizer_name == 'rmsprop':\n return tf.train.RMSPropOptimizer(learning_rate=lr)\n else:\n print(\">>> Can not apply your optimizer <<<\")\n\n\ndef early_stopping_decision(array, patience):\n value = array[len(array) - patience - 1]\n arr = array[len(array) - patience:]\n check = 0\n for val in arr:\n if(val > value):\n check += 1\n if(check == patience):\n return False\n else:\n return True\n\n\ndef draw_train_loss(loss_train, loss_valid, save_path):\n plt.plot(loss_train)\n plt.plot(loss_valid)\n\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.savefig(save_path)\n plt.close()\n\n\ndef average(arr):\n return sum(arr) / len(arr)\n\n\ndef create_name(**kwargs):\n key = list(kwargs.keys()) # collect the first key in kwargs dict\n name = []\n for _key in key:\n value = str(kwargs[_key]).replace('[', '')\n value = value.replace(']', '')\n _name = f'{_key}_{value}'\n name.append(_name)\n return '-'.join(name)\n\n\ndef generate_units_size(network_size, layer_size):\n\n assert network_size > 0, 'Network size invalid'\n assert layer_size > 0, 'Layer size invalid'\n\n num_units = []\n for i in range(network_size):\n # num_units.append(random.choice(range(1, layer_size, 1)))\n num_units.append(int(layer_size))\n if layer_size != 2:\n layer_size /= 2\n return num_units\n\n\ndef compute_scale_fitness_value(upper_prediction, lower_prediction, real_value):\n\n rate_real_value_in_prediction_interval = 0\n num_sample = len(upper_prediction)\n\n for i in range(num_sample):\n\n _real_value = real_value[i][0]\n lower_border = lower_prediction[i]\n higher_border = upper_prediction[i]\n\n if _real_value <= higher_border and _real_value >= lower_border:\n rate_real_value_in_prediction_interval += 1 / num_sample\n\n return rate_real_value_in_prediction_interval\n\n\ndef gen_folder_in_path(path):\n path_component = path.split('/')\n path_infor = ''\n for _path_component in path_component:\n path_infor += f'/{_path_component}'\n if not os.path.exists(path_infor):\n os.mkdir(path_infor)\n\n assert os.path.exists(path_infor), f'Can not generate folder in path {path}'\n"
] |
[
[
"tensorflow.train.AdamOptimizer",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"tensorflow.train.MomentumOptimizer",
"tensorflow.train.RMSPropOptimizer",
"matplotlib.pyplot.ylabel",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.show"
]
] |
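Editor's note on the utility row above: the subtlest helper is `early_stopping_decision`, which returns False (stop) only when every one of the last `patience` losses is strictly worse than the loss just before that window. A compact re-expression of the same logic, with a quick check of both branches:

def early_stopping_decision(array, patience):
    # Reference loss just before the patience window; stop only if every
    # later loss is strictly worse than it.
    value = array[len(array) - patience - 1]
    window = array[len(array) - patience:]
    return sum(val > value for val in window) != patience

print(early_stopping_decision([0.9, 0.8, 0.7, 0.75, 0.76, 0.77], patience=3))  # False -> stop
print(early_stopping_decision([0.9, 0.8, 0.7, 0.65, 0.76, 0.77], patience=3))  # True  -> continue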
gmberton/CosPlace
|
[
"0f03cc9fe25919c87627e92535f3693747617eae"
] |
[
"commons.py"
] |
[
"\nimport torch\nimport random\nimport numpy as np\n\n\nclass InfiniteDataLoader(torch.utils.data.DataLoader):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.dataset_iterator = super().__iter__()\n \n def __iter__(self):\n return self\n \n def __next__(self):\n try:\n batch = next(self.dataset_iterator)\n except StopIteration:\n self.dataset_iterator = super().__iter__()\n batch = next(self.dataset_iterator)\n return batch\n\n\ndef make_deterministic(seed=0):\n \"\"\"Make results deterministic. If seed == -1, do not make deterministic.\n Running your script in a deterministic way might slow it down.\n Note that for some packages (eg: sklearn's PCA) this function is not enough.\n \"\"\"\n seed = int(seed)\n if seed == -1:\n return\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef setup_logging(output_folder, exist_ok=False, console=\"debug\",\n info_filename=\"info.log\", debug_filename=\"debug.log\"):\n \"\"\"Set up logging files and console output.\n Creates one file for INFO logs and one for DEBUG logs.\n Args:\n output_folder (str): creates the folder where to save the files.\n exist_ok (boolean): if False throw a FileExistsError if output_folder already exists\n debug (str):\n if == \"debug\" prints on console debug messages and higher\n if == \"info\" prints on console info messages and higher\n if == None does not use console (useful when a logger has already been set)\n info_filename (str): the name of the info file. if None, don't create info file\n debug_filename (str): the name of the debug file. if None, don't create debug file\n \"\"\"\n import os\n import sys\n import logging\n import traceback\n if not exist_ok and os.path.exists(output_folder):\n raise FileExistsError(f\"{output_folder} already exists!\")\n os.makedirs(output_folder, exist_ok=True)\n base_formatter = logging.Formatter('%(asctime)s %(message)s', \"%Y-%m-%d %H:%M:%S\")\n logger = logging.getLogger('')\n logger.setLevel(logging.DEBUG)\n \n if info_filename != None:\n info_file_handler = logging.FileHandler(f'{output_folder}/{info_filename}')\n info_file_handler.setLevel(logging.INFO)\n info_file_handler.setFormatter(base_formatter)\n logger.addHandler(info_file_handler)\n \n if debug_filename != None:\n debug_file_handler = logging.FileHandler(f'{output_folder}/{debug_filename}')\n debug_file_handler.setLevel(logging.DEBUG)\n debug_file_handler.setFormatter(base_formatter)\n logger.addHandler(debug_file_handler)\n \n if console != None:\n console_handler = logging.StreamHandler()\n if console == \"debug\": console_handler.setLevel(logging.DEBUG)\n if console == \"info\": console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(base_formatter)\n logger.addHandler(console_handler)\n \n def my_handler(type_, value, tb):\n logger.info(\"\\n\" + \"\".join(traceback.format_exception(type, value, tb)))\n logging.info(\"Experiment finished (with some errors)\")\n sys.excepthook = my_handler\n\n"
] |
[
[
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed_all"
]
] |
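Editor's note on the CosPlace row above: unlike a plain `DataLoader`, its `InfiniteDataLoader` never raises `StopIteration`, so a step-based training loop can draw batches across epoch boundaries without re-creating the iterator. A usage sketch (requires PyTorch); the class is restated here, lightly condensed, so the snippet runs on its own:

import torch

class InfiniteDataLoader(torch.utils.data.DataLoader):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dataset_iterator = super().__iter__()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self.dataset_iterator)
        except StopIteration:
            # One pass over the dataset is exhausted: start a fresh epoch silently.
            self.dataset_iterator = super().__iter__()
            return next(self.dataset_iterator)

dataset = torch.utils.data.TensorDataset(torch.arange(10).float())
loader = InfiniteDataLoader(dataset, batch_size=4, shuffle=True)
for step, (batch,) in zip(range(7), loader):  # 7 steps spans more than one 3-batch epoch
    print(step, batch.shape)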
12564985/DeFMO
|
[
"8ed9c2963678e2c59c7431ec8786302eea841572"
] |
[
"renderer/render_fmo.py"
] |
[
"\"\"\" render_fmo.py renders obj file to rgb image with fmo model\n\nAviable function:\n- clear_mash: delete all the mesh in the secene\n- scene_setting_init: set scene configurations\n- node_setting_init: set node configurations\n- render: render rgb image for one obj file and one viewpoint\n- render_obj: wrapper function for render() render \n- init_all: a wrapper function, initialize all configurations \n= set_image_path: reset defualt image output folder\n\nauthor baiyu\nmodified by rozumden\n\"\"\"\nimport sys\nimport os\nimport random\nimport pickle\nimport bpy\nimport glob\nimport numpy as np\nfrom mathutils import Vector\nfrom mathutils import Euler\nimport cv2\nfrom PIL import Image\nfrom skimage.draw import line_aa\nfrom scipy import signal\nfrom skimage.measure import regionprops\n# import moviepy.editor as mpy\nfrom array2gif import write_gif\n\nabs_path = os.path.abspath(__file__)\nsys.path.append(os.path.dirname(abs_path))\n\nfrom render_helper import *\nfrom settings import *\nimport settings\nimport pdb\n\ndef renderTraj(pars, H):\n ## Input: pars is either 2x2 (line) or 2x3 (parabola)\n if pars.shape[1] == 2:\n pars = np.concatenate( (pars, np.zeros((2,1))),1)\n ns = 2\n else:\n ns = 5\n ns = np.max([2, ns])\n rangeint = np.linspace(0,1,ns)\n for timeinst in range(rangeint.shape[0]-1):\n ti0 = rangeint[timeinst]\n ti1 = rangeint[timeinst+1]\n start = pars[:,0] + pars[:,1]*ti0 + pars[:,2]*(ti0*ti0)\n end = pars[:,0] + pars[:,1]*ti1 + pars[:,2]*(ti1*ti1)\n start = np.round(start).astype(np.int32)\n end = np.round(end).astype(np.int32)\n rr, cc, val = line_aa(start[0], start[1], end[0], end[1])\n valid = np.logical_and(np.logical_and(rr < H.shape[0], cc < H.shape[1]), np.logical_and(rr > 0, cc > 0))\n rr = rr[valid]\n cc = cc[valid]\n val = val[valid]\n if len(H.shape) > 2:\n H[rr, cc, 0] = 0\n H[rr, cc, 1] = 0\n H[rr, cc, 2] = val\n else:\n H[rr, cc] = val \n return H\n\ndef open_log(temp_folder = g_temp): # redirect output to log file\n logfile = os.path.join(temp_folder,'blender_render.log')\n try:\n os.remove(logfile)\n except OSError:\n pass\n open(logfile, 'a').close()\n old = os.dup(1)\n sys.stdout.flush()\n os.close(1)\n os.open(logfile, os.O_WRONLY)\n return old\n\ndef close_log(old): # disable output redirection\n os.close(1)\n os.dup(old)\n os.close(old)\n\ndef clear_mesh():\n \"\"\" clear all meshes in the secene\n\n \"\"\"\n bpy.ops.object.select_all(action='DESELECT')\n for obj in bpy.data.objects:\n if obj.type == 'MESH':\n obj.select = True\n bpy.ops.object.delete()\n for block in bpy.data.meshes:\n if block.users == 0:\n bpy.data.meshes.remove(block)\n\n for block in bpy.data.materials:\n if block.users == 0:\n bpy.data.materials.remove(block)\n\n for block in bpy.data.textures:\n if block.users == 0:\n bpy.data.textures.remove(block)\n\n for block in bpy.data.images:\n if block.users == 0:\n bpy.data.images.remove(block)\n\ndef scene_setting_init(use_gpu):\n \"\"\"initialize blender setting configurations\n\n \"\"\"\n sce = bpy.context.scene.name\n bpy.data.scenes[sce].render.engine = g_engine_type\n bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent\n\n #output\n bpy.data.scenes[sce].render.image_settings.color_mode = g_rgb_color_mode\n bpy.data.scenes[sce].render.image_settings.color_depth = g_rgb_color_depth\n bpy.data.scenes[sce].render.image_settings.file_format = g_rgb_file_format\n bpy.data.scenes[sce].render.use_overwrite = g_depth_use_overwrite\n bpy.data.scenes[sce].render.use_file_extension = g_depth_use_file_extension \n\n if 
g_ambient_light:\n world = bpy.data.worlds['World']\n world.use_nodes = True\n bg = world.node_tree.nodes['Background']\n bg.inputs[0].default_value[:3] = g_bg_color \n bg.inputs[1].default_value = 1.0\n \n #dimensions\n bpy.data.scenes[sce].render.resolution_x = g_resolution_x\n bpy.data.scenes[sce].render.resolution_y = g_resolution_y\n bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage\n\n if use_gpu:\n bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu\n bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral\n bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral\n bpy.context.user_preferences.addons['cycles'].preferences.devices[0].use = False\n bpy.context.user_preferences.addons['cycles'].preferences.devices[1].use = True\n ndev = len(bpy.context.user_preferences.addons['cycles'].preferences.devices)\n print('Number of devices {}'.format(ndev))\n for ki in range(2,ndev):\n bpy.context.user_preferences.addons['cycles'].preferences.devices[ki].use = False\n bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'\n # bpy.types.CyclesRenderSettings.device = 'GPU'\n bpy.data.scenes[sce].cycles.device = 'GPU'\n\ndef node_setting_init():\n bpy.context.scene.use_nodes = True\n tree = bpy.context.scene.node_tree\n links = tree.links\n for node in tree.nodes:\n tree.nodes.remove(node)\n render_layer_node = tree.nodes.new('CompositorNodeRLayers')\n image_output_node = tree.nodes.new('CompositorNodeOutputFile')\n image_output_node.base_path = g_syn_rgb_folder\n\n links.new(render_layer_node.outputs[0], image_output_node.inputs[0])\n\n # image_output_node = bpy.context.scene.node_tree.nodes[1]\n image_output_node.base_path = g_temp\n image_output_node.file_slots[0].path = 'image-######.png' # blender placeholder #\n \n\ndef render(obj_path, viewpoint, temp_folder):\n \"\"\"render rbg image \n\n render a object rgb image by a given camera viewpoint and\n choose random image as background, only render one image\n at a time.\n\n Args:\n obj_path: a string variable indicate the obj file path\n viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance)\n \"\"\"\n vp = viewpoint\n cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance)\n cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt)\n cam_obj = bpy.data.objects['Camera']\n cam_obj.location[0] = cam_location[0]\n cam_obj.location[1] = cam_location[1]\n cam_obj.location[2] = cam_location[2]\n cam_obj.rotation_euler[0] = cam_rot[0]\n cam_obj.rotation_euler[1] = cam_rot[1]\n cam_obj.rotation_euler[2] = cam_rot[2]\n if not os.path.exists(g_syn_rgb_folder):\n os.mkdir(g_syn_rgb_folder)\n\n obj = bpy.data.objects['model_normalized']\n \n ni = g_fmo_steps\n maxlen = 0.5\n maxrot = 1.57/6\n tri = 0\n # rot_base = np.array([math.pi/2,0,0])\n while tri <= g_max_trials:\n do_repeat = False\n tri += 1\n if not g_apply_texture:\n for oi in range(len(bpy.data.objects)): \n if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':\n continue\n for tempi in range(len(bpy.data.objects[oi].data.materials)): \n if bpy.data.objects[oi].data.materials[tempi].alpha != 1.0:\n return True, True ## transparent object\n\n los_start = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))\n loc_step = Vector((random.uniform(-maxlen/10, maxlen/10), random.uniform(-maxlen, maxlen), random.uniform(-maxlen, maxlen)))/ni\n \n rot_base = np.array((random.uniform(0, 
2*math.pi), random.uniform(0, 2*math.pi), random.uniform(0, 2*math.pi)))\n rot_step = np.array((random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot), random.uniform(-maxrot, maxrot)))/ni\n old = open_log(temp_folder) \n for ki in [0, ni-1]+list(range(1,ni-1)):\n for oi in range(len(bpy.data.objects)): \n if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':\n continue\n bpy.data.objects[oi].location = los_start + loc_step*ki\n bpy.data.objects[oi].rotation_euler = Euler(rot_base + (rot_step*ki))\n bpy.context.scene.frame_set(ki + 1)\n bpy.ops.render.render(write_still=True) #start rendering\n if ki == 0 or ki == (ni-1):\n Mt = cv2.imread(os.path.join(bpy.context.scene.node_tree.nodes[1].base_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)[:,:,-1] > 0\n is_border = ((Mt[0,:].sum()+Mt[-1,:].sum()+Mt[:,0].sum()+Mt[:,-1].sum()) > 0) or Mt.sum()==0\n if is_border:\n if ki == 0:\n close_log(old)\n return False, True ## sample different starting viewpoint\n else:\n do_repeat = True ## just sample another motion direction\n if do_repeat:\n break\n close_log(old)\n if do_repeat == False:\n break\n if do_repeat: ## sample different starting viewpoint\n return False, True\n return False, False\n\ndef make_fmo(path, gt_path, video_path):\n n_im = 5\n background_images = os.listdir(g_background_image_path)\n seq_name = random.choice(background_images)\n seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,\"*.jpg\"))\n if len(seq_images) <= n_im:\n seq_images = glob.glob(os.path.join(g_background_image_path,seq_name,\"*.png\"))\n seq_images.sort()\n bgri = random.randint(n_im,len(seq_images)-1)\n bgr_path = seq_images[bgri]\n\n B0 = cv2.imread(bgr_path)/255\n B = cv2.resize(B0, dsize=(int(g_resolution_x*g_resolution_percentage/100), int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)\n B[B > 1] = 1\n B[B < 0] = 0\n FH = np.zeros(B.shape)\n MH = np.zeros(B.shape[:2])\n pars = np.array([[(B.shape[0]-1)/2-1, (B.shape[1]-1)/2-1], [1.0, 1.0]]).T\n FM = np.zeros(B.shape[:2]+(4,g_fmo_steps,))\n centroids = np.zeros((2,g_fmo_steps))\n for ki in range(g_fmo_steps):\n FM[:,:,:,ki] = cv2.imread(os.path.join(gt_path,'image-{:06d}.png'.format(ki+1)),cv2.IMREAD_UNCHANGED)/g_rgb_color_max\n props = regionprops((FM[:,:,-1,ki]>0).astype(int))\n if len(props) != 1:\n return False\n centroids[:,ki] = props[0].centroid\n for ki in range(g_fmo_steps):\n F = FM[:,:,:-1,ki]*FM[:,:,-1:,ki]\n M = FM[:,:,-1,ki]\n if ki < g_fmo_steps-1:\n pars[:,1] = centroids[:,ki+1] - centroids[:,ki]\n H = renderTraj(pars, np.zeros(B.shape[:2]))\n H /= H.sum()*g_fmo_steps\n for kk in range(3): \n FH[:,:,kk] += signal.fftconvolve(H, F[:,:,kk], mode='same')\n MH += signal.fftconvolve(H, M, mode='same')\n Im = FH + (1 - MH)[:,:,np.newaxis]*B\n Im[Im > 1] = 1\n Im[Im < 0] = 0\n if g_skip_low_contrast:\n Diff = np.sum(np.abs(Im - B),2)\n meanval = np.mean(Diff[MH > 0.05])\n print(\"Contrast {}\".format(meanval))\n if meanval < 0.2:\n return False\n if g_skip_small:\n sizeper = np.sum(MH > 0.01)/(MH.shape[0]*MH.shape[1])\n print(\"Size percentage {}\".format(sizeper))\n if sizeper < 0.05:\n return False\n \n Im = Im[:,:,[2,1,0]]\n Ims = Image.fromarray((Im * 255).astype(np.uint8))\n\n Ims.save(path)\n\n Ball = np.zeros(B.shape+(n_im,))\n Ball[:,:,:,0] = B\n for ki in range(1,n_im):\n bgrki_path = seq_images[bgri-ki]\n Ball[:,:,:,ki] = cv2.resize(cv2.imread(bgrki_path)/255, dsize=(int(g_resolution_x*g_resolution_percentage/100), 
int(g_resolution_y*g_resolution_percentage/100)), interpolation=cv2.INTER_CUBIC)\n Ball[Ball > 1] = 1\n Ball[Ball < 0] = 0\n Bmed = np.median(Ball,3)\n Image.fromarray((B[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr.png'))\n Image.fromarray((Bmed[:,:,[2,1,0]] * 255).astype(np.uint8)).save(os.path.join(gt_path,'bgr_med.png'))\n\n # Ims.save(os.path.join(g_temp,\"I.png\"))\n # Image.fromarray((FH * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,\"FH.png\"))\n # Image.fromarray((MH * 255).astype(np.uint8)).save(os.path.join(g_temp,\"MH.png\"))\n # Image.fromarray((M * 255).astype(np.uint8)).save(os.path.join(g_temp,\"M.png\"))\n # Image.fromarray((F * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,\"F.png\"))\n # Image.fromarray((B0 * 255)[:,:,[2,1,0]].astype(np.uint8)).save(os.path.join(g_temp,\"B.png\"))\n\n if False:\n Fwr = FM[:,:,:-1,:] * FM[:,:,-1:,:] + 1 * (1 - FM[:,:,-1:,:])\n Fwr = (Fwr * 255).astype(np.uint8)\n # Fwr[np.repeat(FM[:,:,-1:,:]==0,3,2)]=255 \n out = cv2.VideoWriter(video_path,cv2.VideoWriter_fourcc(*\"MJPG\"), 6, (F.shape[1],F.shape[0]),True)\n for ki in range(g_fmo_steps):\n out.write(Fwr[:,:,:,ki])\n out.release()\n \n return True\n\ndef render_obj(obj_path, path, objid, obj_name, temp_folder):\n \"\"\" render one obj file by a given viewpoint list\n a wrapper function for render()\n\n Args:\n obj_path: a string variable indicate the obj file path\n \"\"\"\n vps_path = random.sample(g_view_point_file, 1)[0]\n vps = list(load_viewpoint(vps_path))\n random.shuffle(vps)\n save_path = os.path.join(path,\"{}_{:04d}.png\".format(obj_name,objid))\n gt_path = os.path.join(path,\"GT\",\"{}_{:04d}\".format(obj_name,objid))\n video_path = os.path.join(path,\"{}_{:04d}.avi\".format(obj_name,objid))\n if not os.path.exists(gt_path):\n os.mkdir(gt_path)\n image_output_node = bpy.context.scene.node_tree.nodes[1]\n image_output_node.base_path = gt_path\n\n for imt in bpy.data.images:\n bpy.data.images.remove(imt)\n\n if g_apply_texture:\n for oi in range(len(bpy.data.objects)): \n if bpy.data.objects[oi].type == 'CAMERA' or bpy.data.objects[oi].type == 'LAMP':\n continue\n bpy.context.scene.objects.active = bpy.data.objects[oi]\n # pdb.set_trace()\n # for m in bpy.data.materials:\n # bpy.data.materials.remove(m)\n # bpy.ops.object.material_slot_remove()\n \n bpy.ops.object.editmode_toggle()\n bpy.ops.uv.cube_project()\n bpy.ops.object.editmode_toggle()\n\n texture_images = os.listdir(g_texture_path)\n texture = random.choice(texture_images)\n tex_path = os.path.join(g_texture_path,texture)\n \n # mat = bpy.data.materials.new(texture)\n # mat.use_nodes = True\n # nt = mat.node_tree\n # nodes = nt.nodes\n # links = nt.links\n\n # # Image Texture\n # textureNode = nodes.new(\"ShaderNodeTexImage\")\n # textureNode.image = bpy.data.images.load(tex_path)\n # links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color'])\n\n # mat.specular_intensity = 0\n\n # bpy.data.objects[oi].active_material = mat\n # print(bpy.data.objects[oi].active_material)\n for mat in bpy.data.materials:\n nodes = mat.node_tree.nodes\n links = mat.node_tree.links\n textureNode = nodes.new(\"ShaderNodeTexImage\")\n textureNode.image = bpy.data.images.load(tex_path)\n links.new(nodes['Diffuse BSDF'].inputs['Color'], textureNode.outputs['Color'])\n # print(bpy.data.objects[oi].active_material)\n \n tri = 0\n while tri <= g_max_trials:\n tri += 1\n vp = random.sample(vps, 1)[0]\n \n sample_different_object, sample_different_vp = render(obj_path, vp, 
temp_folder)\n\n if sample_different_vp:\n if sample_different_object:\n print('Transparent object!')\n return False\n print('Rendering failed, repeating')\n continue\n success = make_fmo(save_path, gt_path, video_path)\n if success:\n return True\n print('Making FMO failed, repeating')\n return False\n\ndef init_all():\n \"\"\"init everything we need for rendering\n an image\n \"\"\"\n scene_setting_init(g_gpu_render_enable)\n node_setting_init()\n cam_obj = bpy.data.objects['Camera']\n cam_obj.rotation_mode = g_rotation_mode\n\n if g_render_light:\n bpy.data.objects['Lamp'].data.energy = 50\n bpy.ops.object.lamp_add(type='SUN') \n bpy.data.objects['Sun'].data.energy = 5\n\n\n### YOU CAN WRITE YOUR OWN IMPLEMENTATION TO GENERATE DATA\n\ninit_all()\n\nargv = sys.argv\nargv = argv[argv.index(\"--\") + 1:] \nstart_index = int(argv[0])\nstep_index = int(argv[1])\nprint('Start index {}, step index {}'.format(start_index, step_index))\ntemp_folder = g_syn_rgb_folder+g_render_objs[start_index]+'/'\n\nfor obj_name in g_render_objs[start_index:(start_index+step_index)]:\n print(\"Processing object {}\".format(obj_name))\n obj_folder = os.path.join(g_syn_rgb_folder, obj_name)\n if not os.path.exists(obj_folder):\n os.makedirs(obj_folder)\n if not os.path.exists(os.path.join(obj_folder,\"GT\")):\n os.mkdir(os.path.join(obj_folder,\"GT\"))\n\n num = g_shapenet_categlory_pair[obj_name]\n search_path = os.path.join(g_shapenet_path, num, '**','*.obj')\n pathes = glob.glob(search_path, recursive=True)\n random.shuffle(pathes)\n objid = 1\n tri = 0\n while objid <= g_number_per_category:\n print(\" instance {}\".format(objid))\n clear_mesh()\n path = random.sample(pathes, 1)[0]\n old = open_log(temp_folder)\n bpy.ops.import_scene.obj(filepath=path, axis_forward='-Z', axis_up='Y', filter_glob=\"*.obj;*.mtl\", use_split_groups=False, use_split_objects=True)\n # bpy.ops.import_scene.obj(filepath=path)\n close_log(old)\n #combine_objects()\n #scale_objects(0.5)\n result = render_obj(path, obj_folder, objid, obj_name, temp_folder)\n if result:\n objid += 1\n tri = 0\n else:\n print('Error! Rendering another object from the category!')\n tri += 1\n if tri > g_max_trials:\n print('No object find in the category!!!!!!!!!')\n break"
] |
[
[
"numpy.max",
"numpy.array",
"scipy.signal.fftconvolve",
"numpy.zeros",
"numpy.median",
"numpy.sum",
"numpy.round",
"numpy.mean",
"numpy.logical_and",
"numpy.abs",
"numpy.linspace"
]
] |
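The make_fmo routine above implements the fast-moving-object (FMO) formation model: each of the g_fmo_steps sub-frame appearances F_k (with alpha mask M_k) is convolved with a blur kernel H_k rasterized along the estimated trajectory, and the result is composited over the background as I = sum_k H_k*F_k + (1 - sum_k H_k*M_k)·B. Below is a minimal NumPy/SciPy sketch of just that compositing step, with synthetic inputs; the helper name composite_fmo is illustrative and not part of the script itself.

import numpy as np
from scipy import signal

def composite_fmo(B, Fs, Ms, Hs):
    """Blur each sub-frame along its trajectory kernel and blend over B.

    B:  (H, W, 3) background in [0, 1]
    Fs: sharp object appearances, each (H, W, 3)
    Ms: alpha masks, each (H, W)
    Hs: blur kernels, each (H, W), each summing to 1/len(Fs)
    """
    FH = np.zeros_like(B)              # accumulated blurred appearance
    MH = np.zeros(B.shape[:2])         # accumulated blurred alpha
    for F, M, H in zip(Fs, Ms, Hs):
        for c in range(3):             # blur each color channel separately
            FH[:, :, c] += signal.fftconvolve(H, F[:, :, c], mode='same')
        MH += signal.fftconvolve(H, M, mode='same')
    # Composite: blurred object over background, clipped to valid range.
    return np.clip(FH + (1 - MH)[:, :, None] * B, 0, 1)

The script additionally rejects composites whose contrast against the background or whose object size falls below thresholds, which the sketch omits.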
hikjik/chainer
|
[
"20d4d70f5cdacc1f24f243443f5bebc2055c8f8e"
] |
[
"chainer/training/updaters/multiprocess_parallel_updater.py"
] |
[
"import multiprocessing\nimport warnings\n\nimport six\n\nfrom chainer.backends import cuda\nfrom chainer.dataset import convert\nfrom chainer import reporter\nfrom chainer.training.updaters import standard_updater\n\n\ntry:\n from cupy.cuda import nccl\n _available = True\nexcept Exception:\n _available = False\n\nimport numpy\n\n\nclass _Worker(multiprocessing.Process):\n\n def __init__(self, proc_id, pipe, master):\n super(_Worker, self).__init__()\n self.proc_id = proc_id\n self.pipe = pipe\n self.converter = master.converter\n self.model = master._master\n self.device = master._devices[proc_id]\n self.iterator = master._mpu_iterators[proc_id]\n self.n_devices = len(master._devices)\n\n def setup(self):\n _, comm_id = self.pipe.recv()\n self.comm = nccl.NcclCommunicator(self.n_devices, comm_id,\n self.proc_id)\n\n self.model.to_gpu(self.device)\n self.reporter = reporter.Reporter()\n self.reporter.add_observer('main', self.model)\n self.reporter.add_observers('main',\n self.model.namedlinks(skipself=True))\n\n def run(self):\n dev = cuda.Device(self.device)\n dev.use()\n self.setup()\n while True:\n job, data = self.pipe.recv()\n if job == 'finalize':\n dev.synchronize()\n break\n if job == 'update':\n # For reducing memory\n self.model.cleargrads()\n\n batch = self.converter(self.iterator.next(), self.device)\n with self.reporter.scope({}): # pass dummy observation\n loss = _calc_loss(self.model, batch)\n\n self.model.cleargrads()\n loss.backward()\n del loss\n\n gg = gather_grads(self.model)\n nccl_data_type = _get_nccl_data_type(gg.dtype)\n null_stream = cuda.Stream.null\n self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,\n nccl_data_type, nccl.NCCL_SUM, 0,\n null_stream.ptr)\n del gg\n self.model.cleargrads()\n gp = gather_params(self.model)\n nccl_data_type = _get_nccl_data_type(gp.dtype)\n self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0,\n null_stream.ptr)\n scatter_params(self.model, gp)\n del gp\n\n\nclass MultiprocessParallelUpdater(standard_updater.StandardUpdater):\n\n \"\"\"Implementation of a multiprocess parallel GPU Updater.\n\n This is an implementation of :class:`Updater` that uses multiple GPUs\n with multi-process data parallelism. It uses Nvidia NCCL for communication\n between multiple GPUs.\n\n It behaves similarly to\n :class:`~chainer.training.updaters.StandardUpdater`.\n The update routine is modified to support data-parallel\n computation on multiple GPUs in one machine.\n It is based on synchronous parallel SGD: it\n parallelizes the gradient computation over a mini-batch, and updates the\n parameters only in the main device.\n\n It does not transfer the values collected by :class:`Reporter` in the sub\n devices to the main device. So you can only see the reported values in\n the main device.\n\n Args:\n iterators: List of dataset iterator for the training dataset. The\n number of the iterators must be same to the number of GPUs you use.\n optimizer: Optimizer to update parameters. The model should be attached\n to the optimizer.\n converter: Converter function to build input arrays. Each batch\n extracted by the iterator is split equally between the devices and\n then passed with corresponding ``device`` option to this function.\n :func:`~chainer.dataset.concat_examples` is used by default.\n devices: Dictionary or list of devices to which the training data is\n sent. 
The master device will be the first one in the list or the\n value attached to the key ``'main'``.\n auto_new_epoch (bool): If ``True``,\n :meth:`~chainer.Optimizer.new_epoch` of the main optimizer is\n automatically called when the ``is_new_epoch`` attribute of the\n main iterator is ``True``.\n\n \"\"\"\n\n def __init__(self, iterators, optimizer, converter=convert.concat_examples,\n devices=None, auto_new_epoch=True):\n if not MultiprocessParallelUpdater.available():\n raise Exception(\n 'NCCL is not enabled. MultiprocessParallelUpdater '\n 'requires NCCL.\\n'\n 'Please reinstall CuPy after you install NCCL.\\n'\n '(see https://docs-cupy.chainer.org/en/latest/install.html)')\n try:\n cuda.cupy.cuda.driver.ctxGetCurrent()\n _cuda_initialized = True\n except cuda.cupy.cuda.driver.CUDADriverError:\n # The context is not initialized, it will be fine.\n _cuda_initialized = False\n if _cuda_initialized:\n raise RuntimeError(\n 'The CUDA context has been already initialized. '\n 'MultiprocessParallelUpdater assumes the context is '\n 'uninitialized. Please do not call CUDA API before '\n 'MultiprocessParallelUpdater creates processes.')\n\n assert len(iterators) == len(devices)\n for iterator in iterators[1:]:\n assert len(iterator.dataset) == len(iterators[0].dataset)\n\n # Correct optimizer parameters for new minibatch size\n optim = optimizer.__class__.__name__\n if optim in ('Adam', 'AdaGrad', 'RMSprop'):\n optimizer.eps *= len(devices)\n warnings.warn('optimizer.eps is changed to {} '\n 'by MultiprocessParallelUpdater for new batch size.'.\n format(optimizer.eps))\n elif optim in ('RMSpropGraves', 'AdaDelta'):\n optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta\n warnings.warn('optimizer.eps is changed to {} '\n 'by MultiprocessParallelUpdater for new batch size.'.\n format(optimizer.eps))\n elif hasattr(optimizer, 'lr'):\n optimizer.lr /= len(devices)\n warnings.warn('optimizer.lr is changed to {} '\n 'by MultiprocessParallelUpdater for new batch size.'.\n format(optimizer.lr))\n\n super(MultiprocessParallelUpdater, self).__init__(\n iterator=iterators[0],\n optimizer=optimizer,\n converter=converter,\n auto_new_epoch=auto_new_epoch,\n )\n\n if isinstance(devices, dict):\n devices = devices.copy()\n main = devices.pop('main')\n devices = list(six.itervalues(devices))\n devices = [main] + devices\n elif isinstance(devices, (list, tuple)):\n devices = list(devices)\n else:\n raise ValueError(\n 'devices argument should be either dict, list or tuple,'\n ' but {} was given.'.format(type(devices)))\n if devices is None or any(device is None for device in devices):\n raise ValueError('must specify GPU devices')\n\n self._master = optimizer.target\n self._devices = devices\n self._mpu_iterators = iterators\n self._initialized = False\n\n self._pipes = []\n self._workers = []\n self.comm = None\n\n @staticmethod\n def available():\n return _available\n\n def _send_message(self, message):\n for pipe in self._pipes:\n pipe.send(message)\n\n def setup_workers(self):\n if self._initialized:\n return\n self._initialized = True\n\n self._master.cleargrads()\n for i in six.moves.range(1, len(self._devices)):\n pipe, worker_end = multiprocessing.Pipe()\n worker = _Worker(i, worker_end, self)\n worker.start()\n self._workers.append(worker)\n self._pipes.append(pipe)\n\n with cuda.Device(self._devices[0]):\n self._master.to_gpu(self._devices[0])\n if len(self._devices) > 1:\n comm_id = nccl.get_unique_id()\n self._send_message(('set comm_id', comm_id))\n self.comm = 
nccl.NcclCommunicator(len(self._devices),\n comm_id, 0)\n\n def update_core(self):\n self.setup_workers()\n\n self._send_message(('update', None))\n with cuda.Device(self._devices[0]):\n # For reducing memory\n self._master.cleargrads()\n\n optimizer = self.get_optimizer('main')\n iterator = self.get_iterator('main')\n batch = iterator.next()\n batch = self.converter(batch, self._devices[0])\n\n loss = _calc_loss(self._master, batch)\n\n self._master.cleargrads()\n loss.backward()\n\n # NCCL: reduce grads\n null_stream = cuda.Stream.null\n if self.comm is not None:\n gg = gather_grads(self._master)\n nccl_data_type = _get_nccl_data_type(gg.dtype)\n self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,\n nccl_data_type, nccl.NCCL_SUM,\n 0, null_stream.ptr)\n scatter_grads(self._master, gg)\n del gg\n optimizer.update()\n if self.comm is not None:\n gp = gather_params(self._master)\n nccl_data_type = _get_nccl_data_type(gp.dtype)\n self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type,\n 0, null_stream.ptr)\n\n if self.auto_new_epoch and iterator.is_new_epoch:\n optimizer.new_epoch(auto=True)\n\n def finalize(self):\n self._send_message(('finalize', None))\n\n for worker in self._workers:\n worker.join()\n\n\ndef _calc_loss(model, in_arrays):\n if isinstance(in_arrays, tuple):\n return model(*in_arrays)\n elif isinstance(in_arrays, dict):\n return model(**in_arrays)\n else:\n return model(in_arrays)\n\n\ndef size_num_grads(link):\n \"\"\"Count total size of all gradient arrays of a given link\n\n Args:\n link (chainer.link.Link): Target link object.\n \"\"\"\n size = 0\n num = 0\n for param in link.params():\n if param.size == 0:\n continue\n size += param.size\n num += 1\n return size, num\n\n\ndef _memcpy_gather():\n return cuda.elementwise(\n 'raw T ptrs, raw X dtypes, raw Y info',\n 'raw float32 dst',\n '''\n int id_min = id_pre;\n int id_max = num_src;\n while (id_max - id_min > 1) {\n int id = (id_max + id_min) / 2;\n if (i < info[id]) id_max = id;\n else id_min = id;\n }\n int id = id_min;\n\n int i_dst = i;\n int i_src = i;\n if (id > 0) i_src -= info[id];\n\n dst[i_dst] = 0;\n if (ptrs[id] != NULL) {\n if (dtypes[id] == 0) { // fp32\n float *src = reinterpret_cast<float *>(ptrs[id]);\n dst[i_dst] = src[i_src];\n }\n else { // fp16\n float16 *src = reinterpret_cast<float16 *>(ptrs[id]);\n dst[i_dst] = static_cast<float>(src[i_src]);\n }\n }\n id_pre = id;\n ''',\n '_memcpy_gather',\n loop_prep='''\n int num_src = info[0];\n int id_pre = 0;\n ''')\n\n\ndef _gather(link, target):\n size, num = size_num_grads(link)\n\n ptrs = numpy.empty(num, dtype=numpy.uint64)\n dtypes = numpy.empty(num, dtype=numpy.int8)\n info = numpy.empty(num + 1, dtype=numpy.int32)\n info[0] = 0\n i = 0\n for _, param in sorted(link.namedparams()):\n if param.size == 0:\n continue\n ptrs[i] = 0 # NULL pointer\n d = getattr(param, target)\n if d is not None:\n ptrs[i] = d.data.ptr\n dtypes[i] = 0 # fp32\n if param.dtype == numpy.float16:\n dtypes[i] = 1 # fp16\n info[i + 1] = info[i] + param.size\n i += 1\n info[0] = num\n\n ptrs = cuda.to_gpu(ptrs)\n dtypes = cuda.to_gpu(dtypes)\n info = cuda.to_gpu(info)\n\n return _memcpy_gather()(ptrs, dtypes, info, size=size)\n\n\ndef gather_grads(link):\n \"\"\"Put together all gradient arrays and make a single array\n\n Args:\n link (chainer.link.Link): Target link object.\n Return:\n cupy.ndarray\n \"\"\"\n if link.xp is numpy:\n raise RuntimeError('gather_grads works only on GPU.')\n return _gather(link, 'grad')\n\n\ndef gather_params(link):\n \"\"\"Put together all gradient 
arrays and make a single array\n\n    Args:\n        link (chainer.link.Link): Target link object.\n    Return:\n        cupy.ndarray\n    \"\"\"\n    if link.xp is numpy:\n        raise RuntimeError('Link.gather_params works only on GPU.')\n    return _gather(link, 'data')\n\n\ndef _memcpy_scatter():\n    return cuda.elementwise(\n        'raw T ptrs, raw X dtypes, raw Y info, raw float32 array',\n        '',\n        '''\n            int id_min = id_pre;\n            int id_max = num_src;\n            while (id_max - id_min > 1) {\n                int id = (id_max + id_min) / 2;\n                if (i < info[id]) id_max = id;\n                else              id_min = id;\n            }\n            int id = id_min;\n\n            int i_src = i;\n            int i_dst = i;\n            if (id > 0) i_dst -= info[id];\n\n            if (ptrs[id] != NULL) {\n                if (dtypes[id] == 0) { // fp32\n                    float *dst = reinterpret_cast<float *>(ptrs[id]);\n                    dst[i_dst] = array[i_src];\n                }\n                else { // fp16\n                    float16 *dst = reinterpret_cast<float16 *>(ptrs[id]);\n                    dst[i_dst] = static_cast<float16>(array[i_src]);\n                }\n            }\n            id_pre = id;\n        ''',\n        '_memcpy_scatter',\n        loop_prep='''\n                int num_src = info[0];\n                int id_pre = 0;\n            ''')\n\n\ndef _scatter(link, array, target):\n    size, num = size_num_grads(link)\n\n    ptrs = numpy.zeros(num, dtype=numpy.uint64)\n    dtypes = numpy.zeros(num, dtype=numpy.int8)\n    info = numpy.zeros(num + 1, dtype=numpy.int32)\n    info[0] = 0\n    i = 0\n    for _, param in sorted(link.namedparams()):\n        if param.size == 0:\n            continue\n        ptrs[i] = 0  # NULL pointer\n        d = getattr(param, target)\n        if d is None:\n            d = cuda.cupy.zeros(param.shape, dtype=param.dtype)\n            setattr(param, target, d)\n        ptrs[i] = d.data.ptr\n        dtypes[i] = 0  # fp32\n        if param.dtype == numpy.float16:\n            dtypes[i] = 1  # fp16\n        info[i + 1] = info[i] + param.size\n        i += 1\n    if i != num:\n        raise RuntimeError('Number of link parameters does not match.')\n    info[0] = num\n\n    ptrs = cuda.to_gpu(ptrs)\n    dtypes = cuda.to_gpu(dtypes)\n    info = cuda.to_gpu(info)\n\n    return _memcpy_scatter()(ptrs, dtypes, info, array, size=size)\n\n\ndef scatter_grads(link, array):\n    \"\"\"Put back contents of the specified array to the related gradient arrays\n\n    Args:\n        link (chainer.link.Link): Target link object.\n        array (cupy.ndarray): gathered array created by gather_grads()\n    \"\"\"\n    return _scatter(link, array, 'grad')\n\n\ndef scatter_params(link, array):\n    \"\"\"Put back contents of the specified array to the related parameter arrays\n\n    Args:\n        link (chainer.link.Link): Target link object.\n        array (cupy.ndarray): gathered array created by gather_params()\n    \"\"\"\n    return _scatter(link, array, 'data')\n\n\ndef _get_nccl_data_type(dtype):\n    \"\"\"Get data type for NCCL\"\"\"\n\n    if dtype == numpy.float32:\n        nccl_data_type = nccl.NCCL_FLOAT\n    elif dtype == numpy.float16:\n        nccl_data_type = nccl.NCCL_HALF\n    elif dtype == numpy.float64:\n        nccl_data_type = nccl.NCCL_DOUBLE\n    else:\n        raise RuntimeError('Unexpected data type:{}'.format(dtype))\n\n    return nccl_data_type\n"
] |
[
[
"numpy.empty",
"numpy.zeros"
]
] |
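The _gather/_scatter helpers above pack every parameter (or gradient) of a link into one contiguous float32 buffer so a single NCCL reduce or bcast call moves all of them at once; per-array offsets come from a running prefix sum of sizes (the info[] array consumed by the CUDA kernels). Below is a minimal NumPy sketch of the same layout logic, with the GPU pointers and kernels replaced by plain array copies; gather_flat and scatter_flat are illustrative names, not Chainer's API.

import numpy as np

def gather_flat(arrays):
    """Pack 1-D arrays into one float32 buffer using prefix-sum offsets."""
    offsets = np.cumsum([0] + [a.size for a in arrays])
    flat = np.empty(offsets[-1], dtype=np.float32)
    for a, o in zip(arrays, offsets):
        flat[o:o + a.size] = a.astype(np.float32)   # fp16 entries are widened
    return flat, offsets

def scatter_flat(flat, arrays, offsets):
    """Write the flat buffer back into the individual arrays."""
    for a, o in zip(arrays, offsets):
        a[...] = flat[o:o + a.size].astype(a.dtype)  # narrowed back if fp16

params = [np.ones(3, np.float16), np.arange(4, dtype=np.float32)]
flat, off = gather_flat(params)
flat *= 0.5                  # stands in for the NCCL reduce/average step
scatter_flat(flat, params, off)

Flattening first is the design choice that matters here: one large NCCL transfer amortizes launch and synchronization overhead that many small per-parameter transfers would pay repeatedly.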
n-Holmes/deutscheflash
|
[
"1f974a3fffe771c0e552fa40123b27fa3f24674f"
] |
[
"deutscheflash.py"
] |
[
"\"\"\"A simple CLI app to practice grammatical genders of German nouns.\"\"\"\n\nimport argparse\nimport json\nimport pathlib\n\nimport pandas as pd\n\n\nclass WordList:\n \"\"\"Data structure to store a pandas dataframe and some structural details.\n \n Args:\n path (pathlib.Path or None): The path (without suffix) to a wordlist.\n If there is no current list at the path, will create a new list.\n If no path is provided the WordList will not be fully initialized and will\n require a subsequent call of `load` or `new`.\n \"\"\"\n\n def __init__(self, path=None):\n self.words = None\n self.structure = {}\n\n if path is not None:\n self.load(path)\n\n def load(self, path: pathlib.Path):\n \"\"\"Load stored data.\"\"\"\n try:\n self.words = pd.read_csv(path.with_suffix(\".csv\"))\n with path.with_suffix(\".json\").open() as f:\n self.structure = json.loads(f.read())\n self.words.set_index(self.structure[\"index\"], inplace=True)\n\n except FileNotFoundError as exception:\n raise FileNotFoundError(\n \"No word list found with the specified name.\"\n ) from exception\n\n def new(self, language: str = \"german\", score_inertia: int = 2):\n \"\"\"Create a new wordlist.\n \n Args:\n language (str): The name of a language in the GENDERS dictionary.\n score_inertia (int): Determines how resistant scores are to change.\n Must be a positive integer. Higher values will require more consecutive\n correct answers to reduce the frequency of a specific word.\n \"\"\"\n gender_options = get_languages()\n try:\n genders = gender_options[language]\n except KeyError as exception:\n raise ValueError(f\"Unknown language: {language}\") from exception\n\n columns = [\"Word\", \"Gender\", \"Correct\", \"Wrong\", \"Weight\"]\n\n self.structure = {\n \"language\": language,\n \"genders\": genders,\n \"aliases\": self._get_aliases(genders),\n \"default guesses\": score_inertia,\n \"index\": \"Word\",\n \"column count\": 3,\n }\n self.words = pd.DataFrame(columns=columns)\n self.words.set_index(self.structure[\"index\"], inplace=True)\n\n def save(self, path: pathlib.Path):\n \"\"\"Saves words to a .csv file and structure to a .json.\"\"\"\n self.words.to_csv(path.with_suffix(\".csv\"))\n with path.with_suffix(\".json\").open(mode=\"w\") as f:\n f.write(json.dumps(self.structure))\n\n def format_gender(self, gender_string: str):\n \"\"\"Attempts to find a matching gender for gender_string.\n \n Args:\n gender_string (str): A gender for the word list or an alias of a gender.\n \n Returns:\n The associated gender.\n \n Raises:\n ValueError: `gender_string` does not match any gender or alias.\n \"\"\"\n gender_string = gender_string.lower()\n if gender_string in self.structure[\"genders\"]:\n return gender_string\n if gender_string in self.structure[\"aliases\"]:\n return self.structure[\"aliases\"][gender_string]\n\n raise ValueError(f\"Unknown gender: {gender_string}\")\n\n def add(self, gender: str, word: str):\n \"\"\"Add a new word to the list.\n \n Args:\n gender (str): The gender of the word being added.\n word (str): The word to add.\n \n Raises:\n ValueError: `gender` does not match the current wordlist or the word is\n already present in the list.\n \"\"\"\n gender = self.format_gender(gender)\n word = word.capitalize()\n\n if gender not in self.structure[\"genders\"]:\n raise ValueError(\n f\"{gender} is not a valid gender for the current wordlist.\"\n )\n if word in self.words.index:\n raise ValueError(f\"{word} is already included.\")\n\n n_genders = len(self.structure[\"genders\"])\n row = [\n gender,\n 
self.structure[\"default guesses\"],\n self.structure[\"default guesses\"] * (n_genders - 1),\n (n_genders - 1) / n_genders,\n ]\n self.words.loc[word] = row\n\n def get_words(self, n: int, distribution: str = \"weighted\"):\n \"\"\"Selects and returns a sample of words and their genders.\n\n Args:\n n (int): The number of results wanted.\n distribution (str): The sampling method to use. Either `uniform` or\n `weighted`.\n\n Yields:\n A tuple of strings in the format (word, gender).\n \"\"\"\n if distribution == \"uniform\":\n sample = self.words.sample(n=n)\n\n elif distribution == \"weighted\":\n sample = self.words.sample(n=n, weights=\"Weight\")\n\n else:\n raise ValueError(f\"Unknown value for distribution: {distribution}\")\n\n for row in sample.iterrows():\n yield row[0], row[1].Gender\n\n def update_weight(self, word, guess):\n \"\"\"Update the weighting on a word based on the most recent guess.\n \n Args:\n word (str): The word to update. Should be in the index of self.words.\n guess (bool): Whether the guess was correct or not.\n \"\"\"\n\n row = self.words.loc[word]\n if guess:\n row.Correct += 1\n else:\n row.Wrong += 1\n\n n_genders = len(self.structure[\"genders\"])\n total = row.Correct + row.Wrong\n if not total % n_genders:\n # Throw away some data as evenly as possible to allow for change over time\n # Never throw away the last negative result to avoid question being lost.\n if row.Correct:\n wrongs_to_throw = min(row.Wrong - 1, n_genders - 1)\n row.Wrong -= wrongs_to_throw\n row.Correct -= n_genders - wrongs_to_throw\n else:\n row.wrong -= n_genders\n\n row.Weight = row.Wrong / (row.Correct + row.Wrong)\n\n self.words.loc[word] = row\n\n @staticmethod\n def _get_aliases(genders: dict):\n \"\"\"Create a dictionary of aliases and the genders they refer to.\n May have issues if multiple genders have the same article or first letter.\n \"\"\"\n aliases = {}\n for gender, article in genders.items():\n aliases[gender[0]] = gender\n aliases[article] = gender\n return aliases\n\n\ndef force_console_input(\n query: str,\n allowable,\n onfail: str = \"Input not recognised, please try again.\\n\",\n case_sensitive=False,\n):\n \"\"\"Get an input from the user matching some string in allowable.\n\n Args:\n query (str): The query to issue the user with.\n allowable (str or container): The options which the user is allowed to submit.\n If this is a string, acceptable answers will be substrings.\n For containers acceptable answers will be elements of the container.\n \n Returns:\n The correct input returned\n \n Raises:\n IOError: A request to quit was submitted.\n \"\"\"\n if not allowable:\n raise ValueError(\"At least one entry must be allowable.\")\n\n submission = input(query)\n while True:\n if not case_sensitive:\n submission = submission.lower()\n\n if submission in (\"quit\", \"exit\"):\n raise IOError(\"Exit command received.\")\n if submission in allowable:\n return submission\n\n submission = input(onfail)\n\n\ndef get_languages():\n \"\"\"Gets the language: genders dictionary.\"\"\"\n with open(\"genders.json\", \"r\") as f:\n return json.loads(f.read())\n\n\ndef main():\n \"\"\"Orchestration function for the CLI.\"\"\"\n args = _parse_args()\n path = pathlib.Path(\"lists\", args.words)\n\n try:\n words = _load_words(path)\n except IOError:\n print(\"Exiting.\")\n return\n\n if args.quiz_length is not None:\n if args.quiz_length == 0:\n print(\"Starting quiz in endless mode. 
Answer `quit` to end the quiz.\")\n            correct, answered = _quiz_endless(words)\n        elif args.quiz_length > 0:\n            print(f\"Starting quiz with length {args.quiz_length}...\\n\")\n            correct, answered, _ = _quiz(words, args.quiz_length)\n        else:\n            raise ValueError(f\"Invalid quiz length: {args.quiz_length}.\")\n\n        print(f\"\\nYou successfully answered {correct} out of {answered} questions!\")\n\n    elif args.add_words:\n        print(\"Entering word addition mode...\")\n        _add_words(words)\n\n    elif args.load_words:\n        print(f\"Importing word file {args.load_words}...\")\n        added, reps = _import_words(words, args.load_words)\n        print(f\"{added} words successfully imported. {reps} duplicates skipped.\")\n\n    elif args.reset_scores:\n        print(\"Resetting scores\")\n        words = WordList()\n        words.new()\n        _import_words(words, path.with_suffix(\".csv\"))\n\n    _save_and_exit(words, path)\n\n\ndef _parse_args():\n    parser = argparse.ArgumentParser(\n        description=\"Flashcard app for German grammatical genders.\"\n    )\n    mode = parser.add_mutually_exclusive_group(required=True)\n    mode.add_argument(\n        \"-q\", \"--quiz\", type=int, help=\"Start the app in quiz mode.\", dest=\"quiz_length\"\n    )\n    mode.add_argument(\n        \"-a\",\n        \"--add-words\",\n        action=\"store_true\",\n        help=\"Start the app in manual word addition mode.\",\n    )\n    mode.add_argument(\n        \"-l\",\n        \"--load-words\",\n        help=\"Concatenates a prewritten list of words into the saved WordList.\",\n    )\n    mode.add_argument(\n        \"-r\",\n        \"--reset-scores\",\n        action=\"store_true\",\n        help=\"Reset all scores in the specified word list.\",\n    )\n    parser.add_argument(\n        \"-w\", \"--words\", default=\"main_list\", help=\"The name of the WordList to use.\"\n    )\n    return parser.parse_args()\n\n\ndef _load_words(path):\n    \"\"\"Encapsulates the loading/newfile creation logic.\"\"\"\n    try:\n        words = WordList(path)\n        print(\"Words successfully loaded.\")\n\n    except FileNotFoundError:\n        print(\"No word list found with the given name.\")\n        newfile = force_console_input(\n            \"Would you like to create a new wordlist with the specified name? Y/N: \",\n            allowable=[\"y\", \"yes\", \"n\", \"no\"],\n        )\n        if newfile[0] == \"y\":\n            words = WordList()\n            language = force_console_input(\n                query=\"Which language should be used?\\n\",\n                onfail=\"Language not recognised, please try again or check genders.json\\n\",\n                allowable=get_languages(),\n            )\n            words.new(language=language)\n            print(f\"New WordList for language {language} successfully created.\")\n        else:\n            raise IOError\n\n    return words\n\n\ndef _quiz(wordlist, quiz_length):\n    \"\"\"Runs a command line quiz of the specified length.\"\"\"\n    pd.options.mode.chained_assignment = None  # Suppresses SettingWithCopyWarning\n\n    answered, correct = 0, 0\n    for word, gender in wordlist.get_words(quiz_length):\n        guess = input(f\"What is the gender of {word}? \").lower()\n        if guess in (\"quit\", \"exit\"):\n            break\n\n        answered += 1\n\n        try:\n            guess = wordlist.format_gender(guess)\n        except ValueError:\n            print(\"Unrecognised guess, skipping.\\n\")\n            continue\n\n        accurate = gender == guess\n        wordlist.update_weight(word, accurate)\n        if accurate:\n            print(\"Correct!\\n\")\n            correct += 1\n        else:\n            print(f\"Incorrect! 
The correct gender is {gender}.\\n\")\n\n return correct, answered, answered == quiz_length\n\n\ndef _quiz_endless(wordlist):\n \"\"\"Runs quizzes in batches of 20 until quit or exit is answered.\"\"\"\n correct, answered = 0, 0\n finished = False\n while not finished:\n results = _quiz(wordlist, 20)\n correct += results[0]\n answered += results[1]\n finished = not results[2]\n\n return correct, answered\n\n\ndef _add_words(wordlist):\n \"\"\"CLI for adding words individually to the wordlist.\"\"\"\n print(\"Type a word with gender eg `m Mann` or `quit` when finished.\")\n while True:\n input_str = input()\n if input_str in (\"quit\", \"exit\"):\n print(\"Exiting word addition mode...\")\n break\n\n try:\n gender, word = input_str.split()\n wordlist.add(gender, word)\n except ValueError as e:\n print(e)\n\n\ndef _import_words(wordlist, import_path):\n \"\"\"Loads words from a csv file at import_path into `wordlist`.\"\"\"\n new_words = pd.read_csv(import_path)\n words_added = 0\n repetitions = 0\n for _, row in new_words.iterrows():\n try:\n wordlist.add(row.Gender, row.Word)\n words_added += 1\n except ValueError:\n repetitions += 1\n\n return words_added, repetitions\n\n\ndef _save_and_exit(wordlist, path):\n while True:\n try:\n wordlist.save(path=path)\n # TODO: Can WordList be made into a context manager?\n print(\"WordList successfully saved, goodbye!\")\n break\n except PermissionError:\n print(\"PermissionError! File may be open in another window.\")\n retry = force_console_input(\"Try again? Y/N: \", [\"y\", \"yes\", \"n\", \"no\"])\n if retry[0] == \"y\":\n continue\n else:\n print(\"Exiting without saving changes.\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
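WordList.update_weight above keeps Weight = Wrong / (Correct + Wrong) as the sampling weight and, whenever the total guess count hits a multiple of the gender count, sheds old results so the weight can keep moving after many answers. A small self-contained sketch of that bookkeeping, using plain integers instead of the pandas row (illustrative only):

def update_weight(correct, wrong, n_genders, guessed_right):
    """Return updated (correct, wrong, weight) after one guess."""
    if guessed_right:
        correct += 1
    else:
        wrong += 1
    if (correct + wrong) % n_genders == 0:
        # Shed n_genders old results, keeping at least one wrong answer
        # so the word never reaches weight 0 and vanishes from sampling.
        if correct:
            shed_wrong = min(wrong - 1, n_genders - 1)
            wrong -= shed_wrong
            correct -= n_genders - shed_wrong
        else:
            wrong -= n_genders
    return correct, wrong, wrong / (correct + wrong)

print(update_weight(2, 3, 3, guessed_right=True))   # -> (2, 1, 0.333...)

Capping the retained counts is what gives the scheme its "inertia": a word answered correctly many times still needs only a few consecutive correct answers to drop its sampling weight back down.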
thisisshi/sdk
|
[
"99c52caffeebbfd41f43931fea2b5b1323841892"
] |
[
"python/avi/migrationtools/netscaler_converter/ns_util.py"
] |
[
"import csv\nimport logging\nimport os\nimport copy\nimport re\nimport random\nfrom functools import reduce\n\nimport ast\nimport pandas\nimport pexpect\nimport avi.migrationtools.netscaler_converter.ns_constants as ns_constants\nfrom pkg_resources import parse_version\nfrom xlsxwriter import Workbook\nfrom openpyxl import load_workbook\nfrom urllib.parse import urlparse\nfrom OpenSSL import crypto\nfrom socket import gethostname\nfrom avi.migrationtools.netscaler_converter.ns_constants \\\n import (STATUS_SKIPPED, STATUS_SUCCESSFUL, STATUS_INDIRECT,\n STATUS_NOT_APPLICABLE, STATUS_PARTIAL, STATUS_DATASCRIPT,\n STATUS_INCOMPLETE_CONFIGURATION, STATUS_COMMAND_NOT_SUPPORTED,\n OBJECT_TYPE_POOL_GROUP, OBJECT_TYPE_POOL, STATUS_NOT_IN_USE,\n OBJECT_TYPE_HTTP_POLICY_SET, STATUS_LIST, COMPLEXITY_ADVANCED,\n COMPLEXITY_BASIC, OBJECT_TYPE_APPLICATION_PERSISTENCE_PROFILE,\n OBJECT_TYPE_APPLICATION_PROFILE)\nfrom avi.migrationtools.avi_migration_utils import MigrationUtil, update_count\n\nLOG = logging.getLogger(__name__)\n\ncsv_writer_dict_list = []\nskipped_setting = {\n # 'virtual_service': '',\n # 'ssl key and cert': {},\n # 'ssl profile': {},\n # 'pool group': {},\n # 'health monitor': {},\n # 'Httppolicy': {}\n}\n# Added variable for checking progress and get overall object.\nprogressbar_count = 0\ntotal_count = 0\n\n\nclass NsUtil(MigrationUtil):\n\n def add_conv_status(self, line_no, cmd, object_type, full_command, conv_status,\n avi_object=None):\n \"\"\"\n Adds as status row in conversion status csv\n :param line_no: line number of command\n :param object_type:\n :param full_command: netscaler command\n :param conv_status: dict of conversion status\n :param avi_object: Converted objectconverted avi object\n \"\"\"\n\n row = {\n 'Line Number': line_no if line_no else '',\n 'Netscaler Command': cmd if cmd else '',\n 'Object Name': object_type if object_type else '',\n 'Full Command': full_command if full_command else '',\n 'Status': conv_status.get('status', ''),\n 'Skipped settings': str(conv_status.get('skipped', '')),\n 'Indirect mapping': str(conv_status.get('indirect', '')),\n 'Not Applicable': str(conv_status.get('na_list', '')),\n 'User Ignored': str(conv_status.get('user_ignore', '')),\n 'AVI Object': str(avi_object) if avi_object else ''\n }\n csv_writer_dict_list.append(row)\n\n def add_complete_conv_status(self, ns_config, output_dir, avi_config,\n report_name, vs_level_status):\n \"\"\"\n Adds as status row in conversion status csv\n :param ns_config: NS config dict\n :param output_dir: output directory\n :param avi_config: AVI config dict\n :param report_name: name of report\n :param vs_level_status: add vs level details in XL sheet\n \"\"\"\n\n global csv_writer_dict_list\n global progressbar_count\n global total_count\n print(\"Generating Report For Converted Configuration...\")\n ptotal = len(ns_config)\n ppcount = 0\n for config_key in ns_config:\n # increment progressbar count\n ppcount += 1\n config_object = ns_config[config_key]\n msg = \"Generating report\"\n self.print_progress_bar(ppcount, ptotal, msg, prefix='Progress',\n suffix='')\n for element_key in config_object:\n element_object_list = config_object[element_key]\n if isinstance(element_object_list, dict):\n element_object_list = [element_object_list]\n for element_object in element_object_list:\n match = [match for match in csv_writer_dict_list if\n match['Line Number'] == element_object['line_no']]\n if not match:\n ns_complete_command = self.get_netscalar_full_command(\n config_key, element_object)\n # Add 
status incomplete configuration\n self.add_status_row(\n element_object['line_no'], config_key,\n element_object['attrs'][0], ns_complete_command,\n STATUS_INCOMPLETE_CONFIGURATION)\n unique_line_number_list = set()\n row_list = []\n for dict_row in csv_writer_dict_list:\n if dict_row['Line Number'] not in unique_line_number_list:\n unique_line_number_list.add(dict_row['Line Number'])\n row_list.append(dict_row)\n else:\n row = [row for row in row_list\n if row['Line Number'] == dict_row['Line Number']]\n if str(dict_row['AVI Object']).startswith('Skipped'):\n continue\n if dict_row.get('AVI Object', None):\n # Added condition to check unique status.\n if str(row[0]['AVI Object']) != str(dict_row['AVI Object']):\n row[0]['AVI Object'] += '__/__%s' % dict_row[\n 'AVI Object']\n for status in STATUS_LIST:\n status_list = [row for row in row_list if\n row['Status'] == status]\n print('%s: %s' % (status, len(status_list)))\n # add skipped list of each object at vs level\n print(\"Writing Excel Sheet For Converted Configuration...\")\n total_count = total_count + len(row_list)\n if vs_level_status:\n self.vs_per_skipped_setting_for_references(avi_config)\n self.correct_vs_ref(avi_config)\n else:\n # Call to calculate vs complexity\n self.vs_complexity_level()\n # Write status report and pivot table in xlsx report\n self.write_status_report_and_pivot_table_in_xlsx(\n row_list, output_dir, report_name, vs_level_status)\n\n def add_status_row(self, line_no, cmd, object_type, full_command, status,\n avi_object=None):\n \"\"\"\n Adds as status row in conversion status csv\n :param line_no:\n :param cmd: netscaler command\n :param object_type:\n :param full_command:\n :param status: conversion status\n :param avi_object:\n \"\"\"\n global csv_writer_dict_list\n row = {\n 'Line Number': line_no if line_no else '',\n 'Netscaler Command': cmd,\n 'Object Name': object_type,\n 'Full Command': full_command,\n 'Status': status,\n 'AVI Object': str(avi_object) if avi_object else ''\n }\n csv_writer_dict_list.append(row)\n\n def add_csv_headers(self, csv_file):\n \"\"\"\n Adds header line in conversion status file\n :param csv_file: File to which header is to be added\n \"\"\"\n\n global csv_writer\n fieldnames = ['Line Number', 'Netscaler Command', 'Object Name',\n 'Full Command', 'Status', 'Skipped settings',\n 'Indirect mapping', 'Not Applicable', 'User Ignored',\n 'AVI Object']\n csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames,\n lineterminator='\\n', )\n\n csv_writer.writeheader()\n\n def get_avi_lb_algorithm(self, ns_algorithm):\n \"\"\"\n Converts NS LB algorithm to equivalent avi LB algorithm\n :param ns_algorithm: NS algorithm name\n :return: Avi LB algorithm enum value\n \"\"\"\n\n avi_algorithm = 'LB_ALGORITHM_LEAST_CONNECTIONS'\n if ns_algorithm == 'LEASTCONNECTIONS':\n avi_algorithm = 'LB_ALGORITHM_LEAST_CONNECTIONS'\n elif ns_algorithm == 'ROUNDROBIN':\n avi_algorithm = 'LB_ALGORITHM_ROUND_ROBIN'\n elif ns_algorithm in ['LEASTRESPONSETIME', 'LRTM']:\n avi_algorithm = 'LB_ALGORITHM_FASTEST_RESPONSE'\n elif ns_algorithm == 'SOURCEIPHASH':\n avi_algorithm = 'LB_ALGORITHM_CONSISTENT_HASH'\n elif ns_algorithm == 'URLHASH':\n avi_algorithm = 'LB_ALGORITHM_CONSISTENT_HASH_URI'\n return avi_algorithm\n\n def update_algo_for_pools(self, algo, pg_name, avi_config):\n pool_group = [pg for pg in avi_config['PoolGroup'] if\n pg['name'] == pg_name][0]\n for member in pool_group['members']:\n pool_name = self.get_name(member['pool_ref'])\n pool = [pool for pool in avi_config['Pool'] if\n 
pool['name'] == pool_name][0]\n pool['lb_algorithm'] = algo\n\n def get_avi_resp_code(self, respCode):\n \"\"\"\n This function used for getting appropriate response code for avi.\n :param respCode: response code\n :return: returns list of unique responses.\n \"\"\"\n\n avi_resp_codes = []\n codes = []\n for res_code in respCode.split(' '):\n if '-' in res_code:\n codes.extend(res_code.split('-'))\n else:\n codes.append(res_code)\n for code in codes:\n if code and code.strip().isdigit():\n # Converted to int.\n code = int(code.strip())\n if code < 200:\n avi_resp_codes.append(\"HTTP_1XX\")\n elif code < 300:\n avi_resp_codes.append(\"HTTP_2XX\")\n elif code < 400:\n avi_resp_codes.append(\"HTTP_3XX\")\n elif code < 500:\n avi_resp_codes.append(\"HTTP_4XX\")\n elif code < 600:\n avi_resp_codes.append(\"HTTP_5XX\")\n # Get the unique dict from list.\n avi_resp_codes = list(set(avi_resp_codes))\n if not avi_resp_codes:\n avi_resp_codes = [\"HTTP_ANY\"]\n return avi_resp_codes\n\n def get_conv_status(self, ns_object, skipped_list, na_list, indirect_list,\n ignore_for_val=None, indirect_commands=None,\n user_ignore_val=[]):\n \"\"\"\n This function used for getting status detail for command like\n skipped or indirect.\n :param ns_object: Netscaler parsed config\n :param skipped_list: list of skipped commands list.\n :param na_list: not applicable commands list.\n :param indirect_list: indirect command list\n :param ignore_for_val: optional field\n :param indirect_commands: indirect commands\n :param user_ignore_val: List of user ignore attributes\n :return: returns dict of coversion status.\n \"\"\"\n\n skipped = [attr for attr in ns_object.keys() if attr in skipped_list]\n na = [attr for attr in ns_object.keys() if attr in na_list]\n indirect = [attr for attr in ns_object.keys() if attr in indirect_list]\n # List of ignore attributes which are present in skipped\n user_ignore = [val for val in skipped if val in user_ignore_val]\n # Removed the attributes from skipped which are in user ignore list\n skipped = [attr for attr in skipped if attr not in user_ignore_val]\n if ignore_for_val:\n for key in ignore_for_val.keys():\n if key not in ns_object:\n continue\n ns_val = ns_object.get(key)\n ignore_val = ignore_for_val.get(key)\n if key in skipped and str(ns_val) == str(ignore_val):\n skipped.remove(key)\n if skipped:\n status = STATUS_PARTIAL\n else:\n status = STATUS_SUCCESSFUL\n\n conv_status = {\n 'skipped': skipped,\n 'indirect': indirect,\n 'na_list': na,\n 'status': status,\n 'user_ignore': user_ignore\n }\n return conv_status\n\n def get_key_cert_obj(self, name, key_file_name, cert_file_name, input_dir):\n \"\"\"\n :param name:name of ssl cert.\n :param key_file_name: key file (ie.pem)\n :param cert_file_name: certificate file name\n :param input_dir: input directory for certificate file name\n :return: returns dict of ssl object\n \"\"\"\n folder_path = input_dir + os.path.sep\n key = self.upload_file(folder_path + key_file_name)\n cert = self.upload_file(folder_path + cert_file_name)\n ssl_kc_obj = None\n if key and cert:\n cert = {\"certificate\": cert}\n ssl_kc_obj = {\n 'name': name,\n 'key': key,\n 'certificate': cert,\n 'key_passphrase': ''\n }\n return ssl_kc_obj\n\n def get_command_from_line(self, line):\n \"\"\"\n This function is used for getting command and line number from conf file.\n :param line: line\n :return: returns command name and line\n \"\"\"\n\n cmd = ''\n line_no = 0\n for member in line:\n if 'line_no' in member:\n line_no = member[1]\n continue\n if 
isinstance(member, str):\n cmd += ' %s' % member\n else:\n cmd += ' -%s' % ' '.join(member)\n return cmd, line_no\n\n def update_status_for_skipped(self, skipped_cmds):\n \"\"\"\n :param skipped_cmds: separation of non converted commands\n to NA, Indirect,DataScript,NotSupported\n :return: None\n \"\"\"\n\n na_cmds = ns_constants.netscalar_command_status['NotApplicableCommands']\n indirect_cmds = ns_constants.netscalar_command_status[\n 'IndirectCommands']\n datascript_cmds = \\\n ns_constants.netscalar_command_status['DatascriptCommands']\n not_supported = ns_constants.netscalar_command_status['NotSupported']\n if not skipped_cmds:\n return\n for cmd in skipped_cmds:\n line_no = cmd['line_no']\n cmd = cmd['cmd']\n cmd = cmd.strip()\n for na_cmd in na_cmds:\n if cmd.startswith(na_cmd):\n # Add status not applicable in csv/report\n self.add_status_row(line_no, na_cmd, None, cmd,\n STATUS_NOT_APPLICABLE)\n break\n for id_cmd in indirect_cmds:\n if cmd.startswith(id_cmd):\n # Add status indirect in csv/report\n self.add_status_row(line_no, id_cmd, None, cmd, STATUS_INDIRECT)\n break\n for datascript_cmd in datascript_cmds:\n if cmd.startswith(datascript_cmd):\n # Add status datascript in csv/report\n self.add_status_row(line_no, datascript_cmd, None, cmd,\n STATUS_DATASCRIPT)\n break\n for not_commands in not_supported:\n if cmd.startswith(not_commands):\n # Add status not not supported in csv/report\n self.add_status_row(line_no, not_commands, None, cmd,\n STATUS_COMMAND_NOT_SUPPORTED)\n break\n\n def remove_duplicate_objects(self, obj_type, obj_list):\n \"\"\"\n Remove duplicate objects from list\n :param obj_type: Object type\n :param obj_list: list of all objects\n :return: return list which has no duplicates objects\n \"\"\"\n\n if len(obj_list) == 1:\n return obj_list\n for source_obj in obj_list:\n for index, tmp_obj in enumerate(obj_list):\n if tmp_obj[\"name\"] == source_obj[\"name\"]:\n continue\n src_cp = copy.deepcopy(source_obj)\n tmp_cp = copy.deepcopy(tmp_obj)\n del src_cp[\"name\"]\n if \"description\" in src_cp:\n del src_cp[\"description\"]\n\n del tmp_cp[\"name\"]\n if \"description\" in tmp_cp:\n del tmp_cp[\"description\"]\n if src_cp.items() == tmp_cp.items():\n LOG.warn('Remove duplicate %s object : %s' % (obj_type,\n tmp_obj[\n \"name\"]))\n del obj_list[index]\n self.remove_duplicate_objects(obj_type, obj_list)\n return obj_list\n\n def cleanup_config(self, config):\n \"\"\"\n This function is used for deleting temp variables created for conversion\n :param config: dict type\n :return: None\n \"\"\"\n\n del config\n\n def clone_pool(self, pool_name, cloned_for, avi_config, userprefix=None):\n \"\"\"\n This function used for cloning shared pools in netscaler.\n :param pool_name: name of pool\n :param cloned_for: cloned for\n :param avi_config: avi config dict\n :param userprefix: prefix for objects\n :return: None\n \"\"\"\n pools = [pool for pool in avi_config['Pool'] if\n pool['name'] == pool_name]\n if pools:\n pool_obj = copy.deepcopy(pools[0])\n pname = pool_obj['name']\n pool_name = re.sub('[:]', '-', '%s-%s' % (pname, cloned_for))\n pool_obj['name'] = pool_name\n avi_config['Pool'].append(pool_obj)\n LOG.info(\n \"Same pool reference to other object. 
Clone Pool %s for %s\" %\n (pool_name, cloned_for))\n return pool_obj['name']\n return None\n\n def get_vs_if_shared_vip(self, avi_config, controller_version):\n \"\"\"\n This function checks if same vip is used for other vs\n :param avi_config: avi config dict\n :param controller_version:\n :return: None\n \"\"\"\n\n vs_list = [v for v in avi_config['VirtualService'] if\n 'port_range_end' in\n v['services'][0]]\n for vs in vs_list:\n # Get the list of vs which shared the same vip\n if parse_version(controller_version) >= parse_version('17.1'):\n vs_port_list = [int(v['services'][0]['port']) for v in\n avi_config['VirtualService']\n if v['vsvip_ref'].split('name=')[1].split('-')[0] ==\n vs['vsvip_ref'].split('name=')[1].split('-')[0]\n and 'port_range_end' not in v['services'][0]]\n else:\n vs_port_list = [int(v['services'][0]['port']) for v in\n avi_config['VirtualService'] if v['ip_address'][\n 'addr'] == vs['ip_address']['addr'] and\n 'port_range_end' not in v['services'][0]]\n\n if vs_port_list:\n min_port = min(vs_port_list)\n max_port = max(vs_port_list)\n vs['services'][0]['port_range_end'] = str(min_port - 1)\n service = {\n 'enable_ssl': False,\n 'port': str(max_port + 1),\n 'port_range_end': '65535'\n }\n vs['services'].append(service)\n\n def add_prop_for_http_profile(self, profile_name, avi_config, sysdict,\n prop_dict):\n \"\"\"\n This method adds the additional attribute to application profile\n :param profile_name: name of application profile\n :param avi_config: avi config dict\n :param sysdict: system/baseline config dict\n :param prop_dict: property dict\n :return:\n \"\"\"\n\n profile = [p for p in (avi_config['ApplicationProfile'] + sysdict[\n 'ApplicationProfile']) if p['name'] == profile_name]\n if profile:\n if prop_dict.get('clttimeout'):\n profile[0]['client_header_timeout'] = int(prop_dict[\n 'clttimeout'])\n profile[0]['client_body_timeout'] = int(prop_dict['clttimeout'])\n if prop_dict.get('xff_enabled'):\n if profile[0].get('http_profile'):\n profile[0]['http_profile'].update(\n {\n 'xff_enabled': True,\n 'xff_alternate_name': 'X-Forwarded-For'\n }\n )\n else:\n profile[0].update({'http_profile':\n {\n 'xff_enabled': True,\n 'xff_alternate_name': 'X-Forwarded-For'\n }\n })\n if profile[0].get('http_profile'):\n profile[0]['http_profile'].update(\n {\n 'x_forwarded_proto_enabled': True,\n 'hsts_enabled': True,\n 'http_to_https': True,\n 'httponly_enabled': True,\n 'hsts_max_age': 365,\n 'server_side_redirect_to_https': True,\n 'secure_cookie_enabled': True\n }\n )\n else:\n profile[0].update({'http_profile':\n {\n 'x_forwarded_proto_enabled': True,\n 'hsts_enabled': True,\n 'http_to_https': True,\n 'httponly_enabled': True,\n 'hsts_max_age': 365,\n 'server_side_redirect_to_https': True,\n 'secure_cookie_enabled': True\n }\n })\n\n def object_exist(self, object_type, name, avi_config):\n '''\n This method returns true if object exists in avi config dict else false\n :param object_type:\n :param name:\n :param avi_config:\n :return:\n '''\n data = avi_config[object_type]\n obj_list = [obj for obj in data if obj['name'] == name]\n if obj_list:\n return True\n return False\n\n def is_shared_same_vip(self, vs, cs_vs_list, avi_config, tenant_name,\n cloud_name, tenant_ref, cloud_ref,\n controller_version, prefix, input_vrf=None):\n \"\"\"\n This function check for vs sharing same vip\n :param vs: Name of vs\n :param cs_vs_list: List of vs\n :param avi_config: avi config dict\n :param tenant_name: Name of tenant\n :param cloud_name: Name of cloud\n :param tenant_ref: 
Reference of tenant\n :param cloud_ref: Reference of cloud\n :param controller_version: controller version\n :param prefix: prefix for objects\n :param input_vrf: VRF name input\n :return: None\n \"\"\"\n\n if parse_version(controller_version) >= parse_version('17.1'):\n # Get the list of vs which shared the same vip\n shared_vip = [v for v in cs_vs_list if v['vsvip_ref'\n ].split('name=')[1].split('-')[0] == vs['vsvip_ref'\n ].split('name=')[1].split('-')[0] and\n v['services'][0][\n 'port'] == vs['services'][0]['port']]\n else:\n shared_vip = [v for v in cs_vs_list if v['ip_address']['addr'] ==\n vs['ip_address']['addr'] and v['services'][0][\n 'port'] ==\n vs['services'][0]['port']]\n\n if input_vrf:\n vrf_ref = self.get_object_ref(input_vrf, 'vrfcontext',\n cloud_name=cloud_name)\n else:\n vrf_ref = self.get_object_ref('global', 'vrfcontext',\n cloud_name=cloud_name)\n\n if shared_vip:\n return True\n elif parse_version(controller_version) >= parse_version('17.1'):\n vsvip = vs['vsvip_ref'].split('name=')[1].split('-')[0]\n self.create_update_vsvip(vsvip, avi_config['VsVip'], tenant_ref,\n cloud_ref, prefix=prefix, vrf_ref=vrf_ref)\n name = vsvip + '-vsvip'\n # Added prefix for objects\n if prefix:\n name = prefix + '-' + vsvip + '-vsvip'\n updated_vsvip_ref = self.get_object_ref(\n name, 'vsvip', tenant_name, cloud_name)\n vs['vsvip_ref'] = updated_vsvip_ref\n\n def clone_http_policy_set(self, policy, prefix, avi_config, tenant_name,\n cloud_name, used_poolgrp_ref, userprefix=None):\n \"\"\"\n This function clone pool reused in context switching rule\n :param policy: name of policy\n :param prefix: clone for\n :param avi_config: avi config dict\n :param tenant_name:\n :param cloud_name:\n :param used_poolgrp_ref:\n :param userprefix: prefix for objects\n :return:None\n \"\"\"\n\n policy_name = policy['name']\n clone_policy = copy.deepcopy(policy)\n for rule in clone_policy['http_request_policy']['rules']:\n if rule.get('switching_action', None) and \\\n rule['switching_action'].get('pool_group_ref'):\n pool_group_ref = \\\n rule['switching_action']['pool_group_ref'].split('&')[\n 1].split(\n '=')[1]\n if pool_group_ref in used_poolgrp_ref:\n LOG.debug('Cloned the pool group for policy %s',\n policy_name)\n pool_group_ref = self.clone_pool_group(\n pool_group_ref, policy_name, avi_config, tenant_name,\n cloud_name, userprefix=userprefix)\n if pool_group_ref:\n updated_pool_group_ref = self.get_object_ref(\n pool_group_ref, OBJECT_TYPE_POOL_GROUP, tenant_name,\n cloud_name)\n rule['switching_action']['pool_group_ref'] = \\\n updated_pool_group_ref\n clone_policy['name'] += '-%s-clone' % prefix\n return clone_policy\n\n def set_rules_index_for_http_policy_set(self, avi_config):\n \"\"\"\n Update index as per avi protobuf requirements\n :param avi_config: avi config dict\n :return: None\n \"\"\"\n\n http_policy_sets = avi_config['HTTPPolicySet']\n for http_policy_set in http_policy_sets:\n rules = http_policy_set['http_request_policy']['rules']\n rules = sorted(rules, key=lambda d: int(d['index']))\n for index, rule in enumerate(rules):\n rule['index'] = index\n\n def get_netscalar_full_command(self, netscalar_command, obj):\n \"\"\"\n Generate netscaler command from the parse dict\n :param netscalar_command: name of command\n :param obj: object with attributes\n :return: Full command\n \"\"\"\n\n for attr in obj['attrs']:\n netscalar_command += ' %s' % attr\n for key in obj:\n if isinstance(obj[key], list):\n continue\n if key == 'line_no':\n continue\n netscalar_command += ' -%s %s' % 
(key, obj[key])\n return netscalar_command\n\n def clone_pool_group(self, pg_name, cloned_for, avi_config, tenant_name,\n cloud_name, userprefix=None):\n \"\"\"\n Used for cloning shared pool group.\n :param pg_name: pool group name\n :param cloned_for: clone for\n :param avi_config: avi config dict\n :param tenant_name:\n :param cloud_name:\n :param userprefix: prefix for objects\n :return: None\n \"\"\"\n pool_groups = [pg for pg in avi_config['PoolGroup']\n if pg['name'] == pg_name]\n if pool_groups:\n pool_group = copy.deepcopy(pool_groups[0])\n pool_group_name = re.sub('[:]', '-',\n '%s-%s' % (pg_name, cloned_for))\n pool_group['name'] = pool_group_name\n for member in pool_group.get('members', []):\n pool_ref = self.get_name(member['pool_ref'])\n pool_ref = self.clone_pool(pool_ref, cloned_for, avi_config,\n userprefix=userprefix)\n if pool_ref:\n updated_pool_ref = self.get_object_ref(\n pool_ref, OBJECT_TYPE_POOL, tenant_name, cloud_name)\n member['pool_ref'] = updated_pool_ref\n avi_config['PoolGroup'].append(pool_group)\n LOG.info(\n \"Same pool group reference to other object. Clone Pool group \"\n \"%s for %s\" % (pg_name, cloned_for))\n return pool_group['name']\n return None\n\n def remove_http_mon_from_pool(self, avi_config, pool, sysdict):\n \"\"\"\n This function is used for removing http type health monitor from https\n vs.\n :param avi_config: avi config dict\n :param pool: name of pool\n :param sysdict: baseline/system config dict\n :return: None\n \"\"\"\n if pool:\n hm_refs = copy.deepcopy(pool['health_monitor_refs'])\n for hm_ref in hm_refs:\n hm = [h for h in (sysdict['HealthMonitor'] + avi_config[\n 'HealthMonitor']) if h['name'] == hm_ref]\n if hm and hm[0]['type'] == 'HEALTH_MONITOR_HTTP':\n pool['health_monitor_refs'].remove(hm_ref)\n LOG.warning(\n 'Skipping %s this reference from %s pool because '\n 'of health monitor type is HTTP and VS has ssl '\n 'profile.' % (hm_ref, pool['name']))\n\n def remove_https_mon_from_pool(self, avi_config, pool, sysdict):\n \"\"\"\n This function is used for removing https type health monitor from http\n vs.\n :param avi_config: avi config dict\n :param pool: name of pool\n :param sysdict: baseline/system config dict\n :return: None\n \"\"\"\n if pool:\n hm_refs = copy.deepcopy(pool['health_monitor_refs'])\n for hm_ref in hm_refs:\n hm = [h for h in (sysdict['HealthMonitor'] + avi_config[\n 'HealthMonitor']) if h['name'] == hm_ref]\n if hm and hm[0]['type'] == 'HEALTH_MONITOR_HTTPS':\n pool['health_monitor_refs'].remove(hm_ref)\n LOG.warning(\n 'Skipping %s this reference from %s pool because '\n 'of health monitor type is HTTPS and VS has no ssl '\n 'profile.' 
% (hm_ref, pool['name']))\n\n def update_application_profile(self, profile_name, pki_profile_ref,\n tenant_ref, name, avi_config, sysdict):\n \"\"\"\n This functions defines to update application profile with pki profile if\n application profile exist if not create new http profile with pki profile\n :param profile_name: name of Http profile\n :param pki_profile_ref: ref of PKI profile\n :param tenant_ref: tenant ref\n :param name: name of virtual service\n :param avi_config: Dict of AVi config\n :param sysdict: baseline/system config\n :return: Http profile\n \"\"\"\n\n try:\n if profile_name:\n app_profile = [p for p in (sysdict['ApplicationProfile'] +\n avi_config['ApplicationProfile']) if\n p['name'] ==\n profile_name]\n if app_profile:\n app_profile[0][\"http_profile\"]['pki_profile_ref'] = \\\n pki_profile_ref\n LOG.debug('Added PKI profile to application profile '\n 'successfully : %s' % (\n profile_name, pki_profile_ref))\n else:\n app_profile = dict()\n app_profile['name'] = name + '-%s-%s' % (\n random.randrange(0, 1000),\n ns_constants.PLACE_HOLDER_STR)\n app_profile['tenant_ref'] = tenant_ref\n app_profile['type'] = 'APPLICATION_PROFILE_TYPE_HTTP'\n http_profile = dict()\n http_profile['connection_multiplexing_enabled'] = False\n http_profile['xff_enabled'] = False\n # TODO: clientIpHdrExpr conversion to xff_alternate_name\n http_profile['websockets_enabled'] = False\n http_profile['pki_profile_ref'] = pki_profile_ref\n app_profile[\"http_profile\"] = http_profile\n avi_config['ApplicationProfile'].append(app_profile)\n LOG.debug(\n \"Conversion completed successfully for httpProfile: %s\" %\n app_profile['name'])\n return app_profile['name']\n except:\n update_count('error')\n LOG.error(\"Error in convertion of httpProfile\", exc_info=True)\n\n def convert_persistance_prof(self, vs, name, tenant_ref):\n \"\"\"\n This function defines that it convert the persistent profile and\n return that profile\n :param vs: object of lb vs or pool\n :param name: name of application persteance profile\n :param tenant_ref: reference of tenant\n :return: application persistent profile\n \"\"\"\n\n profile = None\n persistenceType = vs.get('persistenceType', '')\n if persistenceType == 'COOKIEINSERT':\n timeout = vs.get('timeout', 2)\n profile = {\n \"http_cookie_persistence_profile\": {\n \"always_send_cookie\": False\n },\n \"persistence_type\": \"PERSISTENCE_TYPE_HTTP_COOKIE\",\n \"server_hm_down_recovery\": \"HM_DOWN_PICK_NEW_SERVER\",\n \"name\": name,\n }\n # Added time if greater than zero\n if int(timeout) > 0:\n profile['http_cookie_persistence_profile'][\"timeout\"] = timeout\n elif persistenceType == 'SOURCEIP':\n # Set timeout equal to 2 if not provided.\n timeout = vs.get('timeout', 120)\n timeout = int(timeout) / 60\n if timeout < 1:\n timeout = 1\n profile = {\n \"server_hm_down_recovery\": \"HM_DOWN_PICK_NEW_SERVER\",\n \"persistence_type\": \"PERSISTENCE_TYPE_CLIENT_IP_ADDRESS\",\n \"ip_persistence_profile\": {\n \"ip_persistent_timeout\": timeout\n },\n \"name\": name\n }\n elif persistenceType == 'SSLSESSION':\n profile = {\n \"server_hm_down_recovery\": \"HM_DOWN_PICK_NEW_SERVER\",\n \"persistence_type\": \"PERSISTENCE_TYPE_TLS\",\n \"name\": name\n }\n profile['tenant_ref'] = tenant_ref\n return profile\n\n def update_status_target_lb_vs_to_indirect(self, larget_lb_vs):\n \"\"\"\n This function defines that update status for the target lb vserver as\n Indirect\n :param larget_lb_vs: name of target lb vserver\n :return: None\n \"\"\"\n global csv_writer_dict_list\n row = 
[row for row in csv_writer_dict_list\n if row['Object Name'] == larget_lb_vs\n and row['Netscaler Command'] == 'add lb vserver']\n if row:\n row[0]['Status'] = STATUS_INDIRECT\n\n def create_http_policy_set_for_redirect_url(self, vs_obj, redirect_uri,\n avi_config, tenant_name, tenant_ref, enable_ssl):\n \"\"\"\n This function defines that create http policy for redirect url\n :param vs_obj: object of VS\n :param redirect_uri: redirect uri\n :param avi_config: dict of AVi\n :param tenant_name: name of tenant\n :param tenant_ref: tenant ref\n :param enable_ssl: flag for enabling ssl\n :return: None\n \"\"\"\n redirect_uri = str(redirect_uri).replace('\"', '')\n action = self.build_redirect_action_dict(redirect_uri, enable_ssl)\n policy_obj = {\n 'name': vs_obj['name'] + '-redirect-policy',\n 'tenant_ref': tenant_ref,\n 'http_request_policy': {\n 'rules': [\n {\n 'index': 0,\n 'name': vs_obj['name'] + '-redirect-policy-rule-0',\n 'match': {\n 'path': {\n 'match_case': 'INSENSITIVE',\n 'match_str': [\n '/'\n ],\n 'match_criteria': 'EQUALS'\n }\n },\n 'redirect_action': action\n }\n ]\n }\n }\n updated_http_policy_ref = self.get_object_ref(policy_obj['name'],\n OBJECT_TYPE_HTTP_POLICY_SET,\n tenant_name)\n http_policies = {\n 'index': 11,\n 'http_policy_set_ref': updated_http_policy_ref\n }\n if not vs_obj.get('http_policies'):\n vs_obj['http_policies'] = []\n else:\n ind = max([policies['index'] for policies in vs_obj[\n 'http_policies']])\n http_policies['index'] = ind + 1\n vs_obj['http_policies'].append(http_policies)\n avi_config['HTTPPolicySet'].append(policy_obj)\n\n def clean_virtual_service_from_avi_config(self, avi_config,\n controller_version):\n \"\"\"\n This function defines that clean up vs which has vip 0.0.0.0\n :param avi_config: dict of AVI\n :param controller_version:\n :return: None\n \"\"\"\n vs_list = copy.deepcopy(avi_config['VirtualService'])\n avi_config['VirtualService'] = []\n if parse_version(controller_version) >= parse_version('17.1'):\n avi_config['VirtualService'] = \\\n [vs for vs in vs_list\n if vs['vsvip_ref'].split('name=')[1].split('-')[0] != '0.0.0.0']\n else:\n avi_config['VirtualService'] = \\\n [vs for vs in vs_list\n if vs['ip_address']['addr'] != '0.0.0.0']\n\n def parse_url(self, url):\n \"\"\"\n This method returns the parsed url\n :param url: url that need to be parsed\n :return:\n \"\"\"\n parsed = urlparse(url)\n return parsed\n\n def format_string_to_json(self, avi_string):\n \"\"\"\n This function defines that it convert string into json format to\n convert into dict\n :param avi_string: string to be converted\n :return: Return converted string\n \"\"\"\n avi_string = avi_string.split('__/__')[0]\n return ast.literal_eval(avi_string)\n\n def get_csv_object_list(self, csv_writer_dict_list, command_list):\n \"\"\"\n This method is used for getting csv object\n :param csv_writer_dict_list: CSV row of object from xlsx report\n :param command_list: List of netscaler commands\n :return: List of CSV rows\n \"\"\"\n csv_object = [row for row in\n csv_writer_dict_list\n if row['Status'] in [STATUS_PARTIAL, STATUS_SUCCESSFUL]\n and row['Netscaler Command'] in\n command_list]\n return csv_object\n\n def get_csv_skipped_list(self, csv_object, name_of_object, vs_ref):\n \"\"\"\n This method is used for getting skipped list from vs.\n :param csv_object: CSV row of object from xlsx report\n :param name_of_object: Name of Object\n :param vs_ref: Reference of VS\n :return: List of skipped settings\n \"\"\"\n\n skipped_list = []\n for each_partial in 
csv_object:\n avi_object_json = \\\n self.format_string_to_json(each_partial['AVI Object'])\n if avi_object_json.get('name') and \\\n avi_object_json['name'] == name_of_object:\n # Set the VS reference for Netscaler status row\n each_partial['VS Reference'] = vs_ref\n repls = ('[', ''), (']', '')\n skipped_setting_csv = reduce(lambda a, kv: a.replace(*kv),\n repls,\n each_partial['Skipped settings'])\n if skipped_setting_csv:\n skipped_list.append(skipped_setting_csv)\n return skipped_list\n\n def get_ssl_key_and_cert_refs_skipped(self, csv_writer_dict_list,\n object_name, vs_ref):\n \"\"\"\n This functions defines that get the skipped list of CSV row\n :param csv_writer_dict_list: CSV row of object from xlsx report\n :param object_name: like virtual service or pool name\n :param vs_ref: Reference of VS\n :return: List of skipped settings\n \"\"\"\n\n ssl_key_cert = \\\n self.get_name(object_name['ssl_key_and_certificate_refs'][0])\n csv_object = self.get_csv_object_list(\n csv_writer_dict_list, ['bind ssl vserver', 'bind ssl service',\n 'bind ssl serviceGroup'])\n skipped_list = self.get_csv_skipped_list(csv_object, ssl_key_cert,\n vs_ref)\n return ssl_key_cert, skipped_list\n\n def get_ssl_profile_skipped(self, csv_writer_dict_list, ssl_profile_ref,\n vs_ref):\n \"\"\"\n This functions defines that get the skipped list of CSV row\n :param csv_writer_dict_list: CSV row of object from xlsx report\n :param ssl_profile_ref: reference of ssl profile object\n :param vs_ref: virtual service obj reference.\n :return: List of skipped settings\n \"\"\"\n\n ssl_profile_name = self.get_name(ssl_profile_ref)\n csv_object = \\\n self.get_csv_object_list(csv_writer_dict_list,\n ['set ssl vserver', 'set ssl service',\n 'set ssl serviceGroup'])\n skipped_list = self.get_csv_skipped_list(csv_object, ssl_profile_name,\n vs_ref)\n return ssl_profile_name, skipped_list\n\n def get_application_profile_skipped(self, csv_writer_dict_list,\n name_of_object, vs_ref):\n \"\"\"\n This functions defines that get the skipped list of CSV row\n :param csv_writer_dict_list: CSV row of object from xlsx report\n :param name_of_object: object name like pool name, etc\n :param vs_ref: virtual service obj reference.\n :return: List of skipped settings\n \"\"\"\n\n ssl_profile_name = self.get_name(\n name_of_object['application_profile_ref'])\n csv_object = self.get_csv_object_list(\n csv_writer_dict_list, ['add ns httpProfile'])\n skipped_list = self.get_csv_skipped_list(csv_object, ssl_profile_name,\n vs_ref)\n return ssl_profile_name, skipped_list\n\n def get_network_profile_skipped(self, csv_writer_dict_list, name_of_object,\n vs_ref):\n \"\"\"\n This functions defines that get the skipped list of CSV row\n :param csv_writer_dict_list:List of add ns tcpProfile netscaler command rows\n :param name_of_object: object name like pool name, etc\n :param vs_ref: virtual service obj reference.\n :return: List of skipped settings\n \"\"\"\n\n ssl_profile_name = self.get_name(name_of_object['network_profile_ref'])\n csv_object = self.get_csv_object_list(\n csv_writer_dict_list, ['add ns tcpProfile'])\n skipped_list = self.get_csv_skipped_list(csv_object, ssl_profile_name,\n vs_ref)\n return ssl_profile_name, skipped_list\n\n def get_app_persistence_profile_skipped(self, csv_writer_dict_list,\n name_of_object, vs_ref):\n \"\"\"\n This functions defines that get the skipped list of CSV row\n :param csv_writer_dict_list: List of set lb group netscaler command rows\n :param name_of_object: object name like pool name, etc\n :param 
vs_ref: virtual service obj reference.\n :return: List of skipped settings\n \"\"\"\n # Changed ssl profile name to ssl profile ref.\n app_persistence_profile_name = self.get_name(\n name_of_object['ssl_profile_ref'])\n csv_object = self.get_csv_object_list(csv_writer_dict_list, ['set lb group'])\n skipped_list = self.get_csv_skipped_list(\n csv_object, app_persistence_profile_name, vs_ref)\n return app_persistence_profile_name, skipped_list\n\n def get_pool_skipped_list(self, avi_config, pool_group_name,\n skipped_setting, csv_object, obj_name,\n csv_writer_dict_list, vs_ref):\n \"\"\"\n This method is used for getting pool skipped list.\n :param avi_config: AVI dict\n :param pool_group_name: Name of Pool group\n :param skipped_setting: List of skipped settings\n :param csv_object: CSV row\n :param obj_name: Name of Object\n :param csv_writer_dict_list: List of bind lb vserver netscaler command\n rows\n :param vs_ref: vs object reference\n :return: List of skipped settings\n \"\"\"\n\n pool_group_object_ref = [pool_group_object_ref for pool_group_object_ref\n in avi_config['PoolGroup'] if\n pool_group_object_ref[\n 'name'] == pool_group_name]\n for pool_group in pool_group_object_ref:\n if 'members' in pool_group:\n for each_pool_ref in pool_group['members']:\n pool_name = self.get_name(each_pool_ref['pool_ref'])\n skipped_list = self.get_csv_skipped_list(csv_object, pool_name,\n vs_ref)\n if len(skipped_list) > 0:\n skipped_setting[obj_name] = {}\n skipped_setting[obj_name]['pool'] = {}\n skipped_setting[obj_name]['pool'][\n 'pool_name'] = pool_name\n skipped_setting[obj_name]['pool']['pool_skipped_list'] \\\n = skipped_list\n for pool_partial in csv_object:\n avi_object_json = self.format_string_to_json(\n pool_partial['AVI Object'])\n if avi_object_json['name'] == pool_name:\n if 'health_monitor_refs' in avi_object_json and \\\n avi_object_json['health_monitor_refs']:\n monitor_refs = \\\n avi_object_json['health_monitor_refs']\n for monitor_ref in monitor_refs:\n monitor_ref = self.get_name(monitor_ref)\n csv_object = self.get_csv_object_list(\n csv_writer_dict_list,\n ['add lb monitor'])\n skipped_list = self.get_csv_skipped_list(\n csv_object, monitor_ref, vs_ref)\n if skipped_list:\n skipped_setting[obj_name] = {}\n skipped_setting[obj_name]['pool'] = {}\n skipped_setting[obj_name]['pool'][\n 'pool_name'] = pool_name\n skipped_setting[obj_name]['pool'][\n 'health monitor'] = {}\n skipped_setting[obj_name]['pool'][\n 'health monitor'][\n 'name'] = monitor_ref\n skipped_setting[obj_name]['pool'][\n 'health monitor']['skipped_list'] =\\\n skipped_list\n if 'ssl_key_and_certificate_refs' in avi_object_json:\n name, skipped = \\\n self.get_ssl_key_and_cert_refs_skipped(\n csv_writer_dict_list, avi_object_json,\n vs_ref)\n if skipped:\n skipped_setting[obj_name] = {}\n skipped_setting[obj_name]['pool'] = {}\n skipped_setting[obj_name]['pool'][\n 'pool_name'] = pool_name\n skipped_setting[\n obj_name]['pool'][\n 'ssl key and cert'] = {}\n skipped_setting[\n obj_name]['pool']['ssl key and cert'][\n 'name'] = name\n skipped_setting[\n obj_name]['pool']['ssl key and cert'][\n 'skipped_list'] = skipped\n if 'ssl_profile_ref' in avi_object_json:\n name, skipped = \\\n self.get_ssl_profile_skipped(\n csv_writer_dict_list, avi_object_json[\n 'ssl_profile_ref'], vs_ref)\n if skipped:\n skipped_setting[obj_name] = {}\n skipped_setting[obj_name]['pool'] = {}\n skipped_setting[obj_name]['pool'][\n 'pool_name'] = pool_name\n skipped_setting[obj_name]['pool'][\n 'ssl profile'] = {}\n 
skipped_setting[obj_name]['pool'][\n 'ssl profile']['name'] = name\n skipped_setting[obj_name]['pool'][\n 'ssl profile']['skipped_list'] = skipped\n # Get the skipped settings of application\n # persistence profile ref.\n if 'application_persistence_profile_ref' in \\\n avi_object_json:\n name, skipped = \\\n self.get_app_persistence_profile_skipped(\n csv_writer_dict_list, avi_object_json,\n vs_ref)\n if skipped:\n skipped_setting[obj_name] = {}\n skipped_setting[obj_name]['pool'] = {}\n skipped_setting[obj_name]['pool'][\n 'pool_name'] = pool_name\n skipped_setting[obj_name]['pool'][\n 'Application Persistence profile'] = {}\n skipped_setting[obj_name]['pool'][\n 'Application Persistence profile'][\n 'name'] = name\n skipped_setting[obj_name]['pool'][\n 'Application Persistence profile'][\n 'skipped_list'] = skipped\n # Get the skipped settings of application\n # persistence profile ref.\n if 'application_persistence_profile_ref' \\\n in avi_object_json:\n name, skipped = \\\n self.get_app_persistence_profile_skipped(\n csv_writer_dict_list, avi_object_json,\n vs_ref)\n if skipped:\n skipped_setting[obj_name] = {}\n skipped_setting[obj_name]['pool'] = {}\n skipped_setting[obj_name]['pool'][\n 'pool_name'] = pool_name\n skipped_setting[obj_name]['pool'][\n 'Application Persistence profile'] = {}\n skipped_setting[obj_name]['pool'][\n 'Application Persistence profile'][\n 'name'] = name\n skipped_setting[obj_name]['pool'][\n 'Application Persistence profile'][\n 'skipped_list'] = skipped\n\n def vs_complexity_level(self):\n \"\"\"\n This method calculate complexity of vs.\n :return:\n \"\"\"\n vs_csv_objects = [row for row in csv_writer_dict_list\n if\n row['Status'] in [STATUS_PARTIAL, STATUS_SUCCESSFUL]\n and row['Netscaler Command'] in [\n 'add cs vserver', 'add lb vserver']]\n for vs_csv_object in vs_csv_objects:\n virtual_service = self.format_string_to_json(\n vs_csv_object['AVI Object'])\n # Update the complexity level of VS as Basic or Advanced\n self.update_vs_complexity_level(vs_csv_object, virtual_service)\n\n def vs_per_skipped_setting_for_references(self, avi_config):\n \"\"\"\n This functions defines that Add the skipped setting per VS CSV row\n :param avi_config: this methode use avi_config for checking vs skipped\n :return: None\n \"\"\"\n\n # Get the count of vs sucessfully migrated\n global fully_migrated\n global total_count\n global progressbar_count\n fully_migrated = 0\n # Get the VS object list which is having status successful and partial.\n vs_csv_objects = [row for row in csv_writer_dict_list\n if\n row['Status'] in [STATUS_PARTIAL, STATUS_SUCCESSFUL]\n and row['Netscaler Command'] in [\n 'add cs vserver', 'add lb vserver']]\n # calculate total count\n total_count = total_count + len(vs_csv_objects)\n for vs_csv_object in vs_csv_objects:\n progressbar_count += 1\n skipped_setting = {}\n virtual_service = self.format_string_to_json(\n vs_csv_object['AVI Object'])\n # Update the complexity level of VS as Basic or Advanced\n self.update_vs_complexity_level(vs_csv_object, virtual_service)\n vs_ref = virtual_service['name']\n repls = ('[', ''), (']', '')\n # Get list of skipped setting attributes\n skipped_setting_csv = reduce(lambda a, kv: a.replace(*kv), repls,\n vs_csv_object['Skipped settings'])\n if skipped_setting_csv:\n skipped_setting['virtual_service'] = [skipped_setting_csv]\n # Get the skipped list for ssl key and cert\n if 'ssl_key_and_certificate_refs' in virtual_service:\n name, skipped = self.get_ssl_key_and_cert_refs_skipped(\n csv_writer_dict_list, 
virtual_service, vs_ref)\n if skipped:\n skipped_setting['ssl key and cert'] = {}\n skipped_setting['ssl key and cert']['name'] = name\n skipped_setting['ssl key and cert'][\n 'skipped_list'] = skipped\n # Get the skipped list for ssl profile name.\n # Changed ssl profile name to ssl profile ref.\n if 'ssl_profile_ref' in virtual_service:\n name, skipped = self.get_ssl_profile_skipped(\n csv_writer_dict_list, virtual_service['ssl_profile_ref'],\n vs_ref)\n if skipped:\n skipped_setting['ssl profile'] = {}\n skipped_setting['ssl profile']['name'] = name\n skipped_setting['ssl profile']['skipped_list'] = skipped\n # Get the skipped list for pool group.\n if 'pool_group_ref' in virtual_service:\n pool_group_name = self.get_name(\n virtual_service['pool_group_ref'])\n csv_object = self.get_csv_object_list(\n csv_writer_dict_list, ['bind lb vserver'])\n self.get_pool_skipped_list(\n avi_config, pool_group_name, skipped_setting, csv_object,\n 'pool group', csv_writer_dict_list, vs_ref)\n # Get the skipepd list for http policy.\n if 'http_policies' in virtual_service:\n csv_object = self.get_csv_object_list(\n csv_writer_dict_list,\n ['add cs policy', 'add responder policy',\n 'add rewrite policy'])\n for http_ref in virtual_service['http_policies']:\n http_name = self.get_name(http_ref['http_policy_set_ref'])\n skipped_list = self.get_csv_skipped_list(csv_object,\n http_name,\n vs_ref)\n if skipped_list:\n skipped_setting['Httppolicy'] = {}\n skipped_setting['Httppolicy']['name'] = http_name\n skipped_setting['Httppolicy'][\n 'skipped_list'] = skipped_list\n # Get the http policy name\n for each_http_policy in avi_config['HTTPPolicySet']:\n if each_http_policy['name'] == http_name:\n for http_req in \\\n each_http_policy['http_request_policy'][\n 'rules']:\n if http_req.get('switching_action', None) and \\\n http_req['switching_action'].get(\n 'pool_group_ref', None):\n pool_group_name = self.get_name(\n http_req['switching_action']\n ['pool_group_ref'])\n self.get_pool_skipped_list(\n avi_config, pool_group_name,\n skipped_setting, csv_object,\n 'Httppolicy',\n csv_writer_dict_list, vs_ref)\n # Get the skipped list for application_profile_ref.\n if 'application_profile_ref' in virtual_service and \\\n 'admin:System' not in \\\n virtual_service['application_profile_ref']:\n name, skipped = self.get_application_profile_skipped(\n csv_writer_dict_list, virtual_service, vs_ref)\n if skipped:\n skipped_setting['Application profile'] = {}\n skipped_setting['Application profile'][\n 'name'] = name\n skipped_setting['Application profile'][\n 'skipped_list'] = skipped\n # Get the skipped list for network profile ref.\n if 'network_profile_ref' in virtual_service and \\\n 'admin:System' not in \\\n virtual_service['network_profile_ref']:\n name, skipped = self.get_network_profile_skipped(\n csv_writer_dict_list, virtual_service, vs_ref)\n if skipped:\n skipped_setting['Network profile'] = {}\n skipped_setting['Network profile'][\n 'name'] = name\n skipped_setting['Network profile'][\n 'skipped_list'] = skipped\n # Update overall skipped setting of VS csv row\n if skipped_setting:\n vs_csv_object.update(\n {'Overall skipped settings': str(skipped_setting)})\n else:\n vs_csv_object.update(\n {'Overall skipped settings': \"FULLY MIGRATION\"})\n fully_migrated += 1\n msg = \"Writing excel sheet started...\"\n self.print_progress_bar(progressbar_count, total_count, msg,\n prefix='Progress', suffix='')\n csv_objects = [row for row in csv_writer_dict_list\n if row['Status'] in [STATUS_PARTIAL, 
STATUS_SUCCESSFUL]\n and row['Netscaler Command'] not in ['add cs vserver',\n 'add lb vserver']\n and (\n 'VS Reference' not in row or not row[\n 'VS Reference'])]\n # Update the vs reference not in used if objects are not attached to\n # VS directly or indirectly\n for csv_object in csv_objects:\n csv_object['VS Reference'] = STATUS_NOT_IN_USE\n\n def write_status_report_and_pivot_table_in_xlsx(self, row_list, output_dir,\n report_name, vs_level_status):\n \"\"\"\n This method writes the status and make pivot table in excel sheet\n :param row_list:\n :param output_dir:\n :param report_name:\n :param vs_level_status:\n :return:\n \"\"\"\n global total_count\n global progressbar_count\n # List of fieldnames for headers\n if vs_level_status:\n fieldnames = ['Line Number', 'Netscaler Command', 'Object Name',\n 'Full Command', 'Status', 'Skipped settings',\n 'Indirect mapping', 'Not Applicable', 'User Ignored',\n 'Overall skipped settings', 'Complexity Level',\n 'VS Reference', 'AVI Object']\n else:\n fieldnames = ['Line Number', 'Netscaler Command', 'Object Name',\n 'Full Command', 'Status', 'Skipped settings',\n 'Indirect mapping', 'Not Applicable', 'User Ignored',\n 'Complexity Level' , 'AVI Object']\n xlsx_report = output_dir + os.path.sep + (\"%s-ConversionStatus.xlsx\" %\n report_name)\n # xlsx workbook\n status_wb = Workbook(xlsx_report)\n # xlsx worksheet\n status_ws = status_wb.add_worksheet(\"Status Sheet\")\n # Lock the first row of xls report.\n status_ws.freeze_panes(1, 0)\n first_row = 0\n for header in fieldnames:\n col = fieldnames.index(header)\n status_ws.write(first_row, col, header)\n row = 1\n for row_data in row_list:\n progressbar_count += 1\n for _key, _value in row_data.items():\n if _key in fieldnames:\n col = fieldnames.index(_key)\n status_ws.write(row, col, _value)\n msg = \"Writing excel sheet started...\"\n self.print_progress_bar(progressbar_count, total_count, msg,\n prefix='Progress', suffix='')\n row += 1\n status_wb.close()\n # create dataframe for row list\n df = pandas.DataFrame(row_list, columns=fieldnames)\n # create pivot table using pandas\n pivot_table = pandas.pivot_table(df,\n index=[\"Status\", \"Netscaler Command\"],\n values=[], aggfunc=[len], fill_value=0)\n # create dataframe for pivot table using pandas\n pivot_df = pandas.DataFrame(pivot_table)\n master_book = load_workbook(xlsx_report)\n master_writer = pandas.ExcelWriter(xlsx_report, engine='openpyxl')\n master_writer.book = master_book\n # Add pivot table in Pivot sheet\n pivot_df.to_excel(master_writer, 'Pivot Sheet')\n master_writer.save()\n\n def update_skip_duplicates(self, obj, obj_list, obj_type,\n merge_object_mapping, name, ent_type, prefix,\n syslist):\n \"\"\"\n This method merge duplicate objects\n :param obj: Source object to find duplicates for\n :param obj_list: List of object to search duplicates in\n :param obj_type: Type of object to add in converted_objs status\n :param converted_objs: Converted avi object or merged object name\n :param name: Name of the object\n :param default_profile_name : Name of root parent default profile\n :return:\n \"\"\"\n dup_of = None\n merge_object_mapping[obj_type].update({name: name})\n dup_of, old_name = self.check_for_duplicates(obj, obj_list, obj_type,\n merge_object_mapping, ent_type,\n prefix,\n syslist)\n if dup_of:\n LOG.info(\n \"Duplicate profiles: %s merged in %s\" % (obj['name'], dup_of))\n # Update value of ssl profile with merged profile\n if old_name in merge_object_mapping[obj_type].keys():\n 
merge_object_mapping[obj_type].update({old_name: dup_of})\n merge_object_mapping[obj_type].update({name: dup_of})\n return True\n return False\n\n def create_update_vsvip(self, vip, vsvip_config, tenant_ref, cloud_ref,\n prefix=None, vrf_ref=None):\n \"\"\"\n This functions defines that create or update VSVIP object.\n :param vip: vip of VS\n :param vsvip_config: List of vs object\n :param tenant_ref: tenant reference\n :param cloud_ref: cloud reference\n :param prefix: prefix for objects\n :param vrf_ref: VRF ref to be added in VIP object\n :return: None\n \"\"\"\n\n # Get the exsting vsvip object list if present\n name = vip + '-vsvip'\n # Added prefix for objects\n if prefix:\n name = prefix + '-' + name\n vsvip = [vip_obj for vip_obj in vsvip_config\n if vip_obj['name'] == name]\n if vsvip:\n diff_ten = [vips for vips in vsvip if vips['tenant_ref'] !=\n tenant_ref]\n if diff_ten:\n LOG.debug('VsVip %s is repeated with vrf %s but different '\n 'tenant %s', name, self.get_name(vrf_ref) if vrf_ref\n else 'None', self.get_name(tenant_ref))\n name = ''\n # If VSVIP object not present then create new VSVIP object.\n else:\n vsvip_object = {\n \"name\": name,\n \"tenant_ref\": tenant_ref,\n \"cloud_ref\": cloud_ref,\n \"vip\": [\n {\n \"vip_id\": \"0\",\n \"ip_address\": {\n \"type\": \"V4\",\n \"addr\": vip\n }\n }\n ],\n }\n if vrf_ref:\n vsvip_object[\"vrf_context_ref\"] = vrf_ref\n vsvip_config.append(vsvip_object)\n\n def get_redirect_fail_action(self, url):\n \"\"\"\n This method returns the fail action dict\n :param url: url\n :return:\n \"\"\"\n parsed = urlparse(url)\n redirect_fail_action = {\n 'fail_action': {\n 'redirect': {\n 'host': parsed.hostname,\n 'protocol': str(parsed.scheme).upper(),\n 'status_code': \"HTTP_REDIRECT_STATUS_CODE_302\"\n },\n \"type\": \"FAIL_ACTION_HTTP_REDIRECT\"\n }\n }\n if parsed.path:\n redirect_fail_action['fail_action']['redirect']['path'] = \\\n str(parsed.path).replace('\"', '')\n if parsed.query:\n redirect_fail_action['fail_action']['redirect'][\n 'query'] = parsed.query\n\n return redirect_fail_action\n\n def cleanup_dupof(self, avi_config):\n \"\"\"\n This method is used to clean up dup_of key from different AVI objects\n :param avi_config:\n :return:\n \"\"\"\n self.remove_dup_key(avi_config[\"ApplicationProfile\"])\n self.remove_dup_key(avi_config[\"NetworkProfile\"])\n self.remove_dup_key(avi_config[\"SSLProfile\"])\n self.remove_dup_key(avi_config['PKIProfile'])\n self.remove_dup_key(avi_config[\"ApplicationPersistenceProfile\"])\n self.remove_dup_key(avi_config['HealthMonitor'])\n\n def update_profile_ref(self, ref, avi_obj, merge_obj_list):\n \"\"\"\n This method is used to update the profile references which was\n attached at the time of creation\n :param ref:\n :param avi_obj:\n :param merge_obj_list:\n :return:\n \"\"\"\n for obj in avi_obj:\n obj_ref = obj.get(ref)\n tenant_ref = obj.get('tenant_ref')\n if obj_ref:\n name = self.get_name(obj_ref)\n tenant = self.get_name(tenant_ref)\n if name in merge_obj_list:\n updated_name = merge_obj_list[name]\n if ref == 'application_persistence_profile_ref':\n type_cons = OBJECT_TYPE_APPLICATION_PERSISTENCE_PROFILE\n if ref == 'application_profile_ref':\n type_cons = OBJECT_TYPE_APPLICATION_PROFILE\n obj[ref] = self.get_object_ref(updated_name, type_cons,\n tenant)\n\n def vs_redirect_http_to_https(self, avi_config, sysdict):\n\n \"\"\"\n Removes the VS which is redirected to another VS amd update the\n status and avi object for that VS\n :param avi_config: avi configuration after all 
conversion\n :param sysdict: system configuration\n :return:\n \"\"\"\n\n vsrem = {}\n LOG.debug(\"Check started for redirect from HTTP VS to HTTPS VS with \"\n \"no pool\")\n for vs in avi_config['VirtualService']:\n if not vs.get('pool_group_ref') and not vs.get(\n 'application_profile_ref') and vs.get('services', []) and \\\n not all([s.get('enable_ssl', True)for s in vs['services']])\\\n and vs.get('http_policies',[]) and vs['http_policies'][\n 0].get('http_policy_set_ref'):\n polname = self.get_name(vs['http_policies'][0][\n 'http_policy_set_ref'])\n pol = [pl for pl in avi_config['HTTPPolicySet'] if pl['name']\n == polname]\n if pol and pol[0].get('http_request_policy', {}).get('rules',\n []) and pol[0]['http_request_policy']['rules'][0].get(\n 'redirect_action'):\n iplist = [ip['ip_address']['addr'] for ip in vs.get('vip',\n []) if ip.get('ip_address',{}).get('addr')] or (\n [vs['ip_address']['addr']] if vs.get(\n 'ip_address',{}).get('addr') else [])\n if iplist:\n for nvs in avi_config['VirtualService']:\n if vs['name'] != nvs['name'] and [ip for ip in\n iplist if ip in ([nip['ip_address']['addr']\n for nip in nvs.get('vip', []) if nip.get(\n 'ip_address',{}).get('addr')] or [nvs[\n 'ip_address']['addr'] if nvs.get(\n 'ip_address',{}).get('addr') else []])]:\n appname = self.get_name(nvs[\n 'application_profile_ref']) if \\\n nvs.get('application_profile_ref') \\\n else None\n if appname == 'ns-migrate-http':\n LOG.debug(\"%s has redirect to %s, hence \"\n \"removing %s\" % (vs['name'],\n nvs['name'], vs['name']))\n vsrem[vs['name']] = nvs['name']\n appprof = [pr for pr in (avi_config[\n 'ApplicationProfile'] + sysdict[\n 'ApplicationProfile']) if pr['name']\n == appname]\n if appprof and appprof[0]['type'] == \\\n 'APPLICATION_PROFILE_TYPE_HTTP':\n if appprof[0].get('http_profile'):\n appprof[0]['http_profile'][\n 'http_to_https'] = True\n else:\n appprof[0]['http_profile'] = {\n 'http_to_https': True}\n LOG.debug(\"%s has redirect to %s, hence \"\n \"setting 'http_to_https' as true \"\n \"and removing %s\" %(vs['name'],\n nvs['name'], vs['name']))\n vsrem[vs['name']] = nvs['name']\n # Condition to merge http ports to https vs\n if [True for ssl in nvs['services'] if ssl[\n 'enable_ssl']] and \\\n [True for ssl_vs in vs['services']\n if not ssl_vs['enable_ssl']]:\n nvs['services'].append(vs['services'][0])\n vsrem[vs['name']] = nvs['name']\n\n LOG.debug(\"Check completed for redirect from HTTP VS to HTTPS VS with \"\n \"no pool\")\n if vsrem:\n avi_config['VirtualService'] = [v for v in avi_config[\n 'VirtualService'] if v['name'] not\n in vsrem.keys()]\n LOG.debug('%s VS got removed from AVI configuration' % str(len(\n vsrem)))\n for cl in csv_writer_dict_list:\n if cl['Object Name'] in vsrem.keys() and cl[\n 'Netscaler Command'] in ['add lb vserver', 'add cs vserver']:\n cl['Status'] = STATUS_INDIRECT\n cl['AVI Object'] = 'Redirected to %s' % vsrem[cl[\n 'Object Name']]\n\n def merge_pool(self, avi_config):\n \"\"\"\n This method merge the pools in AVI if HM is same\n :param avi_config:\n :return:\n \"\"\"\n mergelist=[]\n for poolgrp in avi_config['PoolGroup']:\n if poolgrp['name'] == 'lb-depoed1cdb.qai-pri-5984-poolgroup':\n print('found')\n # do not merge the pool if it is a backup pool in the group\n pool_member = [obj for obj in poolgrp['members'] if not\n obj.get('priority_label', '10') == '2']\n length = len(pool_member)\n for count in range(length):\n pool_name = pool_member[count]['pool_ref'].split(\n '&')[1].split('=')[1]\n if pool_name in mergelist:\n continue\n pool 
= [pl for pl in avi_config['Pool']\n if pl['name'] == pool_name]\n if not pool:\n LOG.debug(\"'%s' not present\" % pool_name)\n continue\n for count2 in range(count+1, length):\n pname = pool_member[count2]['pool_ref'].split(\n '&')[1].split('=')[1]\n nextpool = [pol for pol in avi_config['Pool']\n if pol['name'] == pname]\n if not nextpool:\n LOG.debug(\"'%s' not present\" % pname)\n continue\n if pool[0]['health_monitor_refs'].sort() == nextpool[0][\n 'health_monitor_refs'].sort():\n LOG.debug(\"Merging pool '%s' in '%s'\" % (nextpool[0][\n 'name'], pool[0]['name']))\n ip_port = set()\n for ser in pool[0]['servers']:\n ip_port.add(str(ser['ip']['addr']) + ':' + str(\n ser['port']))\n for server in nextpool[0]['servers']:\n ipport = str(server['ip']['addr']) + ':' + str(\n server['port'])\n if ipport not in list(ip_port):\n pool[0]['servers'].append(server)\n for cl in csv_writer_dict_list:\n if cl['Object Name'] == (nextpool[0][\n 'name'].replace('-pool','')) and cl[\n 'Netscaler Command'] in ['add service',\n 'add serviceGroup']:\n cl['AVI Object'] = 'Merged to %s' % pool[0][\n 'name']\n mergelist.append(nextpool[0]['name'])\n for plg in avi_config['PoolGroup']:\n plg['members'] = [member for member in plg['members'] if\n member['pool_ref'].split('&')[1].split('=')[1] not\n in mergelist]\n avi_config['Pool'] = [pools for pools in avi_config['Pool'] if pools[\n 'name'] not in mergelist]\n\n def add_policy(self, policy, updated_vs_name, avi_config, tmp_policy_ref,\n vs_obj, tenant_name, cloud_name, prefix, used_poolgrp_ref):\n \"\"\"\n This method is used to add policy objects to AVI and also add\n reference in VS\n :param policy: policy object\n :param updated_vs_name: vs name\n :param avi_config: avi config dict\n :param tmp_policy_ref: list of policy ref which are already used\n :param vs_obj: vs object\n :param tenant_name: name of tenant\n :param cloud_name: name of cloud\n :param prefix: prefix\n :param used_poolgrp_ref: list of used pool group ref\n :return:\n \"\"\"\n if policy['name'] in tmp_policy_ref:\n # clone the http policy set if it is referenced to other VS\n policy = self.clone_http_policy_set(policy, updated_vs_name,\n avi_config, tenant_name, cloud_name, used_poolgrp_ref,\n userprefix=prefix)\n updated_http_policy_ref = self.get_object_ref(policy['name'],\n OBJECT_TYPE_HTTP_POLICY_SET, tenant_name)\n\n tmp_policy_ref.append(policy['name'])\n http_policies = {\n 'index': 11,\n 'http_policy_set_ref': updated_http_policy_ref\n }\n if not vs_obj.get('http_policies'):\n vs_obj['http_policies'] = []\n else:\n ind = max([policies['index'] for policies in vs_obj[\n 'http_policies']])\n http_policies['index'] = ind + 1\n vs_obj['http_policies'].append(http_policies)\n avi_config['HTTPPolicySet'].append(policy)\n\n def build_redirect_action_dict(self, redirect_url, enable_ssl):\n \"\"\"\n This method returns a redirect action dict\n :param redirect_url: redirect url\n :param enable_ssl: flag for ssl enable\n :return:\n \"\"\"\n redirect_url = self.parse_url(redirect_url)\n protocol = str(redirect_url.scheme).upper()\n hostname = str(redirect_url.hostname)\n pathstring = str(redirect_url.path)\n querystring = str(redirect_url.query)\n full_path = '%s?%s' % (pathstring, querystring) if pathstring and \\\n querystring else pathstring\n protocol = enable_ssl and 'HTTPS' or 'HTTP' if not protocol else \\\n protocol\n action = {\n 'protocol': protocol\n }\n if hostname:\n action.update({'host':\n {\n 'type': 'URI_PARAM_TYPE_TOKENIZED',\n 'tokens': [{\n 'type': 
'URI_TOKEN_TYPE_STRING',\n 'str_value': hostname,\n 'start_index': '0',\n 'end_index': '65535'\n }]\n }\n })\n if full_path:\n action.update({'path':\n {\n 'type': 'URI_PARAM_TYPE_TOKENIZED',\n 'tokens': [{\n 'type': 'URI_TOKEN_TYPE_STRING',\n 'str_value': full_path,\n 'start_index': '0',\n 'end_index': '65535'\n }]\n }\n })\n return action\n\n def create_http_to_https_custom_profile(self):\n '''\n\n :return: custom application profile dict\n '''\n return {\n 'name': \"ns-migrate-http\",\n 'type': \"APPLICATION_PROFILE_TYPE_HTTP\",\n 'tenant_ref': \"/api/tenant/?name=admin\",\n 'preserve_client_ip': False,\n 'http_profile': {\n 'max_rps_uri': 0,\n 'keepalive_header': False,\n 'max_rps_cip_uri': 0,\n 'x_forwarded_proto_enabled': False,\n 'connection_multiplexing_enabled': True,\n 'websockets_enabled': True,\n 'enable_request_body_buffering': False,\n 'hsts_enabled': False,\n 'xff_enabled': True,\n 'disable_keepalive_posts_msie6': True,\n 'keepalive_timeout': 30000,\n 'ssl_client_certificate_mode': \"SSL_CLIENT_CERTIFICATE_NONE\",\n 'http_to_https': True,\n 'max_bad_rps_cip_uri': 0,\n 'client_body_timeout': 30000,\n 'httponly_enabled': False,\n 'hsts_max_age': 365,\n 'max_bad_rps_cip': 0,\n 'server_side_redirect_to_https': False,\n 'client_max_header_size': 12,\n 'client_max_request_size': 48,\n 'max_rps_unknown_uri': 0,\n 'post_accept_timeout': 30000,\n 'client_header_timeout': 10000,\n 'secure_cookie_enabled': False,\n 'xff_alternate_name': \"X-Forwarded-For\",\n 'max_rps_cip': 0,\n 'client_max_body_size': 0,\n 'max_rps_unknown_cip': 0,\n 'allow_dots_in_header_name': False,\n 'max_bad_rps_uri': 0,\n 'use_app_keepalive_timeout': False\n },\n 'dos_rl_profile': {\n 'rl_profile': {\n 'client_ip_connections_rate_limit': {\n 'explicit_tracking': False,\n 'action': {\n 'status_code': \"HTTP_LOCAL_RESPONSE_STATUS_CODE_429\",\n 'type': \"RL_ACTION_NONE\"\n },\n 'fine_grain': False\n }\n },\n 'dos_profile': {\n 'thresh_period': 5\n }\n }\n }\n\n def correct_vs_ref(self, avi_config):\n \"\"\"\n This method corrects the reference of VS to different objects\n :param avi_config: avi configuration dict\n :return:\n \"\"\"\n global csv_writer_dict_list\n avi_graph = self.make_graph(avi_config)\n csv_dict_sub = [row for row in csv_writer_dict_list if row[\n 'Netscaler Command'] not in ('add lb vserver',\n 'add cs vserver') and row[\n 'Status'] in (STATUS_PARTIAL,\n STATUS_SUCCESSFUL)]\n for dict_row in csv_dict_sub:\n obj = dict_row['AVI Object']\n if isinstance(obj, str) and obj.startswith('{'):\n vs = []\n if '__/__' in obj:\n for dataobj in obj.split('__/__'):\n obj = eval(dataobj)\n self.add_vs_ref(obj, avi_graph, vs)\n else:\n obj = eval(obj)\n self.add_vs_ref(obj, avi_graph, vs)\n if vs:\n dict_row['VS Reference'] = str(list(set(vs)))\n else:\n dict_row['VS Reference'] = STATUS_NOT_IN_USE\n\n def add_vs_ref(self, obj, avi_graph, vs):\n \"\"\"\n Helper method for adding vs ref\n :param obj: object\n :param avi_graph: avi graph\n :param vs: VS list\n :return:\n \"\"\"\n obj_name = obj.get('name', obj.get('hostname'))\n if obj_name:\n if avi_graph.has_node(obj_name):\n LOG.debug(\"Checked predecessor for %s\", obj_name)\n predecessor = list(avi_graph.predecessors(obj_name))\n if predecessor:\n self.get_predecessor(predecessor, avi_graph, vs)\n else:\n LOG.debug(\"Object %s may be merged or orphaned\", obj_name)\n\n def get_predecessor(self, predecessor, avi_graph, vs):\n \"\"\"\n This method gets the predecessor of the object\n :param predecessor: predecessor list\n :param avi_graph: avi graph\n :param 
vs: VS list\n :return:\n \"\"\"\n if len(predecessor) > 1:\n for node in predecessor:\n nodelist = [node]\n self.get_predecessor(nodelist, avi_graph, vs)\n elif len(predecessor):\n node_obj = [nod for nod in list(avi_graph.nodes().data()) if\n nod[0] == predecessor[0]]\n if node_obj and (node_obj[0][1]['type'] == 'VS' or 'VS' in node_obj[\n 0][1]['type']):\n LOG.debug(\"Predecessor %s found\", predecessor[0])\n vs.extend(predecessor)\n else:\n LOG.debug(\"Checked predecessor for %s\", predecessor[0])\n nodelist = list(avi_graph.predecessors(predecessor[0]))\n self.get_predecessor(nodelist, avi_graph, vs)\n else:\n LOG.debug(\"No more predecessor\")\n"
] |
[
[
"pandas.DataFrame",
"pandas.pivot_table",
"pandas.ExcelWriter"
]
] |
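The `write_status_report_and_pivot_table_in_xlsx` method in the row above is where the three pandas calls listed in its `apis` cell appear. Below is a minimal sketch of the same status-sheet-plus-pivot layout, assuming hypothetical row data and an installed openpyxl engine; the original's `values=[], aggfunc=[len]` call is expressed here with an explicit counter column, and its `load_workbook`/`master_writer.book` append step with a single context-managed `ExcelWriter`:

import pandas as pd

# Hypothetical status rows shaped like the converter's csv_writer_dict_list entries.
rows = [
    {"Status": "SUCCESSFUL", "Netscaler Command": "add lb vserver"},
    {"Status": "PARTIAL", "Netscaler Command": "add lb vserver"},
    {"Status": "SUCCESSFUL", "Netscaler Command": "add service"},
]
df = pd.DataFrame(rows)

# Count rows per (Status, Command) pair for the pivot sheet.
df["Count"] = 1
pivot = pd.pivot_table(df, index=["Status", "Netscaler Command"],
                       values="Count", aggfunc="sum", fill_value=0)

# One writer, two sheets: the raw status rows and the pivot summary.
with pd.ExcelWriter("report.xlsx", engine="openpyxl") as writer:
    df.to_excel(writer, sheet_name="Status Sheet", index=False)
    pivot.to_excel(writer, sheet_name="Pivot Sheet")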
Exhorder6/tvm
|
[
"7e3f068373937c0ae08d58f67b84030a027db1c9",
"7e3f068373937c0ae08d58f67b84030a027db1c9"
] |
[
"tutorials/autotvm/tune_relay_mobile_gpu.py",
"tests/python/relay/dyn/test_dynamic_op_level6.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nAuto-tuning a Convolutional Network for Mobile GPU\n==================================================\n**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Eddie Yan <https://github.com/eqy>`_\n\nAuto-tuning for a specific device is critical for getting the best\nperformance. This is a tutorial about how to tune a whole convolutional\nnetwork.\n\nThe operator implementation for Mobile GPU in TVM is written in template form.\nThe template has many tunable knobs (tile factor, vectorization, unrolling, etc).\nWe will tune all convolution, depthwise convolution and dense operators\nin the neural network. After tuning, we produce a log file which stores\nthe best knob values for all required operators. When the TVM compiler compiles\nthese operators, it will query this log file to get the best knob values.\n\nWe also released pre-tuned parameters for some arm devices. You can go to\n`Mobile GPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#mobile-gpu>`_\nto see the results.\n\nNote that this tutorial will not run on Windows or recent versions of macOS. To\nget it to run, you will need to wrap the body of this tutorial in a :code:`if\n__name__ == \"__main__\":` block.\n\"\"\"\n\n######################################################################\n# Install dependencies\n# --------------------\n# To use the autotvm package in tvm, we need to install some extra dependencies.\n# (change \"3\" to \"2\" if you use python2):\n#\n# .. code-block:: bash\n#\n# pip3 install --user psutil xgboost tornado cloudpickle\n#\n# To make TVM run faster during tuning, it is recommended to use cython\n# as FFI of tvm. In the root directory of tvm, execute\n# (change \"3\" to \"2\" if you use python2):\n#\n# .. code-block:: bash\n#\n# pip3 install --user cython\n# sudo make cython3\n#\n# Now return to python code. 
Import packages.\n\nimport os\n\nimport numpy as np\n\nimport tvm\nfrom tvm import relay, autotvm\nimport tvm.relay.testing\nfrom tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner\nfrom tvm.contrib.utils import tempdir\nimport tvm.contrib.graph_executor as runtime\n\n#################################################################\n# Define network\n# --------------\n# First we need to define the network in relay frontend API.\n# We can load some pre-defined network from :code:`relay.testing`.\n# We can also load models from MXNet, ONNX and TensorFlow.\n\n\ndef get_network(name, batch_size):\n \"\"\"Get the symbol definition and random weight of a network\"\"\"\n input_shape = (batch_size, 3, 224, 224)\n output_shape = (batch_size, 1000)\n\n if \"resnet\" in name:\n n_layer = int(name.split(\"-\")[1])\n mod, params = relay.testing.resnet.get_workload(\n num_layers=n_layer, batch_size=batch_size, dtype=dtype\n )\n elif \"vgg\" in name:\n n_layer = int(name.split(\"-\")[1])\n mod, params = relay.testing.vgg.get_workload(\n num_layers=n_layer, batch_size=batch_size, dtype=dtype\n )\n elif name == \"mobilenet\":\n mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)\n elif name == \"squeezenet_v1.1\":\n mod, params = relay.testing.squeezenet.get_workload(\n batch_size=batch_size, version=\"1.1\", dtype=dtype\n )\n elif name == \"inception_v3\":\n input_shape = (batch_size, 3, 299, 299)\n mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)\n elif name == \"mxnet\":\n # an example for mxnet model\n from mxnet.gluon.model_zoo.vision import get_model\n\n block = get_model(\"resnet18_v1\", pretrained=True)\n mod, params = relay.frontend.from_mxnet(block, shape={\"data\": input_shape}, dtype=dtype)\n net = mod[\"main\"]\n net = relay.Function(\n net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs\n )\n mod = tvm.IRModule.from_expr(net)\n else:\n raise ValueError(\"Unsupported network: \" + name)\n\n return mod, params, input_shape, output_shape\n\n\n#################################################################\n# .. _tutorials-autotvm-start-rpc-tracker:\n\n#################################################################\n# Start RPC Tracker\n# -----------------\n# TVM uses RPC session to communicate with ARM boards.\n# During tuning, the tuner will send the generated code to the board and\n# measure the speed of code on the board.\n#\n# To scale up the tuning, TVM uses RPC Tracker to manage distributed devices.\n# The RPC Tracker is a centralized controller node. We can register all devices to\n# the tracker. For example, if we have 10 phones, we can register all of them\n# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.\n#\n# To start an RPC tracker, run this command on the host machine. The tracker is\n# required during the whole tuning process, so we need to open a new terminal for\n# this command:\n#\n# .. code-block:: bash\n#\n# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190\n#\n# The expected output is\n#\n# .. code-block:: bash\n#\n# INFO:RPCTracker:bind to 0.0.0.0:9190\n\n#################################################################\n# Register Devices to RPC Tracker\n# -----------------------------------\n# Now we can register our devices to the tracker. 
The first step is to\n# build the TVM runtime for the ARM devices.\n#\n# * For Linux:\n# Follow this section :ref:`build-tvm-runtime-on-device` to build\n# the TVM runtime on the device. Then register the device to tracker by\n#\n# .. code-block:: bash\n#\n# python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399\n#\n# (replace :code:`[HOST_IP]` with the IP address of your host machine)\n#\n# * For Android:\n# Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to\n# install TVM RPC APK on the android device. Make sure you can pass the android RPC test.\n# Then you have already registered your device. During tuning, you have to go to developer option\n# and enable \"Keep screen awake during changing\" and charge your phone to make it stable.\n#\n# After registering devices, we can confirm it by querying rpc_tracker\n#\n# .. code-block:: bash\n#\n# python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190\n#\n# For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399,\n# the output can be\n#\n# .. code-block:: bash\n#\n# Queue Status\n# ----------------------------------\n# key total free pending\n# ----------------------------------\n# mate10pro 2 2 0\n# rk3399 2 2 0\n# rpi3b 11 11 0\n# ----------------------------------\n#\n# You can register multiple devices to the tracker to accelerate the measurement in tuning.\n\n###########################################\n# Set Tuning Options\n# ------------------\n# Before tuning, we should apply some configurations. Here I use an RK3399 board\n# as example. In your setting, you should modify the target and device_key accordingly.\n# set :code:`use_android` to True if you use android phone.\n\n#### DEVICE CONFIG ####\n# Replace \"aarch64-linux-gnu\" with the correct target of your board.\n# This target host is used for cross compilation. You can query it by :code:`gcc -v` on your device.\ntarget = tvm.target.Target(\"opencl -device=mali\", host=\"llvm -mtriple=aarch64-linux-gnu\")\n\n# Also replace this with the device key in your tracker\ndevice_key = \"rk3399\"\n\n# Set this to True if you use android phone\nuse_android = False\n\n#### TUNING OPTION ####\nnetwork = \"resnet-18\"\nlog_file = \"%s.%s.log\" % (device_key, network)\ndtype = \"float32\"\n\ntuning_option = {\n \"log_filename\": log_file,\n \"tuner\": \"xgb\",\n \"n_trial\": 1000,\n \"early_stopping\": 450,\n \"measure_option\": autotvm.measure_option(\n builder=autotvm.LocalBuilder(build_func=\"ndk\" if use_android else \"default\"),\n runner=autotvm.RPCRunner(\n device_key,\n host=\"127.0.0.1\",\n port=9190,\n number=10,\n timeout=5,\n ),\n ),\n}\n\n####################################################################\n#\n# .. 
note:: How to set tuning options\n#\n# In general, the default values provided here work well.\n# If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping` larger,\n# which makes the tuning run longer.\n# If your device runs very slow or your conv2d operators have many GFLOPs, considering to\n# set timeout larger.\n#\n\n###################################################################\n# Begin Tuning\n# ------------\n# Now we can extract tuning tasks from the network and begin tuning.\n# Here, we provide a simple utility function to tune a list of tasks.\n# This function is just an initial implementation which tunes them in sequential order.\n# We will introduce a more sophisticated tuning scheduler in the future.\n\n# You can skip the implementation of this function for this tutorial.\ndef tune_tasks(\n tasks,\n measure_option,\n tuner=\"xgb\",\n n_trial=1000,\n early_stopping=None,\n log_filename=\"tuning.log\",\n use_transfer_learning=True,\n):\n # create tmp log file\n tmp_log_file = log_filename + \".tmp\"\n if os.path.exists(tmp_log_file):\n os.remove(tmp_log_file)\n\n for i, tsk in enumerate(reversed(tasks)):\n prefix = \"[Task %2d/%2d] \" % (i + 1, len(tasks))\n\n # create tuner\n if tuner == \"xgb\" or tuner == \"xgb-rank\":\n tuner_obj = XGBTuner(tsk, loss_type=\"rank\")\n elif tuner == \"ga\":\n tuner_obj = GATuner(tsk, pop_size=50)\n elif tuner == \"random\":\n tuner_obj = RandomTuner(tsk)\n elif tuner == \"gridsearch\":\n tuner_obj = GridSearchTuner(tsk)\n else:\n raise ValueError(\"Invalid tuner: \" + tuner)\n\n if use_transfer_learning:\n if os.path.isfile(tmp_log_file):\n tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))\n\n # do tuning\n tsk_trial = min(n_trial, len(tsk.config_space))\n tuner_obj.tune(\n n_trial=tsk_trial,\n early_stopping=early_stopping,\n measure_option=measure_option,\n callbacks=[\n autotvm.callback.progress_bar(tsk_trial, prefix=prefix),\n autotvm.callback.log_to_file(tmp_log_file),\n ],\n )\n\n # pick best records to a cache file\n autotvm.record.pick_best(tmp_log_file, log_filename)\n os.remove(tmp_log_file)\n\n\n########################################################################\n# Finally, we launch tuning jobs and evaluate the end-to-end performance.\n\n\ndef tune_and_evaluate(tuning_opt):\n # extract workloads from relay program\n print(\"Extract tasks...\")\n mod, params, input_shape, _ = get_network(network, batch_size=1)\n tasks = autotvm.task.extract_from_program(\n mod[\"main\"],\n target=target,\n params=params,\n ops=(relay.op.get(\"nn.conv2d\"),),\n )\n\n # run tuning tasks\n print(\"Tuning...\")\n tune_tasks(tasks, **tuning_opt)\n\n # compile kernels with history best records\n with autotvm.apply_history_best(log_file):\n print(\"Compile...\")\n with tvm.transform.PassContext(opt_level=3):\n lib = relay.build_module.build(mod, target=target, params=params)\n # export library\n tmp = tempdir()\n if use_android:\n from tvm.contrib import ndk\n\n filename = \"net.so\"\n lib.export_library(tmp.relpath(filename), ndk.create_shared)\n else:\n filename = \"net.tar\"\n lib.export_library(tmp.relpath(filename))\n\n # upload module to device\n print(\"Upload...\")\n remote = autotvm.measure.request_remote(device_key, \"127.0.0.1\", 9190, timeout=10000)\n remote.upload(tmp.relpath(filename))\n rlib = remote.load_module(filename)\n\n # upload parameters to device\n dev = remote.device(str(target), 0)\n module = runtime.GraphModule(rlib[\"default\"](dev))\n data_tvm = 
tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))\n module.set_input(\"data\", data_tvm)\n\n # evaluate\n print(\"Evaluate inference time cost...\")\n ftimer = module.module.time_evaluator(\"run\", dev, number=1, repeat=30)\n prof_res = np.array(ftimer().results) * 1000 # convert to millisecond\n print(\n \"Mean inference time (std dev): %.2f ms (%.2f ms)\"\n % (np.mean(prof_res), np.std(prof_res))\n )\n\n\n# We do not run the tuning in our webpage server since it takes too long.\n# Uncomment the following line to run it by yourself.\n\n# tune_and_evaluate(tuning_option)\n\n######################################################################\n# Sample Output\n# -------------\n# The tuning needs to compile many programs and extract feature from them.\n# So a high performance CPU is recommended.\n# One sample output is listed below. It takes about 3 hours on a 32T AMD Ryzen Threadripper.\n#\n# .. code-block:: bash\n#\n# Extract tasks...\n# Tuning...\n# [Task 1/17] Current/Best: 25.30/ 39.12 GFLOPS | Progress: (992/1000) | 751.22 s Done.\n# [Task 2/17] Current/Best: 40.70/ 45.50 GFLOPS | Progress: (736/1000) | 545.46 s Done.\n# [Task 3/17] Current/Best: 38.83/ 42.35 GFLOPS | Progress: (992/1000) | 1549.85 s Done.\n# [Task 4/17] Current/Best: 23.31/ 31.02 GFLOPS | Progress: (640/1000) | 1059.31 s Done.\n# [Task 5/17] Current/Best: 0.06/ 2.34 GFLOPS | Progress: (544/1000) | 305.45 s Done.\n# [Task 6/17] Current/Best: 10.97/ 17.20 GFLOPS | Progress: (992/1000) | 1050.00 s Done.\n# [Task 7/17] Current/Best: 8.98/ 10.94 GFLOPS | Progress: (928/1000) | 421.36 s Done.\n# [Task 8/17] Current/Best: 4.48/ 14.86 GFLOPS | Progress: (704/1000) | 582.60 s Done.\n# [Task 9/17] Current/Best: 10.30/ 25.99 GFLOPS | Progress: (864/1000) | 899.85 s Done.\n# [Task 10/17] Current/Best: 11.73/ 12.52 GFLOPS | Progress: (608/1000) | 304.85 s Done.\n# [Task 11/17] Current/Best: 15.26/ 18.68 GFLOPS | Progress: (800/1000) | 747.52 s Done.\n# [Task 12/17] Current/Best: 17.48/ 26.71 GFLOPS | Progress: (1000/1000) | 1166.40 s Done.\n# [Task 13/17] Current/Best: 0.96/ 11.43 GFLOPS | Progress: (960/1000) | 611.65 s Done.\n# [Task 14/17] Current/Best: 17.88/ 20.22 GFLOPS | Progress: (672/1000) | 670.29 s Done.\n# [Task 15/17] Current/Best: 11.62/ 13.98 GFLOPS | Progress: (736/1000) | 449.25 s Done.\n# [Task 16/17] Current/Best: 19.90/ 23.83 GFLOPS | Progress: (608/1000) | 708.64 s Done.\n# [Task 17/17] Current/Best: 17.98/ 22.75 GFLOPS | Progress: (736/1000) | 1122.60 s Done.\n# Compile...\n# Upload...\n# Evaluate inference time cost...\n# Mean inference time (std dev): 128.05 ms (7.74 ms)\n#\n\n######################################################################\n#\n# .. note:: **Experiencing Difficulties?**\n#\n# The auto tuning module is error-prone. If you always see \" 0.00/ 0.00 GFLOPS\",\n# then there must be something wrong.\n#\n# First, make sure you set the correct configuration of your device.\n# Then, you can print debug information by adding these lines in the beginning\n# of the script. It will print every measurement result, where you can find useful\n# error messages.\n#\n# .. code-block:: python\n#\n# import logging\n# logging.getLogger('autotvm').setLevel(logging.DEBUG)\n#\n# Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\" Support level6 operator test cases.\n\"\"\"\nimport numpy as np\nimport tvm\nfrom tvm import te\nfrom tvm import relay\nimport tvm.testing\n\n\n@tvm.testing.uses_gpu\ndef test_dynamic_topk():\n def verify_topk(k, axis, ret_type, is_ascend, dtype):\n shape = (20, 100)\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n k_var = relay.var(\"x\", relay.TensorType((1,), \"float32\"))\n out = relay.topk(x, k_var, axis, ret_type, is_ascend, dtype)\n if isinstance(out, relay.expr.TupleWrapper):\n out = out.astuple()\n func = relay.Function([x, k_var], out)\n\n np_data = np.random.uniform(size=shape).astype(\"float32\")\n if is_ascend:\n np_indices = np.argsort(np_data, axis=axis)\n else:\n np_indices = np.argsort(-np_data, axis=axis)\n kk = k if k >= 1 else shape[axis]\n if axis == 0:\n np_indices = np_indices[:kk, :]\n np_values = np.zeros(np_indices.shape).astype(\"float32\")\n for i in range(shape[1]):\n np_values[:, i] = np_data[np_indices[:, i], i]\n else:\n np_indices = np_indices[:, :kk]\n np_values = np.zeros(np_indices.shape).astype(\"float32\")\n for i in range(shape[0]):\n np_values[i, :] = np_data[i, np_indices[i, :]]\n np_indices = np_indices.astype(dtype)\n\n for target, dev in tvm.testing.enabled_targets():\n for kind in [\"vm\", \"debug\"]:\n mod = tvm.ir.IRModule.from_expr(func)\n intrp = relay.create_executor(kind, mod=mod, device=dev, target=target)\n op_res = intrp.evaluate()(np_data, np.array([k]).astype(\"float32\"))\n if ret_type == \"both\":\n tvm.testing.assert_allclose(op_res[0].numpy(), np_values)\n tvm.testing.assert_allclose(op_res[1].numpy(), np_indices)\n elif ret_type == \"values\":\n tvm.testing.assert_allclose(op_res.numpy(), np_values)\n else:\n tvm.testing.assert_allclose(op_res.numpy(), np_indices)\n\n np.random.seed(0)\n for k in [0, 1, 5]:\n for axis in [0, -1, 1]:\n for ret_type in [\"both\", \"values\", \"indices\"]:\n verify_topk(k, axis, ret_type, True, \"int64\")\n verify_topk(k, axis, ret_type, False, \"float32\")\n\n\nif __name__ == \"__main__\":\n test_dynamic_topk()\n"
] |
[
[
"numpy.std",
"numpy.random.uniform",
"numpy.mean"
],
[
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.argsort"
]
] |
kanpurin/dctimagetransform
|
[
"b5950945922e4eafc17bb88fd28dfe5167ca3529"
] |
[
"dct_image_transform/reflection.py"
] |
[
"import numpy as np\nfrom dct_image_transform.dct import dct2\n\ndef reflection(image,axis=0):\n '''\n 8x8ใฎใใญใใฏใใจใซ้ขๆฃใณใตใคใณๅคๆใใใ็ปๅ(ไปฅไธDCT็ปๅ)ใ้กๅๅคๆใใ.\n\n Parameters\n ----------\n image:ๅน
ใจ้ซใใ8ใฎๅๆฐใงใใ็ปๅใ่กจใ2ๆฌกๅ
้
ๅ. 8ใฎๅๆฐใงใชใๅ ดๅใฎๅไฝใฏๆชๅฎ็พฉ.\n \n axis:ๅคๆใใ่ปธ. defalutใฏ`axis=0`\n\n Returns\n -------\n `image`ใ้กๅๅคๆใใDCT็ปๅใ่กจใ2ๆฌกๅ
้
ๅใ่ฟใ. `image`ใฎๅคใฏๅคใใใชใ.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.arange(64).reshape((8,8))\n >>> a\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 60, 61, 62, 63]])\n >>> dct_image_transform.reflection.reflection(a,axis=0)\n array([[ 5.77395663e-15, 1.00000000e+00, 2.00000000e+00,\n 3.00000000e+00, 4.00000000e+00, 5.00000000e+00,\n 6.00000000e+00, 7.00000000e+00],\n [-8.00000000e+00, -9.00000000e+00, -1.00000000e+01,\n -1.10000000e+01, -1.20000000e+01, -1.30000000e+01,\n -1.40000000e+01, -1.50000000e+01],\n [ 1.60000000e+01, 1.70000000e+01, 1.80000000e+01,\n 1.90000000e+01, 2.00000000e+01, 2.10000000e+01,\n 2.20000000e+01, 2.30000000e+01],\n [-2.40000000e+01, -2.50000000e+01, -2.60000000e+01,\n -2.70000000e+01, -2.80000000e+01, -2.90000000e+01,\n -3.00000000e+01, -3.10000000e+01],\n [ 3.20000000e+01, 3.30000000e+01, 3.40000000e+01,\n 3.50000000e+01, 3.60000000e+01, 3.70000000e+01,\n 3.80000000e+01, 3.90000000e+01],\n [-4.00000000e+01, -4.10000000e+01, -4.20000000e+01,\n -4.30000000e+01, -4.40000000e+01, -4.50000000e+01,\n -4.60000000e+01, -4.70000000e+01],\n [ 4.80000000e+01, 4.90000000e+01, 5.00000000e+01,\n 5.10000000e+01, 5.20000000e+01, 5.30000000e+01,\n 5.40000000e+01, 5.50000000e+01],\n [-5.60000000e+01, -5.70000000e+01, -5.80000000e+01,\n -5.90000000e+01, -6.00000000e+01, -6.10000000e+01,\n -6.20000000e+01, -6.30000000e+01]])\n '''\n R = np.zeros((8,8),dtype=np.float)\n for i in range(8):\n R[i,7-i] = 1\n R = dct2(R)\n if axis == 0:\n return np.vstack(list(map(lambda m:np.dot(R,m),np.flip(np.vsplit(image,range(8,image.shape[1],8)),0))))\n elif axis == 1:\n return np.hstack(list(map(lambda m:np.dot(m,R),np.flip(np.hsplit(image,range(8,image.shape[1],8)),0))))"
] |
[
[
"numpy.dot",
"numpy.zeros"
]
] |
donghwijung/LoRCoN-LO
|
[
"37d4f97d2ae01a2dca1d086579ca3efaab77553b"
] |
[
"utils/plot.py"
] |
[
"import os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# import sys, os\n# sys.path.append(os.path.join(os.path.dirname(__file__), 'utils'))\n\nimport process_data\nimport common\n\ndef plot_gt(Y_origin_data, pose_folder, preprocessed_folder, data_seqs, seq_sizes, dim=\"2d\", save_graph=True, dataset=\"KITTI\"):\n start_idx = 0\n end_idx = 0\n additional_row = np.array([0, 0, 0, 1], dtype=np.float64)\n for seq in data_seqs:\n end_idx += seq_sizes[seq]\n origin_poses = np.zeros((Y_origin_data[start_idx:end_idx].shape[0], 4,4),dtype=np.float64)\n for idx, row in enumerate(Y_origin_data[start_idx:end_idx]):\n new_pose = np.array(list(map(float, row.strip().split(\" \"))), dtype=np.float64)\n new_pose = np.concatenate((new_pose, additional_row))\n new_pose = new_pose.reshape(4,4)\n origin_poses[idx] = new_pose\n fig = plt.figure(figsize=(10,10))\n\n if dim == \"2d\":\n plt.scatter(origin_poses[:,0,3],origin_poses[:,1,3], c=origin_poses[:,2,3], s=20, alpha=0.5)\n else: # 3d\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(origin_poses[:,0,3],origin_poses[:,1,3],origin_poses[:,2,3],c=origin_poses[:,1,3], s=20, alpha=0.5)\n\n if save_graph:\n graph_folder = os.path.join('result', dataset, 'graph')\n os.makedirs(graph_folder, exist_ok=True)\n plt.savefig(os.path.join(graph_folder, f\"gt_{seq}_{dim}.png\"))\n # plt.close(fig)\n start_idx += seq_sizes[seq]\n \ndef plot_results(Y_origin_data, Y_estimated_data, data_seqs, rnn_size, seq_sizes, dim=\"2d\", save_graph=True, dataset=\"KITTI\"): \n start_idx = 0\n end_idx = 0\n additional_row = np.array([0, 0, 0, 1], dtype=np.float64)\n for i, seq in enumerate(data_seqs):\n end_idx += seq_sizes[seq]\n poses = np.zeros((Y_origin_data[start_idx:end_idx].shape[0], 4,4),dtype=np.float64)\n\n for idx in range(rnn_size):\n current_pose = np.array(list(map(float, Y_origin_data[start_idx+idx].strip().split(\" \"))), dtype=np.float64)\n current_pose = np.concatenate((current_pose, additional_row))\n current_pose = current_pose.reshape(4,4)\n poses[idx] = current_pose\n\n for idx, relative_pose in enumerate(Y_estimated_data[start_idx-i*rnn_size:end_idx-(i+1)*rnn_size]):\n rot_mat = common.euler_to_rot_mat(relative_pose[5],relative_pose[4],relative_pose[3])\n trans_mat = np.identity(4)\n trans_mat[:3,:3]=rot_mat\n trans_mat[0,3]=relative_pose[0]\n trans_mat[1,3]=relative_pose[1]\n trans_mat[2,3]=relative_pose[2]\n\n current_pose = np.dot(current_pose, trans_mat)\n poses[idx + rnn_size] = current_pose\n\n fig = plt.figure(figsize=(10,10))\n if dim == \"2d\":\n plt.scatter(poses[:,0,3],poses[:,1,3], c=poses[:,2,3], s=20, alpha=0.5)\n else: # 3d\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(poses[:,0,3],poses[:,1,3],poses[:,2,3],c=poses[:,1,3], s=20, alpha=0.5)\n\n if save_graph:\n graph_folder = os.path.join('result', dataset, 'graph')\n os.makedirs(graph_folder, exist_ok=True)\n plt.savefig(os.path.join(graph_folder, f\"est_{seq}_{dim}.png\"))\n # plt.close(fig)\n start_idx += seq_sizes[seq]"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.identity",
"matplotlib.pyplot.scatter"
]
] |
scpyork/GladAlert
|
[
"6b8622ee9ff5d53186e2d7225b856a8855fe1da8"
] |
[
"GLADalertTRASE/update_data/functions.py"
] |
[
"\nfrom new_alerts import *\nfrom PIL import Image # $ pip install pillow\nfrom scipy import sparse\nimport numpy as np\nimport re\n\nImage.MAX_IMAGE_PIXELS = None\n\ndef download(keep,tempdir):\n #print keep\n #class rt:pass\n name = keep.split('/')[-1]\n area = re.findall(r'_(\\d+[NESW\\b])',name)\n # current file position\n position = map(direction, area)\n #print position\n group = '|'.join((str(i) for i in position))\n\n date = keep.split('/')[-2]\n url = keep.replace('gs://','https://storage.cloud.google.com/')#+'?authuser=0'\n#2\n\n ## copy / download the files into the temp directory\n\n (os.popen('gsutil cp %s %s/%s >/dev/null 2>&1 && echo \"Copied: %s\" >> temp.log'%(keep,tempdir,name,keep))) #.read())\n\n #print (os.popen('gsutil cp %s %s/%s'%(keep,tempdir,name)).read(), 'gsutil cp %s %s/%s'%(keep,tempdir,name))\n\n\n ## Read image pixels using pillow library\n # >/dev/null 2>&1 && echo \"Copied: %s\" >> temp.log'\n im = Image.open('%s/%s'%(tempdir,name))\n ## Image pixels to a sparse array\n data = sparse.coo_matrix(im,int)\n ## remove downloaded file\n os.system('rm %s/%s'%(tempdir,name))\n\n\n data = np.array([\n data.row.astype(float)* 0.00025 + float(position[0]) ,\n #/float(data.shape[0])*(float(position[2]-position[0]))+position[0],\n data.col.astype(float)* 0.00025 + float(position[1]) ,\n #/float(data.shape[1])*(float(position[3]-position[1]))+position[1],\n data.data\n ])\n\n #print data[:,0],data[:,-1], position\n\n return data\n"
] |
[
[
"scipy.sparse.coo_matrix"
]
] |
rockyplum/vampy-host
|
[
"a410d680be2c15d76e31488db789ed30e6f34910"
] |
[
"test/test_process.py"
] |
[
"\nimport vamp\nimport numpy as np\nimport vamp.frames as fr\n\nplugin_key = \"vamp-test-plugin:vamp-test-plugin\"\nplugin_key_freq = \"vamp-test-plugin:vamp-test-plugin-freq\"\n\nrate = 44100\n\n# Throughout this file we have the assumption that the plugin gets run with a\n# blocksize of 1024, and with a step of 1024 for the time-domain version or 512\n# for the frequency-domain one. That is certainly expected to be the norm for a\n# plugin like this that declares no preference, and the Python Vamp module is\n# expected to follow the norm.\n\nblocksize = 1024\n\ndef input_data(n):\n # start at 1, not 0 so that all elts are non-zero\n return np.arange(n) + 1 \n\ndef test_process_n():\n buf = input_data(blocksize)\n results = list(vamp.process_audio(buf, rate, plugin_key, \"input-summary\"))\n assert len(results) == 1\n\ndef test_process_freq_n():\n buf = input_data(blocksize)\n results = list(vamp.process_audio(buf, rate, plugin_key_freq, \"input-summary\", {}))\n assert len(results) == 2 # one complete block starting at zero, one half-full\n\ndef test_process_default_output():\n # If no output is specified, we should get the first one (instants)\n buf = input_data(blocksize)\n results = list(vamp.process_audio(buf, rate, plugin_key, \"\", {}))\n assert len(results) == 10\n for i in range(len(results)):\n expectedTime = vamp.vampyhost.RealTime('seconds', i * 1.5)\n actualTime = results[i][\"timestamp\"]\n assert expectedTime == actualTime\n\ndef test_process_summary_param():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio(buf, rate, plugin_key, \"input-summary\", { \"produce_output\": 0 }))\n assert len(results) == 0\n\ndef test_process_multi_summary_param():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ \"input-summary\" ], { \"produce_output\": 0 }))\n assert len(results) == 0\n\ndef test_process_summary_param_bool():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio(buf, rate, plugin_key, \"input-summary\", { \"produce_output\": False }))\n assert len(results) == 0\n\ndef test_process_multi_summary_param_bool():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ \"input-summary\" ], { \"produce_output\": False }))\n assert len(results) == 0\n\ndef test_process_summary():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio(buf, rate, plugin_key, \"input-summary\", {}))\n assert len(results) == 10\n for i in range(len(results)):\n #\n # each feature has a single value, equal to the number of non-zero elts\n # in the input block (which is all of them, i.e. the blocksize) plus\n # the first elt (which is i * blockSize + 1)\n #\n expected = blocksize + i * blocksize + 1\n actual = results[i][\"values\"][0]\n assert actual == expected\n\ndef test_process_frames_summary():\n buf = input_data(blocksize * 10)\n ff = fr.frames_from_array(buf, blocksize, blocksize)\n results = list(vamp.process_frames(ff, rate, blocksize, plugin_key, \"input-summary\", {}))\n assert len(results) == 10\n for i in range(len(results)):\n #\n # each feature has a single value, equal to the number of non-zero elts\n # in the input block (which is all of them, i.e. 
the blocksize) plus\n # the first elt (which is i * blockSize + 1)\n #\n expected = blocksize + i * blocksize + 1\n actual = results[i][\"values\"][0]\n assert actual == expected\n\ndef test_process_multi_summary():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ \"input-summary\" ], {}))\n assert len(results) == 10\n for i in range(len(results)):\n #\n # each feature has a single value, equal to the number of non-zero elts\n # in the input block (which is all of them, i.e. the blocksize) plus\n # the first elt (which is i * blockSize + 1)\n #\n expected = blocksize + i * blocksize + 1\n actual = results[i][\"input-summary\"][\"values\"][0]\n assert actual == expected\n\ndef test_process_frames_multi_summary():\n buf = input_data(blocksize * 10)\n ff = fr.frames_from_array(buf, blocksize, blocksize)\n results = list(vamp.process_frames_multiple_outputs(ff, rate, blocksize, plugin_key, [ \"input-summary\" ], {}))\n assert len(results) == 10\n for i in range(len(results)):\n #\n # each feature has a single value, equal to the number of non-zero elts\n # in the input block (which is all of them, i.e. the blocksize) plus\n # the first elt (which is i * blockSize + 1)\n #\n expected = blocksize + i * blocksize + 1\n actual = results[i][\"input-summary\"][\"values\"][0]\n assert actual == expected\n\ndef test_process_freq_summary():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio(buf, rate, plugin_key_freq, \"input-summary\", {}))\n assert len(results) == 20\n for i in range(len(results)):\n #\n # sort of as above, but much much subtler:\n #\n # * the input block is converted to frequency domain but then converted\n # back within the plugin, so the values being reported are time-domain\n # ones but with windowing and FFT shift\n # \n # * the effect of FFT shift is that the first element in the\n # re-converted frame is actually the one that was at the start of the\n # second half of the original frame\n #\n # * and the last block is only half-full, so the \"first\" elt in that\n # one, which actually comes from just after the middle of the block,\n # will be zero\n #\n # * windowing does not affect the value of the first elt, because\n # (before fft shift) it came from the peak of the window shape where\n # the window value is 1\n #\n # * but windowing does affect the number of non-zero elts, because the\n # asymmetric window used has one value very close to zero in it\n #\n # * the step size (the increment in input value from one block to the\n # next) is only half the block size\n #\n expected = i * (blocksize/2) + blocksize/2 + 1 # \"first\" elt\n if (i == len(results)-1):\n expected = 0\n expected = expected + blocksize - 1 # non-zero elts\n actual = results[i][\"values\"][0]\n eps = 1e-6\n assert abs(actual - expected) < eps\n\ndef test_process_freq_summary_shift():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio(buf, rate, plugin_key_freq, \"input-summary\", {}, process_timestamp_method = vamp.vampyhost.SHIFT_DATA))\n assert len(results) == 20\n for i in range(len(results)):\n # as test_process_freq_summary, except that the input is effectively\n # padded by the adapter with an additional half-blocksize of zeros\n # before conversion\n if i == 0:\n # this block doesn't interact at all well with our test, we get\n # spurious low values in the block converted back within the plugin\n # because of the big discontinuity & window ripple after fftshift\n pass\n else:\n expected = (i-1) 
* (blocksize/2) + blocksize/2 + 1 # for \"first\" elt\n expected = expected + blocksize - 1 # non-zero elts\n actual = results[i][\"values\"][0]\n eps = 1e-6\n assert abs(actual - expected) < eps\n\ndef test_process_multi_freq_summary():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key_freq, [ \"input-summary\" ], {}))\n assert len(results) == 20\n for i in range(len(results)):\n expected = i * (blocksize/2) + blocksize/2 + 1 # \"first\" elt\n if (i == len(results)-1):\n expected = 0\n expected = expected + blocksize - 1 # non-zero elts\n actual = results[i][\"input-summary\"][\"values\"][0]\n eps = 1e-6\n assert abs(actual - expected) < eps\n\ndef test_process_timestamps():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio(buf, rate, plugin_key, \"input-timestamp\", {}))\n assert len(results) == 10\n for i in range(len(results)):\n # The timestamp should be the frame number of the first frame in the\n # input buffer\n expected = i * blocksize\n actual = results[i][\"values\"][0]\n assert actual == expected\n\ndef test_process_multi_timestamps():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ \"input-timestamp\" ]))\n assert len(results) == 10\n for i in range(len(results)):\n # The timestamp should be the frame number of the first frame in the\n # input buffer\n expected = i * blocksize\n actual = results[i][\"input-timestamp\"][\"values\"][0]\n assert actual == expected\n\ndef test_process_freq_timestamps():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio(buf, rate, plugin_key_freq, \"input-timestamp\", {}))\n assert len(results) == 20\n for i in range(len(results)):\n # The timestamp should be the frame number of the frame just beyond\n # half-way through the input buffer\n expected = i * (blocksize/2) + blocksize/2\n actual = results[i][\"values\"][0]\n if actual == 2047 and expected == 2048:\n print(\"This test fails because of a bug in the Vamp plugin SDK. Please update to SDK version 2.6.\")\n assert actual == expected\n\ndef test_process_freq_shift_timestamps():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio(buf, rate, plugin_key_freq, \"input-timestamp\", process_timestamp_method = vamp.vampyhost.SHIFT_DATA))\n assert len(results) == 20\n for i in range(len(results)):\n # The timestamp should be the frame number of the frame at the start of\n # the input buffer\n expected = i * (blocksize/2)\n actual = results[i][\"values\"][0]\n if actual == 2047 and expected == 2048:\n print(\"This test fails because of a bug in the Vamp plugin SDK. Please update to SDK version 2.6.\")\n assert actual == expected\n\ndef test_process_multi_freq_timestamps():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key_freq, [ \"input-timestamp\" ], {}))\n assert len(results) == 20\n for i in range(len(results)):\n # The timestamp should be the frame number of the frame just beyond\n # half-way through the input buffer\n expected = i * (blocksize/2) + blocksize/2\n actual = results[i][\"input-timestamp\"][\"values\"][0]\n if actual == 2047 and expected == 2048:\n print(\"This test fails because of a bug in the Vamp plugin SDK. 
Please update to SDK version 2.6.\")\n assert actual == expected\n\ndef test_process_blocksize_timestamps():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio(buf, rate, plugin_key, \"input-timestamp\", {}, block_size = blocksize * 2)) # step size defaults to block size\n assert len(results) == 5\n for i in range(len(results)):\n # The timestamp should be the frame number of the first frame in the\n # input buffer\n expected = i * blocksize * 2\n actual = results[i][\"values\"][0]\n assert actual == expected\n\ndef test_process_stepsize_timestamps():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio(buf, rate, plugin_key, \"input-timestamp\", {}, step_size = int(blocksize / 2)))\n assert len(results) == 20\n for i in range(len(results)):\n # The timestamp should be the frame number of the first frame in the\n # input buffer\n expected = (i * blocksize) / 2\n actual = results[i][\"values\"][0]\n assert actual == expected\n\ndef test_process_stepsize_blocksize_timestamps():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio(buf, rate, plugin_key, \"input-timestamp\", {}, block_size = blocksize * 2, step_size = int(blocksize / 2)))\n assert len(results) == 20\n for i in range(len(results)):\n # The timestamp should be the frame number of the first frame in the\n # input buffer\n expected = (i * blocksize) / 2\n actual = results[i][\"values\"][0]\n assert actual == expected\n\ndef test_process_multiple_outputs():\n buf = input_data(blocksize * 10)\n results = list(vamp.process_audio_multiple_outputs(buf, rate, plugin_key, [ \"input-summary\", \"input-timestamp\" ], {}))\n assert len(results) == 20\n si = 0\n ti = 0\n for r in results:\n assert \"input-summary\" in r or \"input-timestamp\" in r\n if \"input-summary\" in r:\n expected = blocksize + si * blocksize + 1\n actual = r[\"input-summary\"][\"values\"][0]\n assert actual == expected\n si = si + 1\n if \"input-timestamp\" in r:\n expected = ti * blocksize\n actual = r[\"input-timestamp\"][\"values\"][0]\n assert actual == expected\n ti = ti + 1\n"
] |
[
[
"numpy.arange"
]
] |
ACL2020-Submission/ACL2020
|
[
"2a3d6e26d22c650cad823c68b65ee315aa1fe22c"
] |
[
"onmt/utils/parse.py"
] |
[
"import configargparse as cfargparse\r\nimport os\r\n\r\nimport torch\r\n\r\nimport onmt.opts as opts\r\nfrom onmt.utils.logging import logger\r\n\r\n\r\nclass ArgumentParser(cfargparse.ArgumentParser):\r\n def __init__(\r\n self,\r\n config_file_parser_class=cfargparse.YAMLConfigFileParser,\r\n formatter_class=cfargparse.ArgumentDefaultsHelpFormatter,\r\n **kwargs):\r\n super(ArgumentParser, self).__init__(\r\n config_file_parser_class=config_file_parser_class,\r\n formatter_class=formatter_class,\r\n **kwargs)\r\n\r\n @classmethod\r\n def defaults(cls, *args):\r\n \"\"\"Get default arguments added to a parser by all ``*args``.\"\"\"\r\n dummy_parser = cls()\r\n for callback in args:\r\n callback(dummy_parser)\r\n defaults = dummy_parser.parse_known_args([])[0]\r\n return defaults\r\n\r\n @classmethod\r\n def update_model_opts(cls, model_opt):\r\n if model_opt.word_vec_size > 0:\r\n model_opt.src_word_vec_size = model_opt.word_vec_size\r\n model_opt.tgt_word_vec_size = model_opt.word_vec_size\r\n\r\n if model_opt.layers > 0:\r\n model_opt.enc_layers = model_opt.layers\r\n model_opt.dec_layers = model_opt.layers\r\n\r\n if model_opt.rnn_size > 0:\r\n model_opt.enc_rnn_size = model_opt.rnn_size\r\n model_opt.dec_rnn_size = model_opt.rnn_size\r\n\r\n model_opt.brnn = model_opt.encoder_type == \"brnn\"\r\n\r\n if model_opt.copy_attn_type is None:\r\n model_opt.copy_attn_type = model_opt.global_attention\r\n\r\n @classmethod\r\n def validate_model_opts(cls, model_opt):\r\n assert model_opt.model_type in [\"text\", \"img\", \"audio\", \"vec\"], \\\r\n \"Unsupported model type %s\" % model_opt.model_type\r\n\r\n # this check is here because audio allows the encoder and decoder to\r\n # be different sizes, but other model types do not yet\r\n same_size = model_opt.enc_rnn_size == model_opt.dec_rnn_size\r\n assert model_opt.model_type == 'audio' or same_size, \\\r\n \"The encoder and decoder rnns must be the same size for now\"\r\n\r\n assert model_opt.rnn_type != \"SRU\" or model_opt.gpu_ranks, \\\r\n \"Using SRU requires -gpu_ranks set.\"\r\n if model_opt.share_embeddings:\r\n if model_opt.model_type != \"text\":\r\n raise AssertionError(\r\n \"--share_embeddings requires --model_type text.\")\r\n\r\n @classmethod\r\n def ckpt_model_opts(cls, ckpt_opt):\r\n # Load default opt values, then overwrite with the opts in\r\n # the checkpoint. 
That way, if there are new options added,\r\n # the defaults are used.\r\n opt = cls.defaults(opts.model_opts)\r\n opt.__dict__.update(ckpt_opt.__dict__)\r\n return opt\r\n\r\n @classmethod\r\n def validate_train_opts(cls, opt):\r\n if opt.epochs:\r\n raise AssertionError(\r\n \"-epochs is deprecated please use -train_steps.\")\r\n if opt.truncated_decoder > 0 and max(opt.accum_count) > 1:\r\n raise AssertionError(\"BPTT is not compatible with -accum > 1\")\r\n\r\n if opt.gpuid:\r\n raise AssertionError(\r\n \"gpuid is deprecated see world_size and gpu_ranks\")\r\n if torch.cuda.is_available() and not opt.gpu_ranks:\r\n logger.info(\"WARNING: You have a CUDA device, \\\r\n should run with -gpu_ranks\")\r\n if opt.world_size < len(opt.gpu_ranks):\r\n raise AssertionError(\r\n \"parameter counts of -gpu_ranks must be less or equal \"\r\n \"than -world_size.\")\r\n if opt.world_size == len(opt.gpu_ranks) and \\\r\n min(opt.gpu_ranks) > 0:\r\n raise AssertionError(\r\n \"-gpu_ranks should have master(=0) rank \"\r\n \"unless -world_size is greater than len(gpu_ranks).\")\r\n assert len(opt.data_ids) == len(opt.data_weights), \\\r\n \"Please check -data_ids and -data_weights options!\"\r\n\r\n assert len(opt.dropout) == len(opt.dropout_steps), \\\r\n \"Number of dropout values must match accum_steps values\"\r\n\r\n assert len(opt.attention_dropout) == len(opt.dropout_steps), \\\r\n \"Number of attention_dropout values must match accum_steps values\"\r\n\r\n @classmethod\r\n def validate_translate_opts(cls, opt):\r\n if opt.beam_size != 1 and opt.random_sampling_topk != 1:\r\n raise ValueError('Can either do beam search OR random sampling.')\r\n\r\n @classmethod\r\n def validate_preprocess_args(cls, opt):\r\n assert opt.max_shard_size == 0, \\\r\n \"-max_shard_size is deprecated. Please use \\\r\n -shard_size (number of examples) instead.\"\r\n assert opt.shuffle == 0, \\\r\n \"-shuffle is not implemented. Please shuffle \\\r\n your data before pre-processing.\"\r\n\r\n assert len(opt.train_src) == len(opt.train_tgt), \\\r\n \"Please provide same number of src and tgt train files!\"\r\n\r\n assert len(opt.train_src) == len(opt.train_ids), \\\r\n \"Please provide proper -train_ids for your data!\"\r\n\r\n for file in opt.train_src + opt.train_tgt:\r\n assert os.path.isfile(file), \"Please check path of %s\" % file\r\n\r\n assert not opt.valid_src or os.path.isfile(opt.valid_src), \\\r\n \"Please check path of your valid src file!\"\r\n assert not opt.valid_tgt or os.path.isfile(opt.valid_tgt), \\\r\n \"Please check path of your valid tgt file!\"\r\n\r\n assert not opt.src_vocab or os.path.isfile(opt.src_vocab), \\\r\n \"Please check path of your src vocab!\"\r\n assert not opt.tgt_vocab or os.path.isfile(opt.tgt_vocab), \\\r\n \"Please check path of your tgt vocab!\"\r\n"
] |
[
[
"torch.cuda.is_available"
]
] |
jona-sassenhagen/URIAL
|
[
"ed4e9cc99bac0a7ec8772ad72c3d85581be71de0"
] |
[
"plot_mds_cond.py"
] |
[
"def plot_mds_cond(rdm):\n '''function to visualize RDM via multidimensional scaling'''\n\n # big kudos to Jona Sassenhagen for doing an amazing job\n # adding condition names and colors to the mds plot\n\n # import modules and functions\n import numpy as np\n import pandas as pd\n import seaborn as sns\n import matplotlib.pyplot as plt\n from sklearn import manifold\n from sklearn.decomposition import PCA\n from matplotlib.collections import LineCollection\n\n ## computation/transformation section\n\n # read in the rdm in .csv format, creating a data frame\n df = pd.read_csv(rdm, index_col=0)\n df.index = df.columns # set data frame index based on columns\n\n # set seed for mds\n seed = 0\n\n # create mds object\n mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,\n dissimilarity=\"precomputed\", n_jobs=1)\n # apply mds to data frame\n rdm_mds = mds.fit(df.values).embedding_\n\n # create new data frame from mds\n df_mds = pd.DataFrame(rdm_mds, index=df.index, columns=[\"dim1\", \"dim2\"])\n df_mds[\"cond\"] = df_mds.index # create condition column based on index\n\n # create pca object\n clf = PCA(n_components=2)\n\n # set rdm data frame based on data frame values\n rdm = pd.DataFrame(df.values)\n\n # scale data\n rdm = rdm.max() / rdm * 100\n rdm[np.isinf(rdm)] = 0\n\n # convert rdm data frame to array\n rdm = rdm.as_matrix()\n\n # apply pca to mds\n rdm_mds_pca = clf.fit_transform(rdm_mds)\n\n ## plotting section\n\n sns.set_style(\"white\") # set seaborn style to white\n # create lmplot from the mds data frame\n g = sns.lmplot(\"dim1\", \"dim2\", hue=\"cond\", data=df_mds, fit_reg=False, legend=False)\n ax = g.ax # set axes\n sns.despine(ax=ax, trim=True, left=True, bottom=True) # despine graphic\n ax.axes.get_xaxis().set_visible(False) # remove x axis\n ax.axes.get_yaxis().set_visible(False) # remove y axis\n ax.grid(False) # remove gird\n\n # add condition names to plot\n for dim1, dim2, name in df_mds.values:\n ax.text(dim1 * 1.05, dim2 * 1.05, name)\n\n # create segments\n segments = [[rdm_mds[i, :], rdm_mds[j, :]]\n for i in range(len(rdm_mds_pca)) for j in range(len(rdm_mds_pca))]\n values = np.abs(rdm)\n\n # set line collection\n lc = LineCollection(segments,\n zorder=0, cmap=plt.cm.Greys,\n norm=plt.Normalize(0, values.max()))\n lc.set_array(rdm.flatten())\n lc.set_linewidths(0.5 * np.ones(len(segments)))\n ax.add_collection(lc) # add line collection to plot\n\n plt.tight_layout()\n plt.show()"
] |
[
[
"numpy.isinf",
"pandas.DataFrame",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.tight_layout",
"numpy.abs",
"matplotlib.pyplot.show",
"pandas.read_csv",
"sklearn.manifold.MDS"
]
] |
mirtorande/grpf-tool
|
[
"5c20365366503f28d63f861f1b0326cf1dcdcd7e"
] |
[
"visualize.py"
] |
[
"#!/usr/bin/env python3\nfrom matplotlib.patches import Circle, Rectangle, ConnectionPatch\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import animation\nfrom math import floor\n\nColors = ['green', 'purple', 'orange', 'red', 'blue', 'yellow']\n\n\nclass Animation:\n def __init__(self, my_map, starts, goals, paths, predictions):\n self.my_map = np.flip(np.transpose(my_map), 1)\n self.predictions = predictions\n self.starts = []\n for start in starts:\n self.starts.append((start[1], len(self.my_map[0]) - 1 - start[0]))\n self.goals = []\n for goal in goals:\n self.goals.append((goal[1], len(self.my_map[0]) - 1 - goal[0]))\n self.paths = []\n if paths:\n for path in paths:\n self.paths.append([])\n for loc in path:\n self.paths[-1].append((loc[1], len(self.my_map[0]) - 1 - loc[0]))\n\n aspect = len(self.my_map) / len(self.my_map[0])\n\n self.fig = plt.figure(frameon=False, figsize=(4 * aspect, 4))\n self.ax = self.fig.add_subplot(111, aspect='equal')\n self.fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=None, hspace=None)\n # self.ax.set_frame_on(False)\n\n self.patches = []\n self.artists = []\n self.agents = dict()\n self.agent_names = dict()\n self.goal_predictions = dict()\n self.agent_goal_connections = dict()\n # create boundary patch\n\n x_min = -0.5\n y_min = -0.5\n x_max = len(self.my_map) - 0.5\n y_max = len(self.my_map[0]) - 0.5\n plt.xlim(x_min, x_max)\n plt.ylim(y_min, y_max)\n plt.xticks(np.arange(x_min, x_max, 1))\n plt.yticks(np.arange(y_min, y_max, 1))\n plt.grid(color='0.85')\n\n self.patches.append(Rectangle((x_min, y_min), x_max - x_min, y_max - y_min, facecolor='none', edgecolor='gray'))\n for i in range(len(self.my_map)):\n for j in range(len(self.my_map[0])):\n if self.my_map[i][j]:\n self.patches.append(Rectangle((i - 0.5, j - 0.5), 1, 1, facecolor='gray', edgecolor='gray'))\n\n self.T = 0\n # draw goals\n for i, goal in enumerate(self.goals):\n goal_color = Colors[i % len(Colors)]\n self.patches.append(Rectangle((goal[0] - 0.25, goal[1] - 0.25), 0.5, 0.5, facecolor=goal_color,\n edgecolor='black', alpha=0.5))\n \n # create agents\n for a in range(len(self.paths)):\n name = str(a)\n self.agents[a] = Circle((starts[a][0], starts[a][1]), 0.3, facecolor=Colors[a % len(Colors)],\n edgecolor='black')\n self.agents[a].original_face_color = Colors[a % len(Colors)]\n self.patches.append(self.agents[a])\n self.T = max(self.T, len(paths[a]) - 1)\n self.agent_names[a] = self.ax.text(starts[a][0], starts[a][1] + 0.25, name)\n self.agent_names[a].set_horizontalalignment('center')\n self.agent_names[a].set_verticalalignment('center')\n self.artists.append(self.agent_names[a])\n\n # connections & predictions\n self.goal_predictions[a] = dict()\n self.agent_goal_connections[a] = dict()\n for i, goal in enumerate(self.goals):\n goal_color = Colors[i % len(Colors)]\n self.goal_predictions[a][i] = self.ax.text(goal[0], goal[1], str(i))\n self.goal_predictions[a][i].set_horizontalalignment('center')\n self.goal_predictions[a][i].set_verticalalignment('center')\n self.artists.append(self.goal_predictions[a][i])\n self.agent_goal_connections[a][i] = plt.Line2D((start[1], goal[0]), (len(self.my_map[0]) - 1 - start[0], goal[1]), lw=2.5, color = goal_color)\n self.artists.append(self.agent_goal_connections[a][i])\n\n self.animation = animation.FuncAnimation(self.fig, self.animate_func,\n init_func=self.init_func,\n frames=int(self.T + 1) * 10,\n interval=100,\n blit=True)\n\n def save(self, file_name, speed):\n self.animation.save(\n file_name,\n fps=10 
* speed,\n dpi=200,\n savefig_kwargs={\"pad_inches\": 0})\n\n @staticmethod\n def show():\n plt.show()\n\n def init_func(self):\n for p in self.patches:\n self.ax.add_patch(p)\n for a in self.artists:\n self.ax.add_artist(a)\n return self.patches + self.artists\n\n def animate_func(self, t):\n # per ogni agente\n for a in range(len(self.paths)):\n pos = self.get_state(t / 10, self.paths[a])\n self.agents[a].center = (pos[0], pos[1])\n self.agent_names[a].set_position((pos[0], pos[1] + 0.5))\n # per ogni goal\n for i in self.agent_goal_connections[a]:\n timestep = floor(t/10)\n if timestep not in self.predictions[a]:\n continue\n\n prediction = self.predictions[a][timestep][i]\n # Linee\n self.agent_goal_connections[a][i].set_data([pos[0], self.goals[i][0]], [pos[1], self.goals[i][1]])\n self.agent_goal_connections[a][i].set_alpha(prediction)\n # Percentuali\n self.goal_predictions[a][i].set_text(\"{:.2f}\".format(prediction*100))\n self.goal_predictions[a][i].set_position([(pos[0] + self.goals[i][0])/2, (pos[1] + self.goals[i][1])/2])\n self.goal_predictions[a][i].set_alpha(prediction)\n\n\n # reset all colors\n for _, agent in self.agents.items():\n agent.set_facecolor(agent.original_face_color)\n\n # check drive-drive collisions\n agents_array = [agent for _, agent in self.agents.items()]\n for i in range(0, len(agents_array)):\n for j in range(i + 1, len(agents_array)):\n d1 = agents_array[i]\n d2 = agents_array[j]\n pos1 = np.array(d1.center)\n pos2 = np.array(d2.center)\n if np.linalg.norm(pos1 - pos2) < 0.7:\n d1.set_facecolor('red')\n d2.set_facecolor('red')\n print(\"COLLISION! (agent-agent) ({}, {}) at time {}\".format(i, j, t/10))\n\n return self.patches + self.artists\n\n @staticmethod\n def get_state(t, path):\n if int(t) <= 0:\n return np.array(path[0])\n elif int(t) >= len(path):\n return np.array(path[-1])\n else:\n pos_last = np.array(path[int(t) - 1])\n pos_next = np.array(path[int(t)])\n pos = (pos_next - pos_last) * (t - int(t)) + pos_last\n return pos\n"
] |
[
[
"numpy.array",
"numpy.linalg.norm",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.figure",
"numpy.transpose",
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.patches.Rectangle"
]
] |
msakai/chainer-differentiable-mpc
|
[
"dba5712f42a684748515d9ad5e2ff2823516c88e"
] |
[
"examples/Boyd_lqr.py"
] |
[
"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.2.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nimport sys\nprint(sys.path)\nsys.path.append(\"../lqr\")\nfrom lqr_recursion import LqrRecursion\nimport chainer\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nT =51\nf = None\nn_state =3\nn_ctrl =1\nn_sc = n_ctrl +n_state\nF =chainer.Variable(np.array([(np.array([[\n 1.0,0, 0, 1],\n [1,1.0,0,0],\n [0, 1, 1, 0]])) for i in range(T)])).reshape(T,1,n_state,n_sc,)\nc = chainer.Variable(np.array([(np.array([0,0,0.0,0]).T) for i in range(T)])).reshape(T,1,n_sc,)\n_C = np.array([np.array([[0,0 ,0,0],[0,0,0,0],[0,0,1.0,0],[0,0,0,1]]) for i in range(T-1)])\n_C = np.append(_C , np.array([[0,0 ,0,0],[0,0,0,0],[0,0,1.0,0],[0,0,0,0.00000000000001]]))\nC = chainer.Variable(_C).reshape(T,1,n_sc, n_sc)\nx_init = chainer.Variable(np.array([0.5428, 0.7633,0.3504])).reshape(1,n_state)\n\nC\n\ntest = LqrRecursion(x_init,C,c,F,f,T,n_state,n_ctrl)\n\nKs, ks = test.backward()\n\nk1 =[]\nk2 = []\nfig, ax = plt.subplots()\nfor i in range(T-1):\n k1.append(Ks[i][0][0][0].data)\n k2.append(Ks[i][0][0][1].data)\nmajor_ticks = np.arange(0,T, 2)\nax.grid(which = \"major\", axis = \"x\", color = \"blue\", alpha = 0.8,\n linestyle = \"--\", linewidth = 1)\nax.grid(which = \"major\", axis = \"y\", color = \"green\", alpha = 0.8,\n linestyle = \"--\", linewidth = 1)\nax.set_xticks(major_ticks) \nax.set_ylim(-0.5, 1.2)\nax.plot(k1)\nax.plot(k2)\nax.set_ylim(-2, 0)\nax.set_xlim(0,T)\n\nx,u = test.solve_recursion()\n\n# +\nus = []\nfor i in range(T):\n us.append(x[i][0][0].data)\n \nfig, ax = plt.subplots()\nax.grid(which = \"major\", axis = \"x\", color = \"blue\", alpha = 0.8,\n linestyle = \"--\", linewidth = 1)\n\n# y่ปธใซ็ฎ็็ทใ่จญๅฎ\nax.grid(which = \"major\", axis = \"y\", color = \"green\", alpha = 0.8,\n linestyle = \"--\", linewidth = 1)\n\nmajor_ticks = np.arange(0, 20, 2) \nax.set_xticks(major_ticks) \nax.set_ylim(-2, 1)\nax.set_xlim(0, 20)\nax.plot(us, marker='.')\nplt.show()\n# -\n\nKs\n\nKs\n\nlen(Ks)\n\nx\n\n\n\n\n"
] |
[
[
"matplotlib.pyplot.show",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.subplots"
]
] |
aivaras-ciurlionis/meteo
|
[
"434759d16f7cca505d280475611d1fef5176827b"
] |
[
"src/predictionAlgorithms/machineLearning/helpers/callbacks.py"
] |
[
"import keras\nfrom sklearn.metrics import roc_auc_score\nfrom src.predictionAlgorithms.machineLearning.helpers.validation import Validation\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport glob\n\nclass Callbacks(keras.callbacks.Callback):\n validationSequences = []\n algorithm = None\n number = 1\n validation_frequency = 1\n size = 64\n step = 1\n base = 4\n\n def set_step(self, step):\n self.step = step\n return self\n\n def set_base(self, base):\n self.base = base\n return base\n\n def set_size(self, size):\n self.size = size\n return self\n\n def set_validation_frequency(self, frequency):\n self.validation_frequency = frequency\n return self\n\n def set_validation_data(self, validation_data):\n self.validationSequences = validation_data\n return self\n\n def set_algorithm(self, algorithm):\n self.algorithm = algorithm\n return self\n\n def on_train_begin(self, logs={}):\n # Initialize the lists for holding the logs, losses and accuracies\n self.losses = []\n self.acc = []\n self.val_losses = []\n self.val_acc = []\n self.logs = []\n epoch_graphs = glob.glob('../output/*')\n for f in epoch_graphs:\n os.remove(f)\n\n\n def on_train_end(self, logs={}):\n return\n\n def on_epoch_begin(self, epoch, logs={}):\n return\n\n def on_epoch_end(self, epoch, logs={}):\n if self.number % self.validation_frequency != 0:\n self.number += 1\n return\n validation = Validation()\n validation.set_validation_data(self.validationSequences)\\\n .set_dimensions(self.size)\\\n .set_base(self.base)\\\n .set_step(self.step)\\\n .validate(self.algorithm)\n\n self.number += 1\n\n self.logs.append(logs)\n self.losses.append(logs.get('loss'))\n self.acc.append(logs.get('acc'))\n self.val_losses.append(logs.get('val_loss'))\n self.val_acc.append(logs.get('val_acc'))\n\n if len(self.losses) > 1:\n N = np.arange(0, len(self.losses))\n plt.figure()\n plt.plot(N, self.losses, label=\"train_loss\")\n plt.plot(N, self.acc, label=\"train_acc\")\n plt.plot(N, self.val_losses, label=\"val_loss\")\n plt.plot(N, self.val_acc, label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy [Epoch {}]\".format(epoch))\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.savefig('../output/Epoch-{}.png'.format(epoch))\n plt.close()\n return\n\n def on_batch_begin(self, batch, logs={}):\n return\n\n def on_batch_end(self, batch, logs={}):\n return\n"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel"
]
] |
BuddenD/jax
|
[
"269da0ae584cfe840f34e9f871f13c28e2772de5"
] |
[
"jax/experimental/jax2tf/tests/tf_test_util.py"
] |
[
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom typing import Any, Callable, Tuple\nimport tensorflow as tf # type: ignore[import]\n\nfrom jax.config import config\nfrom jax import dtypes\nfrom jax.experimental import jax2tf\nfrom jax import test_util as jtu\n\nclass JaxToTfTestCase(jtu.JaxTestCase):\n\n def assertDtypesMatch(self, x, y, *, canonicalize_dtypes=True):\n \"\"\"Compares dtypes across JAX and TF dtypes. Overrides super method.\"\"\"\n def to_numpy_dtype(dt):\n return dt if isinstance(dt, np.dtype) else dt.as_numpy_dtype\n\n if not config.FLAGS.jax_enable_x64 and canonicalize_dtypes:\n self.assertEqual(dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(x))),\n dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(y))))\n else:\n self.assertEqual(to_numpy_dtype(jtu._dtype(x)),\n to_numpy_dtype(jtu._dtype(y)))\n\n def ConvertAndCompare(self, func_jax: Callable, *args,\n with_function: bool = False,\n atol=None,\n rtol=None) -> Tuple[Any, Any]:\n \"\"\"Compares jax_func(*args) with convert(jax_func)(*args).\"\"\"\n func_tf = jax2tf.convert(func_jax)\n if with_function:\n func_tf = tf.function(func_tf)\n res_jax = func_jax(*args)\n res_tf = func_tf(*args)\n self.assertAllClose(res_jax, res_tf, atol=atol, rtol=rtol)\n return (res_jax, res_tf)\n"
] |
[
[
"tensorflow.function"
]
] |
Raphael-C-Almeida/Wireless-Sensor-Network
|
[
"8d12b06ddec1b5f3da28fd9b94b43bc4ac4518cf",
"8d12b06ddec1b5f3da28fd9b94b43bc4ac4518cf"
] |
[
"Data Fusion Test/Minimos Quadrados Puro.py",
"Data Fusion Test/Kalman Filter Com Memorizacao.py"
] |
[
"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef gen_data(n, start=0, end=10):\r\n x = np.linspace(start, end, n)\r\n y = np.sin(10*x) - x*x\r\n return y\r\n\r\ndef gen_data_osc(n):\r\n return np.array([1024 + (-2)**(-i/100) for i in range(n)])\r\n\r\ndef gen_data_rand(n):\r\n return np.random.randn(n) + 0.3*np.linspace(0, 10, n)\r\n\r\ndef calc_cov(X, Y):\r\n return np.sum((X - np.average(X))*(Y - np.average(Y))) / (X.shape[0] - 1)\r\n\r\ndef angular_coef(X,Y):\r\n return calc_cov(X,Y)/calc_cov(X,X)\r\n\r\ndef linear_coef(a, X, Y):\r\n return np.average(Y) - a*np.average(X)\r\n\r\ncount = 100\r\nend = 100\r\ntime = np.linspace(0, end, count)\r\ndata = gen_data(count)\r\n\r\ndelta = end / count\r\n\r\npreds = []\r\nkg_preds = []\r\n\r\nkg_prediction = 0\r\n\r\nfor i in range(1, count):\r\n a = angular_coef(time[:i], data[:i])\r\n b = linear_coef(a, time[:i], data[:i])\r\n\r\n prediction = (time[i]+delta)*a + b\r\n preds.append(prediction)\r\n\r\n avg_X = np.average(time[:i])\r\n avg_Y = np.average(data[:i])\r\n cov = calc_cov(time[:i], data[:i])\r\n\r\nestimate = time*a + b\r\n\r\nplt.scatter(time, data, label=\"Mediรงรตes\", color=\"#FF5850\")\r\nplt.scatter(time[1:], preds, label=\"Est. Min. Quad.\", color=\"#62B21C\")\r\nplt.plot(time, estimate, label=\"Min. Quad. Final\", color=\"#36A1FF\")\r\nplt.xlabel(\"Tempo\")\r\nplt.ylabel(\"Temperatura\")\r\nplt.title(\"Aproximaรงรฃo Por Minimos Quadrados\")\r\n# Place a legend to the right of this smaller subplot.\r\nplt.legend()\r\n\r\nplt.show()",
"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\ndef gen_data(n, start=0, end=10):\r\n x = np.linspace(start, end, n)\r\n y = np.sin(10*x) - x*x\r\n return y\r\n\r\ndef gen_data_osc(n):\r\n return np.array([1024 + (-2)**(-i/100) for i in range(n)])\r\n\r\ndef gen_data_rand(n):\r\n return np.random.randn(n) + 0.3*np.linspace(0, 10, n)\r\n\r\ndef calc_cov(X, Y):\r\n return np.sum((X - np.average(X))*(Y - np.average(Y))) / (X.shape[0] - 1)\r\n\r\ndef angular_coef(X,Y):\r\n return calc_cov(X,Y)/calc_cov(X,X)\r\n\r\ndef linear_coef(a, X, Y):\r\n return np.average(Y) - a*np.average(X)\r\n\r\ndef kg_coef(est, measurement):\r\n return est / (est + measurement)\r\n\r\ndef kg_iter(prev, measurement):\r\n return prev + kg_coef(prev, measurement) * (measurement - prev)\r\n\r\ncount = 100\r\nend = 100\r\ntime = np.linspace(0, end, count)\r\ndata = gen_data_osc(count)\r\n\r\ndelta = end / count\r\n\r\npreds = []\r\nkg_preds = []\r\n\r\nkg_prediction = 0\r\n\r\nmem_size=20\r\nmemory = []\r\ntime_mem = []\r\n\r\nfor i in range(1, count):\r\n if i == 1:\r\n memory.append(data[i])\r\n time_mem.append(time[i])\r\n if len(memory) >= mem_size:\r\n del memory[0]\r\n del time_mem[0]\r\n\r\n memory.append(data[i])\r\n time_mem.append(time[i])\r\n\r\n a = angular_coef(np.array(time_mem), np.array(memory))\r\n b = linear_coef(a, np.array(time_mem), np.array(memory))\r\n\r\n prediction = (time[i]+delta)*a + b\r\n kg_prediction = kg_iter(prediction, data[i])\r\n preds.append(prediction)\r\n kg_preds.append(kg_prediction)\r\n\r\nestimate = time*a + b\r\n\r\nplt.scatter(time, data, label=\"Mediรงรตes\", color=\"#FF5850\")\r\nplt.scatter(time[1:], preds, label=\"Est. Min. Quad.\", color=\"#62B21C\")\r\nplt.scatter(time[1:], kg_preds, label=\"Est. Kalman\", color=\"#C000FF\")\r\nplt.plot(time, estimate, label=\"Min. Quad. Final\", color=\"#36A1FF\")\r\nplt.xlabel(\"Tempo\")\r\nplt.ylabel(\"Temperatura\")\r\nplt.title(\"Aproximaรงao por Kalman Filter com Memรณria Limitada (%i elementos)\" % mem_size)\r\n# Place a legend to the right of this smaller subplot.\r\nplt.legend()\r\n\r\nplt.show()"
] |
[
[
"numpy.sin",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"numpy.random.randn",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.average",
"matplotlib.pyplot.scatter",
"numpy.linspace"
],
[
"numpy.sin",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"numpy.random.randn",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.average",
"matplotlib.pyplot.scatter",
"numpy.linspace"
]
] |
hummat/convolutional_occupancy_networks
|
[
"bb351edff59c196e01aa687943e19fee4ac11077"
] |
[
"src/eval.py"
] |
[
"import logging\n\nimport numpy as np\nimport trimesh\n\nfrom src.common import compute_iou\n# from scipy.spatial import cKDTree\nfrom src.utils.libkdtree import KDTree\nfrom src.utils.libmesh import check_mesh_contains\n\n# Maximum values for bounding box [-0.5, 0.5]^3\nEMPTY_PCL_DICT = {\n 'completeness': np.sqrt(3),\n 'accuracy': np.sqrt(3),\n 'completeness2': 3,\n 'accuracy2': 3,\n 'chamfer': 6,\n}\n\nEMPTY_PCL_DICT_NORMALS = {\n 'normals completeness': -1.,\n 'normals accuracy': -1.,\n 'normals': -1.,\n}\n\nlogger = logging.getLogger(__name__)\n\n\nclass MeshEvaluator(object):\n \"\"\" Mesh evaluation class.\n\n It handles the mesh evaluation process.\n\n Args:\n n_points (int): number of points to be used for evaluation\n \"\"\"\n\n def __init__(self, n_points=100000):\n self.n_points = n_points\n\n def eval_mesh(self,\n mesh,\n pointcloud_tgt,\n normals_tgt,\n points_iou,\n occ_tgt,\n remove_wall=False):\n \"\"\" Evaluates a mesh.\n\n Args:\n mesh (trimesh): mesh which should be evaluated\n pointcloud_tgt (numpy array): target point cloud\n normals_tgt (numpy array): target normals\n points_iou (numpy_array): points tensor for IoU evaluation\n occ_tgt (numpy_array): GT occupancy values for IoU points\n \"\"\"\n if len(mesh.vertices) != 0 and len(mesh.faces) != 0:\n if remove_wall: # ! Remove walls and floors\n pointcloud, idx = mesh.sample(2 * self.n_points, return_index=True)\n eps = 0.007\n x_max, x_min = pointcloud_tgt[:, 0].max(), pointcloud_tgt[:, 0].min()\n y_max, y_min = pointcloud_tgt[:, 1].max(), pointcloud_tgt[:, 1].min()\n z_max, z_min = pointcloud_tgt[:, 2].max(), pointcloud_tgt[:, 2].min()\n\n # add small offsets\n x_max, x_min = x_max + eps, x_min - eps\n y_max, y_min = y_max + eps, y_min - eps\n z_max, z_min = z_max + eps, z_min - eps\n\n mask_x = (pointcloud[:, 0] <= x_max) & (pointcloud[:, 0] >= x_min)\n mask_y = (pointcloud[:, 1] >= y_min) # floor\n mask_z = (pointcloud[:, 2] <= z_max) & (pointcloud[:, 2] >= z_min)\n\n mask = mask_x & mask_y & mask_z\n pointcloud_new = pointcloud[mask]\n # Subsample \n idx_new = np.random.randint(pointcloud_new.shape[0], size=self.n_points)\n pointcloud = pointcloud_new[idx_new]\n idx = idx[mask][idx_new]\n else:\n pointcloud, idx = mesh.sample(self.n_points, return_index=True)\n\n pointcloud = pointcloud.astype(np.float32)\n normals = mesh.face_normals[idx]\n else:\n pointcloud = np.empty((0, 3))\n normals = np.empty((0, 3))\n\n out_dict = self.eval_pointcloud(pointcloud, pointcloud_tgt, normals, normals_tgt)\n\n if len(mesh.vertices) != 0 and len(mesh.faces) != 0:\n occ = check_mesh_contains(mesh, points_iou)\n\n if occ_tgt.min() < 0:\n occ_tgt = (occ_tgt <= 0).astype(np.float32)\n\n out_dict['iou'] = compute_iou(occ, occ_tgt)\n else:\n out_dict['iou'] = 0.\n\n return out_dict\n\n @staticmethod\n def eval_pointcloud(pointcloud,\n pointcloud_tgt,\n normals=None,\n normals_tgt=None,\n thresholds=np.linspace(1. 
/ 1000, 1, 1000)):\n \"\"\" Evaluates a point cloud.\n\n Args:\n pointcloud (numpy array): predicted point cloud\n pointcloud_tgt (numpy array): target point cloud\n normals (numpy array): predicted normals\n normals_tgt (numpy array): target normals\n thresholds (numpy array): threshold values for the F-score calculation\n \"\"\"\n # Return maximum losses if pointcloud is empty\n if pointcloud.shape[0] == 0:\n logger.warning('Empty pointcloud / mesh detected!')\n out_dict = EMPTY_PCL_DICT.copy()\n if normals is not None and normals_tgt is not None:\n out_dict.update(EMPTY_PCL_DICT_NORMALS)\n return out_dict\n\n pointcloud = np.asarray(pointcloud)\n pointcloud_tgt = np.asarray(pointcloud_tgt)\n\n # Completeness: how far are the points of the target point cloud from the predicted point cloud\n completeness, completeness_normals = distance_p2p(pointcloud_tgt, normals_tgt, pointcloud, normals)\n recall = get_threshold_percentage(completeness, thresholds)\n completeness2 = completeness ** 2\n\n completeness = completeness.mean()\n completeness2 = completeness2.mean()\n completeness_normals = completeness_normals.mean()\n\n # Accuracy: how far are the points of the predicted pointcloud from the target pointcloud\n accuracy, accuracy_normals = distance_p2p(pointcloud, normals, pointcloud_tgt, normals_tgt)\n precision = get_threshold_percentage(accuracy, thresholds)\n accuracy2 = accuracy ** 2\n\n accuracy = accuracy.mean()\n accuracy2 = accuracy2.mean()\n accuracy_normals = accuracy_normals.mean()\n\n # Chamfer distance\n chamferL2 = 0.5 * (completeness2 + accuracy2)\n normals_correctness = (0.5 * completeness_normals + 0.5 * accuracy_normals)\n chamferL1 = 0.5 * (completeness + accuracy)\n\n # F-Score\n F = [2 * precision[i] * recall[i] / (precision[i] + recall[i]) for i in range(len(precision))]\n\n out_dict = {\n 'completeness': completeness,\n 'accuracy': accuracy,\n 'normals completeness': completeness_normals,\n 'normals accuracy': accuracy_normals,\n 'normals': normals_correctness,\n 'completeness2': completeness2,\n 'accuracy2': accuracy2,\n 'chamfer-L2': chamferL2,\n 'chamfer-L1': chamferL1,\n 'f-score': F[9], # threshold = 1.0%\n 'f-score-15': F[14], # threshold = 1.5%\n 'f-score-20': F[19], # threshold = 2.0%\n }\n\n return out_dict\n\n\ndef distance_p2p(points_src, normals_src, points_tgt, normals_tgt):\n \"\"\" Computes minimal distances of each point in points_src to points_tgt.\n\n Args:\n points_src (numpy array): source points\n normals_src (numpy array): source normals\n points_tgt (numpy array): target points\n normals_tgt (numpy array): target normals\n \"\"\"\n kdtree = KDTree(points_tgt)\n dist, idx = kdtree.query(points_src)\n\n if normals_src is not None and normals_tgt is not None:\n normals_src = normals_src / np.linalg.norm(normals_src, axis=-1, keepdims=True)\n normals_tgt = normals_tgt / np.linalg.norm(normals_tgt, axis=-1, keepdims=True)\n\n normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=-1)\n # Handle normals that point into wrong direction gracefully (mostly due to method not caring about this in generation)\n normals_dot_product = np.abs(normals_dot_product)\n else:\n normals_dot_product = np.array([np.nan] * points_src.shape[0], dtype=np.float32)\n return dist, normals_dot_product\n\n\ndef distance_p2m(points, mesh):\n \"\"\" Compute minimal distances of each point in points to mesh.\n\n Args:\n points (numpy array): points array\n mesh (trimesh): mesh\n\n \"\"\"\n _, dist, _ = trimesh.proximity.closest_point(mesh, points)\n return dist\n\n\ndef 
get_threshold_percentage(dist, thresholds):\n \"\"\" Evaluates a point cloud.\n\n Args:\n dist (numpy array): calculated distance\n thresholds (numpy array): threshold values for the F-score calculation\n \"\"\"\n in_threshold = [(dist <= t).mean() for t in thresholds]\n return in_threshold\n"
] |
[
[
"numpy.array",
"numpy.linalg.norm",
"numpy.empty",
"numpy.asarray",
"numpy.random.randint",
"numpy.abs",
"numpy.sqrt",
"numpy.linspace"
]
] |
simeoncarstens/probability
|
[
"054a720ff9f221dd9660acd7ce7fb38a1dbb1290",
"054a720ff9f221dd9660acd7ce7fb38a1dbb1290"
] |
[
"tensorflow_probability/python/internal/backend/numpy/numpy_array.py",
"tensorflow_probability/python/optimizer/lbfgs.py"
] |
[
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Numpy implementations of TensorFlow general top-level functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n# Dependency imports\nimport numpy as np\n\nfrom tensorflow_probability.python.internal.backend.numpy import _utils as utils\nfrom tensorflow_probability.python.internal.backend.numpy import ops\nfrom tensorflow_probability.python.internal.backend.numpy.linalg_impl import einsum\nfrom tensorflow_probability.python.internal.backend.numpy.linalg_impl import norm\nfrom tensorflow_probability.python.internal.backend.numpy.linalg_impl import tensordot\n\n\n__all__ = [\n 'concat',\n 'einsum',\n 'expand_dims',\n 'fill',\n 'gather',\n 'gather_nd',\n 'linspace',\n 'meshgrid',\n 'norm',\n 'one_hot',\n 'ones',\n 'ones_like',\n 'pad',\n 'range',\n 'rank',\n 'reshape',\n 'reverse',\n 'repeat',\n 'roll',\n 'searchsorted',\n 'shape',\n 'size',\n 'slice',\n 'split',\n 'squeeze',\n 'stack',\n 'tensordot',\n 'tile',\n 'transpose',\n 'unstack',\n 'where',\n 'zeros',\n 'zeros_like',\n # 'boolean_mask',\n # 'foldl',\n # 'foldr',\n]\n\n\nJAX_MODE = False\n\n\nif JAX_MODE:\n import jax # pylint: disable=g-import-not-at-top\n\n\ndef _astuple(x):\n try:\n return tuple(x)\n except TypeError:\n return x\n\n\ndef _gather( # pylint: disable=unused-argument\n params,\n indices,\n validate_indices=None,\n axis=None,\n batch_dims=0,\n name=None):\n \"\"\"gather.\"\"\"\n indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)\n if validate_indices is not None:\n raise NotImplementedError(\n 'Argument `validate_indices != None` is currently unimplemented.')\n if batch_dims < 0:\n raise NotImplementedError('Negative `batch_dims` is currently unsupported.')\n if axis is None:\n axis = batch_dims\n if axis < 0:\n axis = axis + len(params.shape)\n # NOTE: For only the numpy backend, this function could create a single result\n # ndarray and use in-place updates. 
For the Jax backend, this function\n # vmaps `np.take`.\n if JAX_MODE:\n take = lambda params, indices: np.take(params, indices, # pylint: disable=g-long-lambda\n axis=axis - batch_dims)\n take = functools.reduce(\n lambda g, f: f(g), [jax.vmap] * int(batch_dims),\n take\n )\n return take(params, indices)\n params = ops.convert_to_tensor(params)\n res = np.array([\n np.take(params[i], indices[i], axis=axis - batch_dims)\n for i in np.ndindex(*params.shape[:batch_dims])\n ])\n return np.reshape(\n res,\n params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis+1:])\n\n\ndef _args_to_matching_arrays(args_list, dtype_hint=None):\n \"\"\"Converts a list to array using the first element for dtype.\n\n This method is used to match the behavior of `tf.concat`.\n\n Args:\n args_list: A list or tuple of arguments.\n dtype_hint: An optional hint used when converting the args to tensors.\n Returns:\n A list of tensors.\n \"\"\"\n dtype = None\n for arg in args_list:\n if ops.is_tensor(arg):\n dtype = arg.dtype\n break\n if dtype is None:\n ret = []\n for arg in args_list:\n ret.append(ops.convert_to_tensor(arg, dtype, dtype_hint=dtype_hint))\n if dtype is None:\n dtype = ret[-1].dtype\n else:\n ret = [ops.convert_to_tensor(arg, dtype) for arg in args_list]\n return ret\n\n\ndef _concat(values, axis, name='concat'):\n del name\n if axis is None:\n raise ValueError('None values for `axis` argument not supported.')\n if not isinstance(values, (list, tuple)):\n values = [values]\n if len(values) == 1:\n return values[0]\n values = _args_to_matching_arrays(values)\n return np.concatenate(values, axis=axis)\n\n\ndef _gather_nd_single(params, indices):\n idx = tuple(np.moveaxis(indices, -1, 0))\n return params[idx]\n\n\ndef _gather_nd( # pylint: disable=unused-argument\n params,\n indices,\n batch_dims=0,\n name=None):\n \"\"\"gather_nd.\"\"\"\n indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)\n if batch_dims < 0:\n raise NotImplementedError('Negative `batch_dims` is currently unsupported.')\n if not JAX_MODE and batch_dims > 0:\n raise NotImplementedError(\n '`batch_dims > 0` currently unsupported in NumPy backend.')\n gather_nd_ = _gather_nd_single\n if JAX_MODE:\n gather_nd_ = functools.reduce(\n lambda g, f: f(g), [jax.vmap] * int(batch_dims),\n gather_nd_\n )\n return gather_nd_(params, indices)\n\n\ndef _linspace(start, stop, num, name=None, axis=0): # pylint: disable=unused-argument\n \"\"\"Match TF behavior with np.linspace.\"\"\"\n start = ops.convert_to_tensor(start)\n # Match TF weirdness arising from truediv(int32, int32) = float64\n if np.issubdtype(start.dtype, np.integer):\n start = start.astype(np.float64)\n stop = ops.convert_to_tensor(stop, dtype=start.dtype)\n num = ops.convert_to_tensor(num, dtype_hint=np.int32)\n if not np.issubdtype(num.dtype, np.integer):\n raise TypeError('`num` must be an integer but got {}'.format(num.dtype))\n num = num.astype(np.int32)\n return np.linspace(start, stop, num, axis=axis).astype(start.dtype)\n\n\ndef _one_hot( # pylint: disable=unused-argument\n indices,\n depth,\n on_value=None,\n off_value=None,\n axis=None,\n dtype=None,\n name=None):\n \"\"\"One hot.\"\"\"\n if on_value is None:\n on_value = 1\n if off_value is None:\n off_value = 0\n if dtype is None:\n dtype = utils.common_dtype([on_value, off_value], np.float32)\n indices = np.array(indices)\n depth = np.array(depth)\n pred = abs(np.arange(depth, dtype=indices.dtype) -\n indices[..., np.newaxis]) > 0\n y_out = np.where(pred, np.array(off_value, dtype), 
np.array(on_value, dtype))\n if axis is not None:\n y_out = np.moveaxis(y_out, -1, axis)\n return y_out\n\n\ndef _ones_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin,unused-argument\n return np.ones_like(input, dtype=utils.numpy_dtype(dtype))\n\n\n# TODO(b/136555907): Add unit-test.\ndef _pad( # pylint: disable=unused-argument\n tensor,\n paddings,\n mode='CONSTANT',\n constant_values=0,\n name=None):\n return np.pad(\n tensor, paddings,\n mode=mode.lower(),\n constant_values=constant_values)\n\n\ndef _range(start, limit=None, delta=1, dtype=None, name='range'): # pylint: disable=unused-argument\n \"\"\"Emulates tf.range.\"\"\"\n # Emulating dtype inference logic from tf.range\n dtype = utils.numpy_dtype(dtype)\n start = ops.convert_to_tensor(start, dtype=dtype)\n limit = None if limit is None else ops.convert_to_tensor(limit, dtype=dtype)\n delta = ops.convert_to_tensor(delta, dtype=dtype)\n if dtype is None:\n dtype_hierarchy = [np.int32, np.int64, np.float32, np.float64]\n inferred_dtype = max([arg.dtype for arg in [start, limit, delta]\n if arg is not None],\n key=dtype_hierarchy.index)\n else:\n inferred_dtype = dtype\n return np.arange(start, limit, delta).astype(inferred_dtype)\n\n\ndef _reverse(tensor, axis, name=None): # pylint: disable=unused-argument\n if np.array(axis).ndim == 0:\n return np.flip(tensor, axis)\n for ax in axis:\n tensor = np.flip(tensor, ax)\n return tensor\n\n\nif JAX_MODE:\n _searchsorted_vmap_sides = {\n side: jax.vmap(functools.partial(jax.numpy.searchsorted, side=side))\n for side in ('left', 'right')\n }\n\n\ndef _searchsorted( # pylint: disable=unused-argument\n sorted_sequence,\n values,\n side='left',\n out_type=np.int32,\n name=None):\n \"\"\"Find indices for insertion for list to remain sorted.\"\"\"\n if JAX_MODE:\n try:\n func = _searchsorted_vmap_sides[side]\n except KeyError:\n raise ValueError(\"'%s' is an invalid value for keyword 'side'\" % side)\n sorted_sequence_2d = np.reshape(sorted_sequence,\n (-1, sorted_sequence.shape[-1]))\n values_2d = np.reshape(values, (-1, values.shape[-1]))\n if sorted_sequence_2d.shape[0] != values_2d.shape[0]:\n raise ValueError('Leading dim_size of both tensors must match.')\n return np.reshape(func(sorted_sequence_2d, values_2d).astype(out_type),\n values.shape)\n # We don't use np.searchsorted in the numpy backend because it doesn't support\n # batching.\n sorted_sequence = sorted_sequence[..., np.newaxis, :]\n values = values[..., :, np.newaxis]\n if side == 'left':\n is_in_right_location = sorted_sequence < values\n elif side == 'right':\n is_in_right_location = sorted_sequence <= values\n return np.sum(is_in_right_location, axis=-1).astype(out_type)\n\n\ndef _shape(input, out_type=np.int32, name=None): # pylint: disable=redefined-builtin,unused-argument\n return ops.convert_to_tensor(ops.convert_to_tensor(input).shape).astype(\n out_type)\n\n\ndef _size(input, out_type=np.int32, name=None): # pylint: disable=redefined-builtin, unused-argument\n return np.asarray(np.prod(ops.convert_to_tensor(input).shape), dtype=out_type)\n\n\nbuiltin_slice = slice # pylint: disable=invalid-name\n\n\ndef _slice(input_, begin, size, name=None): # pylint: disable=unused-argument,redefined-outer-name\n slices = tuple(\n builtin_slice(b, b + s if s != -1 else None) for b, s in zip(begin, size))\n return input_[slices]\n\n\ndef _split(value, num_or_size_splits, axis=0, num=None, name='split'): # pylint: disable=unused-argument\n \"\"\"Map tf.split -> np.split.\"\"\"\n indices_or_sections = 
np.array(num_or_size_splits)\n if indices_or_sections.ndim == 1:\n if any(idx == -1 for idx in indices_or_sections):\n # Numpy parameterizes by split indices and returns nsplits+1 arrays.\n total_splits = sum(idx for idx in indices_or_sections if idx != -1)\n remainder = int(max(0, np.array(value).shape[axis] - total_splits))\n indices_or_sections = [\n idx if idx != -1 else remainder for idx in indices_or_sections\n ]\n indices_or_sections = np.cumsum(np.array(indices_or_sections))[:-1]\n return np.split(value, indices_or_sections, axis)\n\n\ndef _stack(values, axis=0, name='stack'):\n del name\n if axis is None:\n raise ValueError('None values for `axis` argument not supported.')\n values = _args_to_matching_arrays(values)\n return np.stack(values, axis=axis)\n\n\ndef _transpose(a, perm=None, conjugate=False, name='transpose'): # pylint: disable=unused-argument\n x = np.transpose(a, perm)\n return np.conjugate(x) if conjugate else x\n\n\ndef _zeros_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin,unused-argument\n return np.zeros_like(input, dtype=utils.numpy_dtype(dtype))\n\n\n# --- Begin Public Functions --------------------------------------------------\n\n\nconcat = utils.copy_docstring(\n 'tf.concat',\n _concat)\n\n\nexpand_dims = utils.copy_docstring(\n 'tf.expand_dims',\n lambda input, axis, name=None: np.expand_dims(input, axis))\n\nfill = utils.copy_docstring(\n 'tf.fill',\n lambda dims, value, name=None: np.full(dims, value))\n\ngather = utils.copy_docstring(\n 'tf.gather',\n _gather)\n\ngather_nd = utils.copy_docstring(\n 'tf.gather_nd',\n _gather_nd)\n\nreverse = utils.copy_docstring('tf.reverse', _reverse)\n\nlinspace = utils.copy_docstring(\n 'tf.linspace',\n _linspace)\n\nmeshgrid = utils.copy_docstring(\n 'tf.meshgrid',\n np.meshgrid)\n\nnorm = utils.copy_docstring(\n 'tf.norm',\n norm)\n\none_hot = utils.copy_docstring(\n 'tf.one_hot',\n _one_hot)\n\nones = utils.copy_docstring(\n 'tf.ones',\n lambda shape, dtype=np.float32, name=None: np.ones( # pylint: disable=g-long-lambda\n shape, utils.numpy_dtype(dtype)))\n\nones_like = utils.copy_docstring(\n 'tf.ones_like',\n _ones_like)\n\npad = utils.copy_docstring(\n 'tf.pad',\n _pad)\n\nrange = utils.copy_docstring( # pylint: disable=redefined-builtin\n 'tf.range',\n _range)\n\nrank = utils.copy_docstring(\n 'tf.rank',\n lambda input, name=None: np.int32(np.array(input).ndim)) # pylint: disable=redefined-builtin,g-long-lambda\n\nrepeat = utils.copy_docstring(\n 'tf.repeat',\n lambda input, repeats, axis=None, name=None: np.repeat( # pylint: disable=g-long-lambda\n input, repeats, axis=axis))\n\nreshape = utils.copy_docstring(\n 'tf.reshape',\n lambda tensor, shape, name=None: np.reshape( # pylint: disable=g-long-lambda\n ops.convert_to_tensor(tensor), shape))\n\nroll = utils.copy_docstring(\n 'tf.roll',\n lambda input, shift, axis: np.roll(input, shift, axis)) # pylint: disable=unnecessary-lambda\n\nsearchsorted = utils.copy_docstring(\n 'tf.searchsorted',\n _searchsorted)\n\nshape = utils.copy_docstring(\n 'tf.shape',\n _shape)\n\nsize = utils.copy_docstring(\n 'tf.size',\n _size)\n\nslice = utils.copy_docstring( # pylint: disable=redefined-builtin\n 'tf.slice', _slice)\n\nsplit = utils.copy_docstring('tf.split', _split)\n\nsqueeze = utils.copy_docstring(\n 'tf.squeeze',\n lambda input, axis=None, name=None: np.squeeze(input, _astuple(axis)))\n\nstack = utils.copy_docstring(\n 'tf.stack',\n _stack)\n\ntile = utils.copy_docstring(\n 'tf.tile',\n lambda input, multiples, name=None: 
np.tile(np.array(input), multiples))\n\ntranspose = utils.copy_docstring(\n 'tf.transpose',\n _transpose)\n\nunstack = utils.copy_docstring(\n 'tf.unstack',\n lambda value, num=None, axis=0, name='unstack': tuple( # pylint: disable=g-long-lambda\n np.squeeze(x, axis=axis) for x in\n np.split(value, value.shape[axis] if num is None else num, axis)))\n\nwhere = utils.copy_docstring(\n 'tf.where',\n lambda condition, x=None, y=None, name=None: np.where(condition, x, y))\n\nzeros = utils.copy_docstring(\n 'tf.zeros',\n lambda shape, dtype=np.float32, name=None: np.zeros( # pylint: disable=g-long-lambda\n shape, utils.numpy_dtype(dtype)))\n\nzeros_like = utils.copy_docstring(\n 'tf.zeros_like',\n _zeros_like)\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Limited-Memory BFGS minimization algorithm.\n\nLimited-memory quasi-Newton methods are useful for solving large problems\nwhose Hessian matrices cannot be computed at a reasonable cost or are not\nsparse. Instead of storing fully dense n x n approximations of Hessian\nmatrices, they only save a few vectors of length n that represent the\napproximations implicitly.\n\nThis module implements the algorithm known as L-BFGS, which, as its name\nsuggests, is a limited-memory version of the BFGS algorithm.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\n# Dependency imports\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.optimizer import bfgs_utils\n\n\nLBfgsOptimizerResults = collections.namedtuple(\n 'LBfgsOptimizerResults', [\n 'converged', # Scalar boolean tensor indicating whether the minimum\n # was found within tolerance.\n 'failed', # Scalar boolean tensor indicating whether a line search\n # step failed to find a suitable step size satisfying Wolfe\n # conditions. In the absence of any constraints on the\n # number of objective evaluations permitted, this value will\n # be the complement of `converged`. However, if there is\n # a constraint and the search stopped due to available\n # evaluations being exhausted, both `failed` and `converged`\n # will be simultaneously False.\n 'num_iterations', # The number of iterations of the BFGS update.\n 'num_objective_evaluations', # The total number of objective\n # evaluations performed.\n 'position', # A tensor containing the last argument value found\n # during the search. If the search converged, then\n # this value is the argmin of the objective function.\n 'objective_value', # A tensor containing the value of the objective\n # function at the `position`. If the search\n # converged, then this is the (local) minimum of\n # the objective function.\n 'objective_gradient', # A tensor containing the gradient of the\n # objective function at the\n # `final_position`. If the search converged\n # the max-norm of this tensor should be\n # below the tolerance.\n 'position_deltas', # A tensor encoding information about the latest\n # changes in `position` during the algorithm\n # execution. Its shape is of the form\n # `(num_correction_pairs,) + position.shape` where\n # `num_correction_pairs` is given as an argument to\n # the minimize function.\n 'gradient_deltas', # A tensor encoding information about the latest\n # changes in `objective_gradient` during the\n # algorithm execution. 
Has the same shape as\n # position_deltas.\n ])\n\n\ndef minimize(value_and_gradients_function,\n initial_position,\n previous_optimizer_results=None,\n num_correction_pairs=10,\n tolerance=1e-8,\n x_tolerance=0,\n f_relative_tolerance=0,\n initial_inverse_hessian_estimate=None,\n max_iterations=50,\n parallel_iterations=1,\n stopping_condition=None,\n max_line_search_iterations=50,\n name=None):\n \"\"\"Applies the L-BFGS algorithm to minimize a differentiable function.\n\n Performs unconstrained minimization of a differentiable function using the\n L-BFGS scheme. See [Nocedal and Wright(2006)][1] for details of the algorithm.\n\n ### Usage:\n\n The following example demonstrates the L-BFGS optimizer attempting to find the\n minimum for a simple high-dimensional quadratic objective function.\n\n ```python\n # A high-dimensional quadratic bowl.\n ndims = 60\n minimum = np.ones([ndims], dtype='float64')\n scales = np.arange(ndims, dtype='float64') + 1.0\n\n # The objective function and the gradient.\n def quadratic_loss_and_gradient(x):\n return tfp.math.value_and_gradient(\n lambda x: tf.reduce_sum(\n scales * tf.math.squared_difference(x, minimum), axis=-1),\n x)\n start = np.arange(ndims, 0, -1, dtype='float64')\n optim_results = tfp.optimizer.lbfgs_minimize(\n quadratic_loss_and_gradient,\n initial_position=start,\n num_correction_pairs=10,\n tolerance=1e-8)\n\n # Check that the search converged\n assert(optim_results.converged)\n # Check that the argmin is close to the actual value.\n np.testing.assert_allclose(optim_results.position, minimum)\n ```\n\n ### References:\n\n [1] Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series\n in Operations Research. pp 176-180. 2006\n\n http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf\n\n Args:\n value_and_gradients_function: A Python callable that accepts a point as a\n real `Tensor` and returns a tuple of `Tensor`s of real dtype containing\n the value of the function and its gradient at that point. The function\n to be minimized. The input is of shape `[..., n]`, where `n` is the size\n of the domain of input points, and all others are batching dimensions.\n The first component of the return value is a real `Tensor` of matching\n shape `[...]`. The second component (the gradient) is also of shape\n `[..., n]` like the input value to the function.\n initial_position: Real `Tensor` of shape `[..., n]`. The starting point, or\n points when using batching dimensions, of the search procedure. At these\n points the function value and the gradient norm should be finite.\n Exactly one of `initial_position` and `previous_optimizer_results` can be\n non-None.\n previous_optimizer_results: An `LBfgsOptimizerResults` namedtuple to\n intialize the optimizer state from, instead of an `initial_position`.\n This can be passed in from a previous return value to resume optimization\n with a different `stopping_condition`. Exactly one of `initial_position`\n and `previous_optimizer_results` can be non-None.\n num_correction_pairs: Positive integer. Specifies the maximum number of\n (position_delta, gradient_delta) correction pairs to keep as implicit\n approximation of the Hessian matrix.\n tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance\n for the procedure. If the supremum norm of the gradient vector is below\n this number, the algorithm is stopped.\n x_tolerance: Scalar `Tensor` of real dtype. 
If the absolute change in the\n position between one iteration and the next is smaller than this number,\n the algorithm is stopped.\n f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change\n in the objective value between one iteration and the next is smaller\n than this value, the algorithm is stopped.\n initial_inverse_hessian_estimate: None. Option currently not supported.\n max_iterations: Scalar positive int32 `Tensor`. The maximum number of\n iterations for L-BFGS updates.\n parallel_iterations: Positive integer. The number of iterations allowed to\n run in parallel.\n stopping_condition: (Optional) A Python function that takes as input two\n Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor.\n The input tensors are `converged` and `failed`, indicating the current\n status of each respective batch member; the return value states whether\n the algorithm should stop. The default is tfp.optimizer.converged_all\n which only stops when all batch members have either converged or failed.\n An alternative is tfp.optimizer.converged_any which stops as soon as one\n batch member has converged, or when all have failed.\n max_line_search_iterations: Python int. The maximum number of iterations\n for the `hager_zhang` line search algorithm.\n name: (Optional) Python str. The name prefixed to the ops created by this\n function. If not supplied, the default name 'minimize' is used.\n\n Returns:\n optimizer_results: A namedtuple containing the following items:\n converged: Scalar boolean tensor indicating whether the minimum was\n found within tolerance.\n failed: Scalar boolean tensor indicating whether a line search\n step failed to find a suitable step size satisfying Wolfe\n conditions. In the absence of any constraints on the\n number of objective evaluations permitted, this value will\n be the complement of `converged`. However, if there is\n a constraint and the search stopped due to available\n evaluations being exhausted, both `failed` and `converged`\n will be simultaneously False.\n num_objective_evaluations: The total number of objective\n evaluations performed.\n position: A tensor containing the last argument value found\n during the search. If the search converged, then\n this value is the argmin of the objective function.\n objective_value: A tensor containing the value of the objective\n function at the `position`. If the search converged, then this is\n the (local) minimum of the objective function.\n objective_gradient: A tensor containing the gradient of the objective\n function at the `position`. 
If the search converged the\n max-norm of this tensor should be below the tolerance.\n position_deltas: A tensor encoding information about the latest\n changes in `position` during the algorithm execution.\n gradient_deltas: A tensor encoding information about the latest\n changes in `objective_gradient` during the algorithm execution.\n \"\"\"\n if initial_inverse_hessian_estimate is not None:\n raise NotImplementedError(\n 'Support of initial_inverse_hessian_estimate arg not yet implemented')\n\n if stopping_condition is None:\n stopping_condition = bfgs_utils.converged_all\n\n with tf.name_scope(name or 'minimize'):\n if (initial_position is None) == (previous_optimizer_results is None):\n raise ValueError(\n 'Exactly one of `initial_position` or '\n '`previous_optimizer_results` may be specified.')\n\n if initial_position is not None:\n initial_position = tf.convert_to_tensor(\n initial_position, name='initial_position')\n dtype = dtype_util.base_dtype(initial_position.dtype)\n\n if previous_optimizer_results is not None:\n dtype = dtype_util.base_dtype(previous_optimizer_results.position.dtype)\n\n tolerance = tf.convert_to_tensor(\n tolerance, dtype=dtype, name='grad_tolerance')\n f_relative_tolerance = tf.convert_to_tensor(\n f_relative_tolerance, dtype=dtype, name='f_relative_tolerance')\n x_tolerance = tf.convert_to_tensor(\n x_tolerance, dtype=dtype, name='x_tolerance')\n max_iterations = tf.convert_to_tensor(max_iterations, name='max_iterations')\n\n # The `state` here is a `LBfgsOptimizerResults` tuple with values for the\n # current state of the algorithm computation.\n def _cond(state):\n \"\"\"Continue if iterations remain and stopping condition is not met.\"\"\"\n return ((state.num_iterations < max_iterations) &\n tf.logical_not(stopping_condition(state.converged, state.failed)))\n\n def _body(current_state):\n \"\"\"Main optimization loop.\"\"\"\n search_direction = _get_search_direction(current_state)\n\n # TODO(b/120134934): Check if the derivative at the start point is not\n # negative, if so then reset position/gradient deltas and recompute\n # search direction.\n\n next_state = bfgs_utils.line_search_step(\n current_state,\n value_and_gradients_function, search_direction,\n tolerance, f_relative_tolerance, x_tolerance, stopping_condition,\n max_line_search_iterations)\n\n # If not failed or converged, update the Hessian estimate.\n should_update = ~(next_state.converged | next_state.failed)\n state_after_inv_hessian_update = bfgs_utils.update_fields(\n next_state,\n position_deltas=_queue_push(\n current_state.position_deltas, should_update,\n next_state.position - current_state.position),\n gradient_deltas=_queue_push(\n current_state.gradient_deltas, should_update,\n next_state.objective_gradient - current_state.objective_gradient))\n return [state_after_inv_hessian_update]\n\n if previous_optimizer_results is None:\n assert initial_position is not None\n initial_state = _get_initial_state(value_and_gradients_function,\n initial_position,\n num_correction_pairs,\n tolerance)\n else:\n initial_state = previous_optimizer_results\n\n return tf.while_loop(\n cond=_cond,\n body=_body,\n loop_vars=[initial_state],\n parallel_iterations=parallel_iterations)[0]\n\n\ndef _get_initial_state(value_and_gradients_function,\n initial_position,\n num_correction_pairs,\n tolerance):\n \"\"\"Create LBfgsOptimizerResults with initial state of search procedure.\"\"\"\n init_args = bfgs_utils.get_initial_state_args(\n value_and_gradients_function,\n initial_position,\n tolerance)\n 
empty_queue = _make_empty_queue_for(num_correction_pairs, initial_position)\n init_args.update(position_deltas=empty_queue, gradient_deltas=empty_queue)\n return LBfgsOptimizerResults(**init_args)\n\n\ndef _get_search_direction(state):\n \"\"\"Computes the search direction to follow at the current state.\n\n On the `k`-th iteration of the main L-BFGS algorithm, the state has collected\n the most recent `m` correction pairs in position_deltas and gradient_deltas,\n where `k = state.num_iterations` and `m = min(k, num_correction_pairs)`.\n\n Assuming these, the code below is an implementation of the L-BFGS two-loop\n recursion algorithm given by [Nocedal and Wright(2006)][1]:\n\n ```None\n q_direction = objective_gradient\n for i in reversed(range(m)): # First loop.\n inv_rho[i] = gradient_deltas[i]^T * position_deltas[i]\n alpha[i] = position_deltas[i]^T * q_direction / inv_rho[i]\n q_direction = q_direction - alpha[i] * gradient_deltas[i]\n\n kth_inv_hessian_factor = (gradient_deltas[-1]^T * position_deltas[-1] /\n gradient_deltas[-1]^T * gradient_deltas[-1])\n r_direction = kth_inv_hessian_factor * I * q_direction\n\n for i in range(m): # Second loop.\n beta = gradient_deltas[i]^T * r_direction / inv_rho[i]\n r_direction = r_direction + position_deltas[i] * (alpha[i] - beta)\n\n return -r_direction # Approximates - H_k * objective_gradient.\n ```\n\n Args:\n state: A `LBfgsOptimizerResults` tuple with the current state of the\n search procedure.\n\n Returns:\n A real `Tensor` of the same shape as the `state.position`. The direction\n along which to perform line search.\n \"\"\"\n # The number of correction pairs that have been collected so far.\n num_elements = tf.minimum(\n state.num_iterations,\n distribution_util.prefer_static_shape(state.position_deltas)[0])\n\n def _two_loop_algorithm():\n \"\"\"L-BFGS two-loop algorithm.\"\"\"\n # Correction pairs are always appended to the end, so only the latest\n # `num_elements` vectors have valid position/gradient deltas. 
Vectors\n # that haven't been computed yet are zero.\n position_deltas = state.position_deltas\n gradient_deltas = state.gradient_deltas\n\n # Pre-compute all `inv_rho[i]`s.\n inv_rhos = tf.reduce_sum(\n gradient_deltas * position_deltas, axis=-1)\n\n def first_loop(acc, args):\n _, q_direction = acc\n position_delta, gradient_delta, inv_rho = args\n alpha = tf.math.divide_no_nan(\n tf.reduce_sum(position_delta * q_direction, axis=-1), inv_rho)\n direction_delta = alpha[..., tf.newaxis] * gradient_delta\n return (alpha, q_direction - direction_delta)\n\n # Run first loop body computing and collecting `alpha[i]`s, while also\n # computing the updated `q_direction` at each step.\n zero = tf.zeros_like(inv_rhos[-num_elements])\n alphas, q_directions = tf.scan(\n first_loop, [position_deltas, gradient_deltas, inv_rhos],\n initializer=(zero, state.objective_gradient), reverse=True)\n\n # We use `H^0_k = gamma_k * I` as an estimate for the initial inverse\n # hessian for the k-th iteration; then `r_direction = H^0_k * q_direction`.\n gamma_k = inv_rhos[-1] / tf.reduce_sum(\n gradient_deltas[-1] * gradient_deltas[-1], axis=-1)\n r_direction = gamma_k[..., tf.newaxis] * q_directions[-num_elements]\n\n def second_loop(r_direction, args):\n alpha, position_delta, gradient_delta, inv_rho = args\n beta = tf.math.divide_no_nan(\n tf.reduce_sum(gradient_delta * r_direction, axis=-1), inv_rho)\n direction_delta = (alpha - beta)[..., tf.newaxis] * position_delta\n return r_direction + direction_delta\n\n # Finally, run second loop body computing the updated `r_direction` at each\n # step.\n r_directions = tf.scan(\n second_loop, [alphas, position_deltas, gradient_deltas, inv_rhos],\n initializer=r_direction)\n return -r_directions[-1]\n\n return prefer_static.cond(tf.equal(num_elements, 0),\n (lambda: -state.objective_gradient),\n _two_loop_algorithm)\n\n\ndef _make_empty_queue_for(k, element):\n \"\"\"Creates a `tf.Tensor` suitable to hold `k` element-shaped tensors.\n\n For example:\n\n ```python\n element = tf.constant([[0., 1., 2., 3., 4.],\n [5., 6., 7., 8., 9.]])\n\n # A queue capable of holding 3 elements.\n _make_empty_queue_for(3, element)\n # => [[[ 0., 0., 0., 0., 0.],\n # [ 0., 0., 0., 0., 0.]],\n #\n # [[ 0., 0., 0., 0., 0.],\n # [ 0., 0., 0., 0., 0.]],\n #\n # [[ 0., 0., 0., 0., 0.],\n # [ 0., 0., 0., 0., 0.]]]\n ```\n\n Args:\n k: A positive scalar integer, number of elements that each queue will hold.\n element: A `tf.Tensor`, only its shape and dtype information are relevant.\n\n Returns:\n A zero-filed `tf.Tensor` of shape `(k,) + tf.shape(element)` and same dtype\n as `element`.\n \"\"\"\n queue_shape = tf.concat(\n [[k], distribution_util.prefer_static_shape(element)], axis=0)\n return tf.zeros(queue_shape, dtype=dtype_util.base_dtype(element.dtype))\n\n\ndef _queue_push(queue, should_update, new_vecs):\n \"\"\"Conditionally push new vectors into a batch of first-in-first-out queues.\n\n The `queue` of shape `[k, ..., n]` can be thought of as a batch of queues,\n each holding `k` n-D vectors; while `new_vecs` of shape `[..., n]` is a\n fresh new batch of n-D vectors. The `should_update` batch of Boolean scalars,\n i.e. shape `[...]`, indicates batch members whose corresponding n-D vector in\n `new_vecs` should be added at the back of its queue, pushing out the\n corresponding n-D vector from the front. 
Batch members in `new_vecs` for\n  # which `should_update` is False are ignored.\n\n  Note: the choice of placing `k` at the dimension 0 of the queue is\n  constrained by the L-BFGS two-loop algorithm above. The algorithm uses\n  tf.scan to iterate over the `k` correction pairs simultaneously across all\n  batches, and tf.scan itself can only iterate over dimension 0.\n\n  For example:\n\n  ```python\n  k, b, n = (3, 2, 5)\n  queue = tf.reshape(tf.range(30), (k, b, n))\n  # => [[[ 0, 1, 2, 3, 4],\n  # [ 5, 6, 7, 8, 9]],\n  #\n  # [[10, 11, 12, 13, 14],\n  # [15, 16, 17, 18, 19]],\n  #\n  # [[20, 21, 22, 23, 24],\n  # [25, 26, 27, 28, 29]]]\n\n  element = tf.reshape(tf.range(30, 40), (b, n))\n  # => [[30, 31, 32, 33, 34],\n  #     [35, 36, 37, 38, 39]]\n\n  should_update = tf.constant([True, False]) # Shape: (b,)\n\n  _queue_push(queue, should_update, element)\n  # => [[[10, 11, 12, 13, 14],\n  # [ 5, 6, 7, 8, 9]],\n  #\n  # [[20, 21, 22, 23, 24],\n  # [15, 16, 17, 18, 19]],\n  #\n  # [[30, 31, 32, 33, 34],\n  # [25, 26, 27, 28, 29]]]\n  ```\n\n  Args:\n  queue: A `tf.Tensor` of shape `[k, ..., n]`; a batch of queues each with\n  `k` n-D vectors.\n  should_update: A Boolean `tf.Tensor` of shape `[...]` indicating batch\n  members where new vectors should be added to their queues.\n  new_vecs: A `tf.Tensor` of shape `[..., n]`; a batch of n-D vectors to add\n  at the end of their respective queues, pushing out the first element from\n  each.\n\n  Returns:\n  A new `tf.Tensor` of shape `[k, ..., n]`.\n  \"\"\"\n  new_queue = tf.concat([queue[1:], [new_vecs]], axis=0)\n  return tf.where(\n  should_update[tf.newaxis, ..., tf.newaxis], new_queue, queue)\n"
] |
[
[
"numpy.where",
"numpy.conjugate",
"numpy.issubdtype",
"numpy.concatenate",
"numpy.full",
"numpy.ndindex",
"numpy.take",
"numpy.transpose",
"numpy.arange",
"numpy.expand_dims",
"numpy.array",
"numpy.reshape",
"numpy.roll",
"numpy.stack",
"numpy.squeeze",
"numpy.sum",
"numpy.split",
"numpy.repeat",
"numpy.moveaxis",
"numpy.linspace",
"numpy.flip"
],
[
"tensorflow.compat.v2.scan",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.while_loop",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.equal"
]
] |
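The `_get_search_direction` docstring in the L-BFGS file above states the two-loop recursion only as pseudocode, and the `tf.scan` formulation can be hard to trace. Below is a minimal NumPy sketch of the same recursion, added purely for illustration; the function name is invented here, and it assumes the correction pairs arrive as `(m, n)` arrays with the oldest pair first, matching the queues in that file.

```python
import numpy as np

def two_loop_direction(grad, position_deltas, gradient_deltas):
    """L-BFGS two-loop recursion; delta arrays have shape (m, n), oldest pair first."""
    m = len(position_deltas)
    if m == 0:  # no correction pairs yet: plain steepest descent
        return -grad
    # inv_rho[i] = gradient_deltas[i] . position_deltas[i], as in `inv_rhos` above
    inv_rhos = np.sum(gradient_deltas * position_deltas, axis=-1)

    q = np.array(grad, dtype=float)
    alphas = np.zeros(m)
    for i in reversed(range(m)):  # first loop: newest correction pair to oldest
        alphas[i] = position_deltas[i] @ q / inv_rhos[i]
        q -= alphas[i] * gradient_deltas[i]

    # H^0_k = gamma_k * I is the initial inverse-Hessian estimate
    gamma_k = inv_rhos[-1] / (gradient_deltas[-1] @ gradient_deltas[-1])
    r = gamma_k * q

    for i in range(m):  # second loop: oldest correction pair to newest
        beta = gradient_deltas[i] @ r / inv_rhos[i]
        r += (alphas[i] - beta) * position_deltas[i]

    return -r  # approximates -H_k @ grad, the descent direction
```

The `if m == 0` guard plays the role of the `prefer_static.cond` branch in the file, which falls back to the negative gradient when no correction pairs have been collected yet.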
vihank/gym-donkeycar
|
[
"2e3cd780d92f3d5da5a0f9c67272a0cd3f08befe"
] |
[
"examples/test_cam_config.py"
] |
[
"\"\"\" Test the gym's code for configuring the DonkeyCar's camera settings.\n\"\"\"\n\nimport os\nimport argparse\nimport gym\nimport gym_donkeycar\nimport numpy as np\nimport uuid\n\nif __name__ == \"__main__\":\n\n # Initialize the donkey environment\n # where env_name one of:\n env_list = [\n \"donkey-warehouse-v0\",\n \"donkey-generated-roads-v0\",\n \"donkey-avc-sparkfun-v0\",\n \"donkey-generated-track-v0\",\n \"donkey-roboracingleague-track-v0\",\n \"donkey-waveshare-v0\"\n ]\n\n parser = argparse.ArgumentParser(description='gym_test')\n parser.add_argument('--sim', type=str, default=\"sim_path\",\n help='path to unity simulator. maybe be left at default if you would like to start the sim on your own.')\n parser.add_argument('--port', type=int, default=9091,\n help='port to use for websockets')\n parser.add_argument('--env_name', type=str, default='donkey-warehouse-v0',\n help='name of donkey sim environment', choices=env_list)\n\n args = parser.parse_args()\n\n#%% SET UP ENVIRONMENT\n\n cam = (256,256,3)\n \n conf = {\"exe_path\" : args.sim, \n \"host\" : \"127.0.0.1\",\n \"port\" : args.port,\n\n \"body_style\" : \"donkey\",\n \"body_rgb\" : (128, 128, 128),\n \"car_name\" : \"me\",\n \"font_size\" : 100,\n\n \"racer_name\" : \"test\",\n \"country\" : \"USA\",\n \"bio\" : \"I am test client\",\n \"guid\" : str(uuid.uuid4()),\n\n \"cam_resolution\" : cam,\n \"img_w\" : cam[0],\n \"img_h\" : cam[1],\n \"img_d\" : cam[2],\n }\n\n env = gym.make(args.env_name, conf=conf)\n\n print( \"Env cam size: {}\".format( env.viewer.get_sensor_size() ) )\n\n speed = 0.5\n steer = 0.0\n max_steer = 1.0\n\n#%% PLAY\n obv = env.reset()\n for t in range(100):\n action = np.array([steer,speed]) # drive straight with small speed\n try:\n obv, reward, done, info = env.step(action)\n except Exception as ex:\n print( \"Exception: {}\".format( ex ) )\n\n if obv.shape != cam:\n print( \"Invalid Image size: {}\".format( obv.shape ) )\n elif t == 10:\n print( \"Actual camera size: {}\".format( obv.shape ) )\n\n if done or (info['hit'] is True):\n obv = env.reset()\n print( \"Exiting d/h: {}/{}\".format( done, info['hit'] ) )\n break\n\n env.close()\n"
] |
[
[
"numpy.array"
]
] |
Cristian-Malinescu/qiskit-aqua
|
[
"b29596800447c3130a20ec72a18b7fd8ed9fdb2f"
] |
[
"qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py"
] |
[
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"A recursive minimal eigen optimizer in Qiskit's optimization module.\"\"\"\n\nfrom copy import deepcopy\nfrom enum import Enum\nfrom typing import Optional, Union, List, Tuple, Dict\nimport logging\nimport numpy as np\n\nfrom qiskit.aqua.algorithms import NumPyMinimumEigensolver\nfrom qiskit.aqua.utils.validation import validate_min\n\nfrom .optimization_algorithm import OptimizationAlgorithm, OptimizationResult\nfrom .minimum_eigen_optimizer import MinimumEigenOptimizer, MinimumEigenOptimizationResult\nfrom ..converters.quadratic_program_to_qubo import QuadraticProgramToQubo\nfrom ..exceptions import QiskitOptimizationError\nfrom ..problems import Variable\nfrom ..problems.quadratic_program import QuadraticProgram\n\nlogger = logging.getLogger(__name__)\n\n\nclass IntermediateResult(Enum):\n \"\"\"\n Defines whether the intermediate results of\n :class:`~qiskit.optimization.algorithms.RecursiveMinimumEigenOptimizer`\n at each iteration should be stored and returned to the end user.\n \"\"\"\n\n NO_ITERATIONS = 0\n \"\"\"No intermediate results are stored.\"\"\"\n\n LAST_ITERATION = 1\n \"\"\"Only results from the last iteration are stored.\"\"\"\n\n ALL_ITERATIONS = 2\n \"\"\"All intermediate results are stored.\"\"\"\n\n\nclass RecursiveMinimumEigenOptimizationResult(OptimizationResult):\n \"\"\"Recursive Eigen Optimizer Result.\"\"\"\n def __init__(self, x: Union[List[float], np.ndarray], fval: float,\n variables: List[Variable],\n replacements: Dict[str, Tuple[str, int]],\n history: Tuple[List[MinimumEigenOptimizationResult], OptimizationResult]) -> None:\n \"\"\"\n Constructs an instance of the result class.\n\n Args:\n x: the optimal value found in the optimization.\n fval: the optimal function value.\n variables: the list of variables of the optimization problem.\n replacements: a dictionary of substituted variables. Key is a variable being\n substituted, value is a tuple of substituting variable and a weight, either 1 or -1.\n history: a tuple containing intermediate results. The first element is a list of\n :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizerResult` obtained by\n invoking :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively,\n the second element is an instance of\n :class:`~qiskit.optimization.algorithm.OptimizationResult` obtained at the last step\n via `min_num_vars_optimizer`.\n \"\"\"\n super().__init__(x, fval, variables, None)\n self._replacements = replacements\n self._history = history\n\n @property\n def replacements(self) -> Dict[str, Tuple[str, int]]:\n \"\"\"\n Returns a dictionary of substituted variables. Key is a variable being substituted, value\n is a tuple of substituting variable and a weight, either 1 or -1.\"\"\"\n return self._replacements\n\n @property\n def history(self) -> Tuple[List[MinimumEigenOptimizationResult], OptimizationResult]:\n \"\"\"\n Returns intermediate results. 
The first element is a list of\n :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizerResult` obtained by invoking\n :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively, the second\n element is an instance of :class:`~qiskit.optimization.algorithm.OptimizationResult`\n obtained at the last step via `min_num_vars_optimizer`.\n \"\"\"\n return self._history\n\n\nclass RecursiveMinimumEigenOptimizer(OptimizationAlgorithm):\n \"\"\"A meta-algorithm that applies a recursive optimization.\n\n The recursive minimum eigen optimizer applies a recursive optimization on top of\n :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer`.\n The algorithm is introduced in [1].\n\n Examples:\n Outline of how to use this class:\n\n .. code-block::\n\n from qiskit.aqua.algorithms import QAOA\n from qiskit.optimization.problems import QuadraticProgram\n from qiskit.optimization.algorithms import RecursiveMinimumEigenOptimizer\n problem = QuadraticProgram()\n # specify problem here\n # specify minimum eigen solver to be used, e.g., QAOA\n qaoa = QAOA(...)\n optimizer = RecursiveMinimumEigenOptimizer(qaoa)\n result = optimizer.solve(problem)\n\n References:\n [1]: Bravyi et al. (2019), Obstacles to State Preparation and Variational Optimization\n from Symmetry Protection. http://arxiv.org/abs/1910.08980.\n \"\"\"\n\n def __init__(self, min_eigen_optimizer: MinimumEigenOptimizer, min_num_vars: int = 1,\n min_num_vars_optimizer: Optional[OptimizationAlgorithm] = None,\n penalty: Optional[float] = None,\n history: Optional[IntermediateResult] = IntermediateResult.LAST_ITERATION) -> None:\n \"\"\" Initializes the recursive minimum eigen optimizer.\n\n This initializer takes a ``MinimumEigenOptimizer``, the parameters to specify until when to\n to apply the iterative scheme, and the optimizer to be applied once the threshold number of\n variables is reached.\n\n Args:\n min_eigen_optimizer: The eigen optimizer to use in every iteration.\n min_num_vars: The minimum number of variables to apply the recursive scheme. 
If this\n threshold is reached, the min_num_vars_optimizer is used.\n min_num_vars_optimizer: This optimizer is used after the recursive scheme for the\n problem with the remaining variables.\n penalty: The factor that is used to scale the penalty terms corresponding to linear\n equality constraints.\n history: Whether the intermediate results are stored.\n Default value is :py:obj:`~IntermediateResult.LAST_ITERATION`.\n\n Raises:\n QiskitOptimizationError: In case of invalid parameters (num_min_vars < 1).\n \"\"\"\n\n validate_min('min_num_vars', min_num_vars, 1)\n\n self._min_eigen_optimizer = min_eigen_optimizer\n self._min_num_vars = min_num_vars\n if min_num_vars_optimizer:\n self._min_num_vars_optimizer = min_num_vars_optimizer\n else:\n self._min_num_vars_optimizer = MinimumEigenOptimizer(NumPyMinimumEigensolver())\n self._penalty = penalty\n self._history = history\n self._qubo_converter = QuadraticProgramToQubo()\n\n def get_compatibility_msg(self, problem: QuadraticProgram) -> str:\n \"\"\"Checks whether a given problem can be solved with this optimizer.\n\n Checks whether the given problem is compatible, i.e., whether the problem can be converted\n to a QUBO, and otherwise, returns a message explaining the incompatibility.\n\n Args:\n problem: The optimization problem to check compatibility.\n\n Returns:\n A message describing the incompatibility.\n \"\"\"\n return QuadraticProgramToQubo.get_compatibility_msg(problem)\n\n def solve(self, problem: QuadraticProgram) -> OptimizationResult:\n \"\"\"Tries to solve the given problem using the recursive optimizer.\n\n Runs the optimizer to try to solve the optimization problem.\n\n Args:\n problem: The problem to be solved.\n\n Returns:\n The result of the optimizer applied to the problem.\n\n Raises:\n QiskitOptimizationError: Incompatible problem.\n QiskitOptimizationError: Infeasible due to variable substitution\n \"\"\"\n self._verify_compatibility(problem)\n\n # convert problem to QUBO, this implicitly checks if the problem is compatible\n problem_ = self._qubo_converter.convert(problem)\n problem_ref = deepcopy(problem_)\n\n # run recursive optimization until the resulting problem is small enough\n replacements = {} # type: Dict[str, Tuple[str, int]]\n min_eigen_results = [] # type: List[MinimumEigenOptimizationResult]\n while problem_.get_num_vars() > self._min_num_vars:\n\n # solve current problem with optimizer\n res = self._min_eigen_optimizer.solve(problem_) # type: MinimumEigenOptimizationResult\n if self._history == IntermediateResult.ALL_ITERATIONS:\n min_eigen_results.append(res)\n\n # analyze results to get strongest correlation\n correlations = res.get_correlations()\n i, j = self._find_strongest_correlation(correlations)\n\n x_i = problem_.variables[i].name\n x_j = problem_.variables[j].name\n if correlations[i, j] > 0:\n # set x_i = x_j\n problem_ = problem_.substitute_variables(variables={i: (j, 1)})\n if problem_.status == QuadraticProgram.Status.INFEASIBLE:\n raise QiskitOptimizationError('Infeasible due to variable substitution')\n replacements[x_i] = (x_j, 1)\n else:\n # set x_i = 1 - x_j, this is done in two steps:\n # 1. set x_i = 1 + x_i\n # 2. set x_i = -x_j\n\n # 1a. get additional offset\n constant = problem_.objective.constant\n constant += problem_.objective.linear[i]\n constant += problem_.objective.quadratic[i, i]\n problem_.objective.constant = constant\n\n # 1b. 
get additional linear part\n for k in range(problem_.get_num_vars()):\n coeff = problem_.objective.linear[k]\n if k == i:\n coeff += 2*problem_.objective.quadratic[i, k]\n else:\n coeff += problem_.objective.quadratic[i, k]\n\n # set new coefficient if not too small\n if np.abs(coeff) > 1e-10:\n problem_.objective.linear[k] = coeff\n else:\n problem_.objective.linear[k] = 0\n\n # 2. replace x_i by -x_j\n problem_ = problem_.substitute_variables(variables={i: (j, -1)})\n if problem_.status == QuadraticProgram.Status.INFEASIBLE:\n raise QiskitOptimizationError('Infeasible due to variable substitution')\n replacements[x_i] = (x_j, -1)\n\n # solve remaining problem\n result = self._min_num_vars_optimizer.solve(problem_)\n\n # unroll replacements\n var_values = {}\n for i, x in enumerate(problem_.variables):\n var_values[x.name] = result.x[i]\n\n def find_value(x, replacements, var_values):\n if x in var_values:\n # if value for variable is known, return it\n return var_values[x]\n elif x in replacements:\n # get replacement for variable\n (y, sgn) = replacements[x]\n # find details for replacing variable\n value = find_value(y, replacements, var_values)\n # construct, set, and return new value\n var_values[x] = value if sgn == 1 else 1 - value\n return var_values[x]\n else:\n raise QiskitOptimizationError('Invalid values!')\n\n # loop over all variables to set their values\n for x_i in problem_ref.variables:\n if x_i.name not in var_values:\n find_value(x_i.name, replacements, var_values)\n\n # build history before any translations are applied\n # min_eigen_results is an empty list if history is set to NO or LAST.\n history = (min_eigen_results,\n None if self._history == IntermediateResult.NO_ITERATIONS else result)\n\n # construct result\n x_v = [var_values[x_aux.name] for x_aux in problem_ref.variables]\n fval = result.fval\n result = OptimizationResult(x=x_v, fval=fval, variables=problem_ref.variables)\n result = self._qubo_converter.interpret(result)\n\n return RecursiveMinimumEigenOptimizationResult(x=result.x, fval=result.fval,\n variables=result.variables,\n replacements=replacements,\n history=history)\n\n def _find_strongest_correlation(self, correlations):\n\n # get absolute values and set diagonal to -1 to make sure maximum is always on off-diagonal\n abs_correlations = np.abs(correlations)\n for i in range(len(correlations)):\n abs_correlations[i, i] = -1\n\n # get index of maximum (by construction on off-diagonal)\n m_max = np.argmax(abs_correlations.flatten())\n\n # translate back to indices\n i = int(m_max // len(correlations))\n j = int(m_max - i*len(correlations))\n return (i, j)\n"
] |
[
[
"numpy.abs"
]
] |
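The variable-elimination loop in `solve` above picks the pair to substitute via `_find_strongest_correlation`, which suppresses the diagonal so the argmax lands on an off-diagonal entry. An equivalent standalone NumPy sketch, with an invented 3x3 correlation matrix (illustrative values only, not from the repository):

```python
import numpy as np

def find_strongest_correlation(correlations):
    # Take absolute values and suppress the diagonal so the argmax
    # is guaranteed to land on an off-diagonal pair (i, j).
    abs_corr = np.abs(np.asarray(correlations, dtype=float))
    np.fill_diagonal(abs_corr, -1.0)
    i, j = np.unravel_index(np.argmax(abs_corr), abs_corr.shape)
    return int(i), int(j)

corr = np.array([[ 1.0,  0.2, -0.9],
                 [ 0.2,  1.0,  0.4],
                 [-0.9,  0.4,  1.0]])
print(find_strongest_correlation(corr))  # (0, 2)
```

Because `corr[0, 2]` is negative here, `solve` would apply the anti-correlated substitution `x_0 = 1 - x_2`; a positive entry would give `x_0 = x_2`, exactly the two branches in the loop above.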
Renovamen/Image-Captioning
|
[
"de8d4f553a22e967fa56a01d5b4a2206b9431771"
] |
[
"trainer/trainer.py"
] |
[
"import time\nfrom typing import Optional, Dict\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\nfrom utils import TensorboardWriter, AverageMeter, save_checkpoint, accuracy, \\\n clip_gradient, adjust_learning_rate\nfrom metrics import Metrics\n\nclass Trainer:\n \"\"\"\n Encoder-decoder pipeline. Tearcher Forcing is used during training and validation.\n\n Parameters\n ----------\n caption_model : str\n Type of the caption model\n\n epochs : int\n We should train the model for __ epochs\n\n device : torch.device\n Use GPU or not\n\n word_map : Dict[str, int]\n Word2id map\n\n rev_word_map : Dict[int, str]\n Id2word map\n\n start_epoch : int\n We should start training the model from __th epoch\n\n epochs_since_improvement : int\n Number of epochs since last improvement in BLEU-4 score\n\n best_bleu4 : float\n Best BLEU-4 score until now\n\n train_loader : DataLoader\n DataLoader for training data\n\n val_loader : DataLoader\n DataLoader for validation data\n\n encoder : nn.Module\n Encoder (based on CNN)\n\n decoder : nn.Module\n Decoder (based on LSTM)\n\n encoder_optimizer : optim.Optimizer\n Optimizer for encoder (Adam) (if fine-tune)\n\n decoder_optimizer : optim.Optimizer\n Optimizer for decoder (Adam)\n\n loss_function : nn.Module\n Loss function (cross entropy)\n\n grad_clip : float\n Gradient threshold in clip gradients\n\n tau : float\n Penalty term ฯ for doubly stochastic attention in paper: show, attend and tell\n\n fine_tune_encoder : bool\n Fine-tune encoder or not\n\n tensorboard : bool, optional, default=False\n Enable tensorboard or not?\n\n log_dir : str, optional\n Path to the folder to save logs for tensorboard\n \"\"\"\n def __init__(\n self,\n caption_model: str,\n epochs: int,\n device: torch.device,\n word_map: Dict[str, int],\n rev_word_map: Dict[int, str],\n start_epoch: int,\n epochs_since_improvement: int,\n best_bleu4: float,\n train_loader: DataLoader,\n val_loader: DataLoader,\n encoder: nn.Module,\n decoder: nn.Module,\n encoder_optimizer: optim.Optimizer,\n decoder_optimizer: optim.Optimizer,\n loss_function: nn.Module,\n grad_clip: float,\n tau: float,\n fine_tune_encoder: bool,\n tensorboard: bool = False,\n log_dir: Optional[str] = None\n ) -> None:\n self.device = device # GPU / CPU\n\n self.caption_model = caption_model\n self.epochs = epochs\n self.word_map = word_map\n self.rev_word_map = rev_word_map\n\n self.start_epoch = start_epoch\n self.epochs_since_improvement = epochs_since_improvement\n self.best_bleu4 = best_bleu4\n\n self.train_loader = train_loader\n self.val_loader = val_loader\n self.encoder = encoder\n self.decoder = decoder\n self.encoder_optimizer = encoder_optimizer\n self.decoder_optimizer = decoder_optimizer\n self.loss_function = loss_function\n\n self.tau = tau\n self.grad_clip = grad_clip\n self.fine_tune_encoder = fine_tune_encoder\n\n self.print_freq = 100 # print training/validation stats every __ batches\n # setup visualization writer instance\n self.writer = TensorboardWriter(log_dir, tensorboard)\n self.len_epoch = len(self.train_loader)\n\n def train(self, epoch: int) -> None:\n \"\"\"\n Train an epoch\n\n Parameters\n ----------\n epoch : int\n Current number of epoch\n \"\"\"\n self.decoder.train() # train mode (dropout and batchnorm is used)\n self.encoder.train()\n\n batch_time = AverageMeter() # forward prop. + back prop. 
time\n data_time = AverageMeter() # data loading time\n losses = AverageMeter(tag='loss', writer=self.writer) # loss (per word decoded)\n top5accs = AverageMeter(tag='top5acc', writer=self.writer) # top5 accuracy\n\n start = time.time()\n\n # batches\n for i, (imgs, caps, caplens) in enumerate(self.train_loader):\n data_time.update(time.time() - start)\n\n # Move to GPU, if available\n imgs = imgs.to(self.device)\n caps = caps.to(self.device)\n caplens = caplens.to(self.device)\n\n # forward encoder\n imgs = self.encoder(imgs)\n\n # forward decoder\n if self.caption_model == 'att2all':\n scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)\n else:\n scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)\n\n # since we decoded starting with <start>, the targets are all words after <start>, up to <end>\n targets = caps_sorted[:, 1:]\n\n # remove timesteps that we didn't decode at, or are pads\n # pack_padded_sequence is an easy trick to do this\n scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]\n targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]\n\n # calc loss\n loss = self.loss_function(scores, targets)\n\n # doubly stochastic attention regularization (in paper: show, attend and tell)\n if self.caption_model == 'att2all':\n loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()\n\n # clear gradient of last batch\n self.decoder_optimizer.zero_grad()\n if self.encoder_optimizer is not None:\n self.encoder_optimizer.zero_grad()\n\n # backward\n loss.backward()\n\n # clip gradients\n if self.grad_clip is not None:\n clip_gradient(self.decoder_optimizer, self.grad_clip)\n if self.encoder_optimizer is not None:\n clip_gradient(self.encoder_optimizer, self.grad_clip)\n\n # update weights\n self.decoder_optimizer.step()\n if self.encoder_optimizer is not None:\n self.encoder_optimizer.step()\n\n # set step for tensorboard\n step = (epoch - 1) * self.len_epoch + i\n self.writer.set_step(step=step, mode='train')\n\n # keep track of metrics\n top5 = accuracy(scores, targets, 5)\n losses.update(loss.item(), sum(decode_lengths))\n top5accs.update(top5, sum(decode_lengths))\n batch_time.update(time.time() - start)\n\n start = time.time()\n\n # print status\n if i % self.print_freq == 0:\n print(\n 'Epoch: [{0}][{1}/{2}]\\t'\n 'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, len(self.train_loader),\n batch_time = batch_time,\n data_time = data_time,\n loss = losses,\n top5 = top5accs\n )\n )\n\n def validate(self) -> float:\n \"\"\"\n Validate an epoch.\n\n Returns\n -------\n bleu4 : float\n BLEU-4 score\n \"\"\"\n self.decoder.eval() # eval mode (no dropout or batchnorm)\n if self.encoder is not None:\n self.encoder.eval()\n\n batch_time = AverageMeter()\n losses = AverageMeter()\n top5accs = AverageMeter()\n\n start = time.time()\n\n ground_truth = list() # ground_truth (true captions) for calculating BLEU-4 score\n prediction = list() # prediction (predicted captions)\n\n # explicitly disable gradient calculation to avoid CUDA memory error\n # solves the issue #57\n with torch.no_grad():\n # Batches\n for i, (imgs, caps, caplens, allcaps) in enumerate(self.val_loader):\n\n # move to device, if available\n imgs = imgs.to(self.device)\n caps = caps.to(self.device)\n caplens = 
caplens.to(self.device)\n\n # forward encoder\n if self.encoder is not None:\n imgs = self.encoder(imgs)\n\n # forward decoder\n if self.caption_model == 'att2all':\n scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)\n else:\n scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)\n\n # since we decoded starting with <start>, the targets are all words after <start>, up to <end>\n targets = caps_sorted[:, 1:]\n\n # remove timesteps that we didn't decode at, or are pads\n # pack_padded_sequence is an easy trick to do this\n scores_copy = scores.clone()\n scores = pack_padded_sequence(scores, decode_lengths, batch_first = True)[0]\n targets = pack_padded_sequence(targets, decode_lengths, batch_first = True)[0]\n\n # calc loss\n loss = self.loss_function(scores, targets)\n\n # doubly stochastic attention regularization (in paper: show, attend and tell)\n if self.caption_model == 'att2all':\n loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()\n\n # keep track of metrics\n losses.update(loss.item(), sum(decode_lengths))\n top5 = accuracy(scores, targets, 5)\n top5accs.update(top5, sum(decode_lengths))\n batch_time.update(time.time() - start)\n\n start = time.time()\n\n if i % self.print_freq == 0:\n print('Validation: [{0}/{1}]\\t'\n 'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})\\t'.format(i, len(self.val_loader),\n batch_time = batch_time,\n loss = losses,\n top5 = top5accs)\n )\n\n # store ground truth captions and predicted captions of each image\n # for n images, each of them has one prediction and multiple ground truths (a, b, c...):\n # prediction = [ [hyp1], [hyp2], ..., [hypn] ]\n # ground_truth = [ [ [ref1a], [ref1b], [ref1c] ], ..., [ [refna], [refnb] ] ]\n\n # ground truth\n allcaps = allcaps[sort_ind] # because images were sorted in the decoder\n for j in range(allcaps.shape[0]):\n img_caps = allcaps[j].tolist()\n img_captions = list(\n map(\n lambda c: [w for w in c if w not in {self.word_map['<start>'], self.word_map['<pad>']}],\n img_caps\n )\n ) # remove <start> and pads\n ground_truth.append(img_captions)\n\n # prediction\n _, preds = torch.max(scores_copy, dim = 2)\n preds = preds.tolist()\n temp_preds = list()\n for j, p in enumerate(preds):\n temp_preds.append(preds[j][:decode_lengths[j]]) # remove pads\n preds = temp_preds\n prediction.extend(preds)\n\n assert len(ground_truth) == len(prediction)\n\n # calc BLEU-4 and CIDEr score\n metrics = Metrics(ground_truth, prediction, self.rev_word_map)\n bleu4 = metrics.belu[3] # BLEU-4\n cider = metrics.cider # CIDEr\n\n print(\n '\\n * LOSS - {loss.avg:.3f}, TOP-5 ACCURACY - {top5.avg:.3f}, BLEU-4 - {bleu}, CIDEr - {cider}\\n'.format(\n loss = losses,\n top5 = top5accs,\n bleu = bleu4,\n cider = cider\n )\n )\n\n return bleu4\n\n def run_train(self) -> None:\n # epochs\n for epoch in range(self.start_epoch, self.epochs):\n\n # decay learning rate if there is no improvement for 8 consecutive epochs\n # terminate training if there is no improvement for 20 consecutive epochs\n if self.epochs_since_improvement == 20:\n break\n if self.epochs_since_improvement > 0 and self.epochs_since_improvement % 8 == 0:\n adjust_learning_rate(self.decoder_optimizer, 0.8)\n if self.fine_tune_encoder:\n adjust_learning_rate(self.encoder_optimizer, 0.8)\n\n # train an epoch\n self.train(epoch = epoch)\n\n # validate an epoch\n recent_bleu4 = self.validate()\n\n # epochs 
num since last improvement\n is_best = recent_bleu4 > self.best_bleu4\n self.best_bleu4 = max(recent_bleu4, self.best_bleu4)\n if not is_best:\n self.epochs_since_improvement += 1\n print(\"\\nEpochs since last improvement: %d\\n\" % (self.epochs_since_improvement,))\n else:\n self.epochs_since_improvement = 0\n\n # save checkpoint\n save_checkpoint(\n epoch = epoch,\n epochs_since_improvement = self.epochs_since_improvement,\n encoder = self.encoder,\n decoder = self.decoder,\n encoder_optimizer = self.encoder_optimizer,\n decoder_optimizer = self.decoder_optimizer,\n caption_model = self.caption_model,\n bleu4 = recent_bleu4,\n is_best = is_best\n )\n"
] |
[
[
"torch.no_grad",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.max"
]
] |
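Both `train` and `validate` in the trainer above use `pack_padded_sequence(...)[0]` purely as a masking trick: it keeps only the first `decode_lengths[j]` timesteps of each sequence, so pads never reach the loss. A self-contained sketch of why the flattened shapes line up (toy sizes invented for illustration):

```python
import torch
from torch.nn.utils.rnn import pack_padded_sequence

batch_size, max_len, vocab = 2, 4, 7
scores = torch.randn(batch_size, max_len, vocab)    # stand-in decoder outputs
targets = torch.randint(0, vocab, (batch_size, max_len))
decode_lengths = [4, 2]                             # descending, as after sorting

# pack_padded_sequence concatenates only the first `length` steps of each
# sequence; element [0] is the PackedSequence's .data field, i.e. the
# flattened tensor without pads.
flat_scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
flat_targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]

print(flat_scores.shape)   # torch.Size([6, 7]): sum(decode_lengths) rows
print(flat_targets.shape)  # torch.Size([6])
loss = torch.nn.CrossEntropyLoss()(flat_scores, flat_targets)
```

In the real trainer the decoder sorts the batch by caption length (`sort_ind`), which is what satisfies the default `enforce_sorted=True` requirement of `pack_padded_sequence`.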
eungbean/knowledge-distillation-cifar10
|
[
"683379804c8724d097a845cee85f130b6767dbd7"
] |
[
"model/studentB.py"
] |
[
"\"\"\"\n Baseline CNN, losss function and metrics\n Also customizes knowledge distillation (KD) loss function here\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n\"\"\"\nThis is the standard way to define your own network in PyTorch. You typically choose the components\n(e.g. LSTMs, linear layers etc.) of your network in the __init__ function. You then apply these layers\non the input step-by-step in the forward function. You can use torch.nn.functional to apply functions\n\nsuch as F.relu, F.sigmoid, F.softmax, F.max_pool2d. Be careful to ensure your dimensions are correct after each\nstep. You are encouraged to have a look at the network in pytorch/nlp/model/net.py to get a better sense of how\nyou can go about defining your own network.\n\nThe documentation for all the various components available o you is here: http://pytorch.org/docs/master/nn.html\n\"\"\"\n\nclass studentB(nn.Module):\n def __init__(self, params):\n \"\"\"\n We define an convolutional network that predicts the sign from an image. The components\n required are:\n\n Args:\n params: (Params) contains num_channels\n \"\"\"\n super(studentB, self).__init__()\n self.num_channels = params.num_channels\n \n # each of the convolution layers below have the arguments (input_channels, output_channels, filter_size,\n # stride, padding). We also include batch normalisation layers that help stabilise training.\n # For more details on how to use these layers, check out the documentation.\n\n self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2)\n self.bn1 = nn.BatchNorm2d(32)\n\n self.conv2_1 = nn.Conv2d(32, 32, 1, stride=1, padding=0)\n self.conv2_2 = nn.Conv2d(32, 32, 3, stride=1, padding=1)\n self.conv2_3 = nn.Conv2d(32, 64, 1, stride=1, padding=0)\n self.bn2 = nn.BatchNorm2d(64)\n\n self.conv3_1 = nn.Conv2d(64, 64, 1, stride=1, padding=0)\n self.conv3_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)\n self.conv3_3 = nn.Conv2d(64, 128, 1, stride=1, padding=0)\n self.bn3 = nn.BatchNorm2d(128)\n\n\n # 2 fully connected layers to transform the output of the convolution layers to the final output\n self.fc1 = nn.Linear(4*4*128, 500)\n self.fcbn1 = nn.BatchNorm1d(500)\n self.fc2 = nn.Linear(500, 10) \n self.dropout_rate = params.dropout_rate\n\n def forward(self, s):\n \"\"\"\n This function defines how we use the components of our network to operate on an input batch.\n\n Args:\n s: (Variable) contains a batch of images, of dimension batch_size x 3 x 32 x 32 .\n\n Returns:\n out: (Variable) dimension batch_size x 6 with the log probabilities for the labels of each image.\n\n Note: the dimensions after each step are provided\n \"\"\"\n # -> batch_size x 3 x 32 x 32\n # we apply the convolution layers, followed by batch normalisation, maxpool and relu x 3\n s = self.bn1(self.conv1(s)) # batch_size x 32 x 32 x 32\n s = F.relu(F.max_pool2d(s, 2)) # batch_size x 32 x 16 x 16\n\n s = self.conv2_1(s)\n s = self.conv2_2(s)\n s = self.conv2_3(s)\n s = self.bn2(s) # batch_size x 10 * 2 x 16 x 16\n s = F.relu(F.max_pool2d(s, 2)) # batch_size x num_channels*2 x 8 x 8\n \n s = self.conv3_1(s)\n s = self.conv3_2(s)\n s = self.conv3_3(s)\n s = self.bn3(s) # batch_size x 10 * 2 x 16 x 16\n s = F.relu(F.max_pool2d(s, 2)) # batch_size x num_channels*2 x 8 x 8\n\n # flatten the output for each image\n s = s.view(-1, 4*4*128) # batch_size x 4*4*num_channels*4\n\n # apply 2 fully connected layers with 
dropout\n s = F.dropout(F.relu(self.fcbn1(self.fc1(s))), \n p=self.dropout_rate, training=self.training) # batch_size x self.num_channels*4\n s = self.fc2(s) # batch_size x 10\n\n return s\n\n\ndef loss_fn(outputs, labels):\n \"\"\"\n Compute the cross entropy loss given outputs and labels.\n\n Args:\n outputs: (Variable) dimension batch_size x 6 - output of the model\n labels: (Variable) dimension batch_size, where each element is a value in [0, 1, 2, 3, 4, 5]\n\n Returns:\n loss (Variable): cross entropy loss for all images in the batch\n\n Note: you may use a standard loss function from http://pytorch.org/docs/master/nn.html#loss-functions. This example\n demonstrates how you can easily define a custom loss function.\n \"\"\"\n return nn.CrossEntropyLoss()(outputs, labels)\n\n\ndef loss_fn_kd(outputs, labels, teacher_outputs, params):\n \"\"\"\n Compute the knowledge-distillation (KD) loss given outputs, labels.\n \"Hyperparameters\": temperature and alpha\n\n NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher\n and student expects the input tensor to be log probabilities! See Issue #2\n \"\"\"\n alpha = params.alpha\n T = params.temperature\n KD_loss = nn.KLDivLoss()(F.log_softmax(outputs/T, dim=1),\n F.softmax(teacher_outputs/T, dim=1)) * (alpha * T * T) + \\\n F.cross_entropy(outputs, labels) * (1. - alpha)\n\n return KD_loss\n\n\ndef accuracy(outputs, labels):\n \"\"\"\n Compute the accuracy, given the outputs and labels for all images.\n\n Args:\n outputs: (np.ndarray) output of the model\n labels: (np.ndarray) [0, 1, ..., num_classes-1]\n\n Returns: (float) accuracy in [0,1]\n \"\"\"\n outputs = np.argmax(outputs, axis=1)\n return np.sum(outputs==labels)/float(labels.size)\n\n\n# maintain all metrics required in this dictionary- these are used in the training and evaluation loops\nmetrics = {\n 'accuracy': accuracy,\n # could add more metrics such as accuracy for each token type\n}"
] |
[
[
"torch.nn.Linear",
"numpy.sum",
"torch.nn.BatchNorm2d",
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"numpy.argmax",
"torch.nn.functional.cross_entropy",
"torch.nn.KLDivLoss",
"torch.nn.functional.softmax",
"torch.nn.functional.max_pool2d",
"torch.nn.CrossEntropyLoss"
]
] |
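A quick way to sanity-check the KD loss in studentB.py above is to call it on random tensors. The sketch below is not from the repo: it inlines alpha and T as plain arguments instead of a params object, and the shapes are illustrative. With alpha=0 the distillation term vanishes and only cross-entropy remains.

import torch
import torch.nn as nn
import torch.nn.functional as F

def loss_fn_kd(outputs, labels, teacher_outputs, alpha, T):
    # KL(student || teacher) at temperature T, plus weighted hard-label CE
    return nn.KLDivLoss()(F.log_softmax(outputs / T, dim=1),
                          F.softmax(teacher_outputs / T, dim=1)) * (alpha * T * T) \
           + F.cross_entropy(outputs, labels) * (1. - alpha)

outputs = torch.randn(4, 10)           # student logits
teacher_outputs = torch.randn(4, 10)   # teacher logits
labels = torch.randint(0, 10, (4,))
print(loss_fn_kd(outputs, labels, teacher_outputs, alpha=0.9, T=4.0))
# alpha=0 reduces to plain cross-entropy
assert torch.isclose(loss_fn_kd(outputs, labels, teacher_outputs, 0.0, 4.0),
                     F.cross_entropy(outputs, labels))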
saroudant/sobolev_alignment_manuscript
|
[
"2b4d7ce4bbdac3a32ad8c02b950b4d1c91cda193"
] |
[
"figure_3/model_III/sobolev_alignment/train_VAE.py"
] |
[
"\"\"\"\nThis script:\n- Train Sobolev Alignment.\n- Save the two networks.\n\"\"\"\n\nimport os, sys, getopt\nimport pandas as pd\nimport numpy as np\nimport re\nfrom anndata import AnnData\nimport torch\nfrom pickle import dump, load\nfrom copy import deepcopy\nimport gc\n\nfrom sobolev_alignment import SobolevAlignment\n\n# Import params\nfrom model_III_synthetic_params import *\nfrom read_data import read_data\n\n\n# Import parameters\nn_artificial_samples = None\ntmp_file = None\nopts, args = getopt.getopt(sys.argv[1:],'o:d:n:t:j:p:',['output=', 'data=', 'artifsamples=', 'temp=', 'job=', 'perm='])\nfor opt, arg in opts:\n if opt in (\"-o\", \"--output\"):\n output_folder = str(arg)\n elif opt in (\"-d\", \"--data\"):\n data_subfolder = str(arg)\n elif opt in ('-n', '--artifsamples'):\n n_artificial_samples = int(arg)\n elif opt in ('-t', '--temp'):\n tmp_file = str(arg)\n elif opt in ('-j', '--job'):\n n_jobs = int(arg)\nn_artificial_samples = n_artificial_samples if n_artificial_samples is not None else 10**6\nn_artificial_samples = int(n_artificial_samples)\ntmp_file = tmp_file if tmp_file is not None else '/tmp/SM/'\n\n###\n# IMPORT DATA\n###\n\nX_source, X_target = read_data(data_folder, data_subfolder)\ngc.collect()\n\n###\n# Sobolev Alignment start\n###\n\n# Read best parameters\ncell_line_scvi_params, tumor_scvi_params = read_scvi_params(output_folder)\n\nsobolev_alignment_clf = SobolevAlignment(\n source_scvi_params=cell_line_scvi_params,\n target_scvi_params=tumor_scvi_params,\n source_krr_params=default_krr_params,\n target_krr_params=default_krr_params,\n n_jobs=n_jobs\n)\n\n###\n# Training Sobolev Alignment if not already saved.\n###\n\nif 'sobolev_alignment_model' not in os.listdir(output_folder): \n pass\nelse:\n sys.exit(\"VAE ALREADY TRAINED\")\n\nsobolev_alignment_clf.n_jobs = n_jobs\nsobolev_alignment_clf.fit(\n X_source=X_source,\n X_target=X_target,\n source_batch_name=batch_name,\n target_batch_name=batch_name,\n continuous_covariate_names=continuous_covariate_names,\n n_artificial_samples=100,\n fit_vae=True,\n sample_artificial=False,\n krr_approx=False,\n n_samples_per_sample_batch=10**6,\n frac_save_artificial=1.,\n save_mmap=tmp_file,\n log_input=log_input,\n no_posterior_collapse=no_posterior_collapse,\n frob_norm_source=frob_norm_source\n)\n\nif 'sobolev_alignment_model' not in os.listdir(output_folder):\n sobolev_alignment_clf.save('%s/sobolev_alignment_model/'%(output_folder), with_krr=False)\n gc.collect()\n\n # Save embedding\n for x in sobolev_alignment_clf.scvi_models:\n np.savetxt(\n '%s/scvi_embedding_%s.csv'%(output_folder, x),\n sobolev_alignment_clf.scvi_models[x].get_latent_representation()\n )\n\ntorch.cuda.empty_cache()\ngc.collect()\nsys.exit(\"FINISH VAE TRAINING\")"
] |
[
[
"torch.cuda.empty_cache"
]
] |
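train_VAE.py above parses its command line with getopt rather than argparse; a minimal standalone sketch of that pattern (option names copied from the script, defaults illustrative):

import sys, getopt

n_artificial_samples, tmp_file, n_jobs = None, None, 1
opts, args = getopt.getopt(sys.argv[1:], 'n:t:j:', ['artifsamples=', 'temp=', 'job='])
for opt, arg in opts:
    if opt in ('-n', '--artifsamples'):
        n_artificial_samples = int(arg)
    elif opt in ('-t', '--temp'):
        tmp_file = str(arg)
    elif opt in ('-j', '--job'):
        n_jobs = int(arg)
# fall back to defaults when a flag is omitted, as the script does
n_artificial_samples = n_artificial_samples if n_artificial_samples is not None else 10**6
tmp_file = tmp_file if tmp_file is not None else '/tmp/SM/'
print(n_artificial_samples, tmp_file, n_jobs)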
finite-infinity/tensorflow-serving-yolov3
|
[
"d9244b2b12c2c6370638f48109f7a8f2ffeaa4c4"
] |
[
"train.py"
] |
[
"#! /usr/bin/env python\n# coding=utf-8\n\nimport os\nimport time\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nimport core.utils as utils\nfrom tqdm import tqdm\nfrom core.dataset import Dataset\nfrom core.yolov3 import YOLOV3\nfrom core.config import cfg\n\nclass YoloTrain(object):\n def __init__(self): # ไปconfigๆไปถ่ทๅๅฐไธไบๅ้\n self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE\n self.classes = utils.read_class_names(cfg.YOLO.CLASSES)\n self.num_classes = len(self.classes)\n self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT\n self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END\n self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS\n self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS\n self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS\n self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT\n self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))\n self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY\n self.max_bbox_per_scale = 150\n self.train_logdir = \"./data/log/train\" # ๆฅๅฟไฟๅญๅฐๅ\n self.trainset = Dataset('train')\n self.testset = Dataset('test')\n self.steps_per_period = len(self.trainset)\n self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n with tf.name_scope('define_input'): # ๅฎไน่พๅ
ฅๅฑ\n self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')\n self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')\n self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')\n self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')\n self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')\n self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')\n self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')\n self.trainable = tf.placeholder(dtype=tf.bool, name='training')\n\n with tf.name_scope(\"define_loss\"): # ๅฎไนๆๅคฑๅฝๆฐ\n self.model = YOLOV3(self.input_data, self.trainable)\n self.net_var = tf.global_variables()\n self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(\n self.label_sbbox, self.label_mbbox, self.label_lbbox,\n self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)\n self.loss = self.giou_loss + self.conf_loss + self.prob_loss\n\n with tf.name_scope('learn_rate'): # ๅฎไนๅญฆไน ็\n self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')\n warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,\n dtype=tf.float64, name='warmup_steps')\n train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period,\n dtype=tf.float64, name='train_steps')\n self.learn_rate = tf.cond(\n pred=self.global_step < warmup_steps,\n true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,\n false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *\n (1 + tf.cos(\n (self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))\n )\n global_step_update = tf.assign_add(self.global_step, 1.0)\n\n '''\n warmup_stepsไฝ็จ๏ผ \n ็ฅ็ป็ฝ็ปๅจๅๅผๅง่ฎญ็ป็่ฟ็จไธญๅฎนๆๅบ็ฐloss=NaN็ๆ
ๅต๏ผไธบไบๅฐฝ้้ฟๅ
่ฟไธชๆ
ๅต๏ผๅ ๆญคๅๅง็ๅญฆไน ็่ฎพ็ฝฎๅพๅพไฝ\n ไฝๆฏ่ฟๅไฝฟๅพ่ฎญ็ป้ๅบฆๅๆ
ขไบใๅ ๆญค๏ผ้็จ้ๆธๅขๅคง็ๅญฆไน ็๏ผไป่่พพๅฐๆขๅฏไปฅๅฐฝ้้ฟๅ
ๅบ็ฐnan๏ผๅๅฏไปฅ็ญ่ฎญ็ป่ฟ็จ็จณๅฎไบๅๅขๅคง่ฎญ็ป้ๅบฆ็็ฎ็ใ\n '''\n\n with tf.name_scope(\"define_weight_decay\"): # ๆๆฐๅนณๆป๏ผๅฏไปฅ่ฎฉ็ฎๆณๅจๆๅไธ้ฃไน้่ก๏ผ็ปๆๆดๆ้ฒๆฃๆง\n moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())\n\n # ๆๅฎ้่ฆๆขๅค็ๅๆฐใๅฑ็ญไฟกๆฏ, ไฝ็ฝฎๆๅ๏ผๅๅฐๆจกๅไฝ็งฏใ\n with tf.name_scope('loader_and_saver'):\n variables_to_restore = [v for v in self.net_var if\n v.name.split('/')[0] not in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']]\n self.loader = tf.train.Saver(variables_to_restore)\n self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)\n\n with tf.name_scope(\"define_first_stage_train\"): # ็ฌฌไธ้ถๆฎต่ฎญ็ป๏ผๅช่ฎญ็ปๆๅฎๅฑ\n self.first_stage_trainable_var_list = []\n for var in tf.trainable_variables():\n var_name = var.op.name\n var_name_mess = str(var_name).split('/')\n if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:\n self.first_stage_trainable_var_list.append(var)\n\n first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,\n var_list=self.first_stage_trainable_var_list)\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n with tf.control_dependencies([first_stage_optimizer, global_step_update]):\n with tf.control_dependencies([moving_ave]):\n self.train_op_with_frozen_variables = tf.no_op()\n\n with tf.name_scope(\"define_second_stage_train\"): # ็ฌฌไบ้ถๆฎต่ฎญ็ป๏ผ้ๆพๆๆๅฑ\n second_stage_trainable_var_list = tf.trainable_variables()\n second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,\n var_list=second_stage_trainable_var_list)\n\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n with tf.control_dependencies([second_stage_optimizer, global_step_update]):\n with tf.control_dependencies([moving_ave]):\n self.train_op_with_all_variables = tf.no_op()\n\n with tf.name_scope('summary'):\n tf.summary.scalar(\"learn_rate\", self.learn_rate)\n tf.summary.scalar(\"giou_loss\", self.giou_loss)\n tf.summary.scalar(\"conf_loss\", self.conf_loss)\n tf.summary.scalar(\"prob_loss\", self.prob_loss)\n tf.summary.scalar(\"total_loss\", self.loss)\n\n logdir = \"./data/log/\" # ๆฅๅฟไฟๅญๅฐๅ\n if os.path.exists(logdir): shutil.rmtree(logdir)\n os.mkdir(logdir)\n self.write_op = tf.summary.merge_all()\n self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)\n\n def train(self):\n self.sess.run(tf.global_variables_initializer())\n try:\n print('=> Restoring weights from: %s ... ' % self.initial_weight)\n self.loader.restore(self.sess, self.initial_weight)\n except:\n print('=> %s does not exist !!!' 
% self.initial_weight)\n print('=> Now it starts to train YOLOV3 from scratch ...')\n self.first_stage_epochs = 0\n\n # ้ถๆฎตๅญฆไน ็\n for epoch in range(1, 1 + self.first_stage_epochs + self.second_stage_epochs):\n if epoch <= self.first_stage_epochs:\n train_op = self.train_op_with_frozen_variables\n else:\n train_op = self.train_op_with_all_variables\n\n # tqdm is a visualization tool that displays an Iterable object in a progree bar\n pbar = tqdm(self.trainset)\n train_epoch_loss, test_epoch_loss = [], []\n\n for train_data in pbar:\n _, summary, train_step_loss, global_step_val = self.sess.run(\n [train_op, self.write_op, self.loss, self.global_step], feed_dict={\n self.input_data: train_data[0],\n self.label_sbbox: train_data[1],\n self.label_mbbox: train_data[2],\n self.label_lbbox: train_data[3],\n self.true_sbboxes: train_data[4],\n self.true_mbboxes: train_data[5],\n self.true_lbboxes: train_data[6],\n self.trainable: True,\n })\n\n train_epoch_loss.append(train_step_loss)\n self.summary_writer.add_summary(summary, global_step_val)\n pbar.set_description(\"train loss: %.2f\" % train_step_loss)\n\n for test_data in self.testset:\n test_step_loss = self.sess.run(self.loss, feed_dict={\n self.input_data: test_data[0],\n self.label_sbbox: test_data[1],\n self.label_mbbox: test_data[2],\n self.label_lbbox: test_data[3],\n self.true_sbboxes: test_data[4],\n self.true_mbboxes: test_data[5],\n self.true_lbboxes: test_data[6],\n self.trainable: False,\n })\n\n test_epoch_loss.append(test_step_loss)\n\n train_epoch_loss, test_epoch_loss = np.mean(train_epoch_loss), np.mean(test_epoch_loss)\n ckpt_file = \"./checkpoint/yolov3_train_loss=%.4f.ckpt\" % train_epoch_loss\n log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n print(\"=> Epoch: %2d Time: %s Train loss: %.2f Test loss: %.2f Saving %s ...\"\n % (epoch, log_time, train_epoch_loss, test_epoch_loss, ckpt_file))\n self.saver.save(self.sess, ckpt_file, global_step=epoch) \n\n\nif __name__ == '__main__': YoloTrain().train()\n"
] |
[
[
"numpy.mean",
"tensorflow.assign_add",
"tensorflow.control_dependencies",
"tensorflow.global_variables_initializer",
"tensorflow.no_op",
"tensorflow.trainable_variables",
"tensorflow.Variable",
"tensorflow.global_variables",
"tensorflow.train.Saver",
"tensorflow.constant",
"tensorflow.ConfigProto",
"tensorflow.get_collection",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.cos",
"tensorflow.placeholder",
"tensorflow.name_scope",
"tensorflow.summary.merge_all",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.summary.FileWriter"
]
] |
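The learning-rate schedule built with tf.cond in train.py above is linear warmup followed by cosine decay down to learn_rate_end; the same curve in plain NumPy, independent of TensorFlow (step counts and rates are illustrative):

import numpy as np

def lr_at(step, warmup_steps, train_steps, lr_init, lr_end):
    # linear warmup, then cosine decay from lr_init to lr_end
    if step < warmup_steps:
        return step / warmup_steps * lr_init
    return lr_end + 0.5 * (lr_init - lr_end) * (
        1 + np.cos((step - warmup_steps) / (train_steps - warmup_steps) * np.pi))

steps = np.arange(1, 10001)
lrs = [lr_at(s, warmup_steps=500, train_steps=10000, lr_init=1e-4, lr_end=1e-6) for s in steps]
# peaks at lr_init right after warmup, ends exactly at lr_end
assert max(lrs) <= 1e-4 and abs(lrs[-1] - 1e-6) < 1e-12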
mister-bailey/e3nn
|
[
"43d4b12f5ba5947583feb35f4e0662b73aae5618"
] |
[
"tests/readme_example_test.py"
] |
[
"\n# pylint: disable=missing-docstring, line-too-long, invalid-name, arguments-differ, no-member, pointless-statement\nfrom functools import partial\n\nimport torch\n\nfrom e3nn import Kernel, rs\nfrom e3nn.non_linearities.norm import Norm\nfrom e3nn.non_linearities.rescaled_act import swish\nfrom e3nn.point.operations import Convolution\nfrom e3nn.radial import GaussianRadialModel\n\n# Define the input and output representations\nRs_in = [(1, 0), (2, 1)] # Input = One scalar plus two vectors\nRs_out = [(1, 1)] # Output = One single vector\n\n# Radial model: R+ -> R^d\nRadialModel = partial(GaussianRadialModel, max_radius=3.0, number_of_basis=3, h=100, L=1, act=swish)\n\n# kernel: composed on a radial part that contains the learned parameters\n# and an angular part given by the spherical hamonics and the Clebsch-Gordan coefficients\nK = partial(Kernel, RadialModel=RadialModel)\n\n# Create the convolution module\nconv = Convolution(K(Rs_in, Rs_out))\n\n# Module to compute the norm of each irreducible component\nnorm = Norm(Rs_out)\n\n\nn = 5 # number of input points\nfeatures = rs.randn(1, n, Rs_in, requires_grad=True)\nin_geometry = torch.randn(1, n, 3)\nout_geometry = torch.zeros(1, 1, 3) # One point at the origin\n\n\nout = norm(conv(features, in_geometry, out_geometry))\nout.backward()\n\nprint(out)\nprint(features.grad)\n"
] |
[
[
"torch.zeros",
"torch.randn"
]
] |
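The README example above leans on functools.partial twice (first to freeze the radial model's hyperparameters, then to bind it into the Kernel). The idiom in isolation, with a toy stand-in function rather than the e3nn API:

from functools import partial

def make_model(basis, max_radius, h):
    return f"model(basis={basis}, max_radius={max_radius}, h={h})"

# freeze hyperparameters now, supply the remaining argument later
RadialModel = partial(make_model, max_radius=3.0, h=100)
print(RadialModel(basis=3))   # -> model(basis=3, max_radius=3.0, h=100)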
milesgray/CALAE
|
[
"911378b855f567e942336ae609cb8edb52e55228",
"a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac",
"a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac"
] |
[
"loss/robust_loss_pytorch/robust_loss_pytorch/cubic_spline.py",
"metrics/niqe.py",
"loss/truncated.py"
] |
[
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implements 1D cubic Hermite spline interpolation.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\n\n\ndef interpolate1d(x, values, tangents):\n r\"\"\"Perform cubic hermite spline interpolation on a 1D spline.\n\n The x coordinates of the spline knots are at [0 : 1 : len(values)-1].\n Queries outside of the range of the spline are computed using linear\n extrapolation. See https://en.wikipedia.org/wiki/Cubic_Hermite_spline\n for details, where \"x\" corresponds to `x`, \"p\" corresponds to `values`, and\n \"m\" corresponds to `tangents`.\n\n Args:\n x: A tensor of any size of single or double precision floats containing the\n set of values to be used for interpolation into the spline.\n values: A vector of single or double precision floats containing the value\n of each knot of the spline being interpolated into. Must be the same\n length as `tangents` and the same type as `x`.\n tangents: A vector of single or double precision floats containing the\n tangent (derivative) of each knot of the spline being interpolated into.\n Must be the same length as `values` and the same type as `x`.\n\n Returns:\n The result of interpolating along the spline defined by `values`, and\n `tangents`, using `x` as the query values. Will be the same length and type\n as `x`.\n \"\"\"\n # if x.dtype == 'float64' or torch.as_tensor(x).dtype == torch.float64:\n # float_dtype = torch.float64\n # else:\n # float_dtype = torch.float32\n # x = torch.as_tensor(x, dtype=float_dtype)\n # values = torch.as_tensor(values, dtype=float_dtype)\n # tangents = torch.as_tensor(tangents, dtype=float_dtype)\n assert torch.is_tensor(x)\n assert torch.is_tensor(values)\n assert torch.is_tensor(tangents)\n float_dtype = x.dtype\n assert values.dtype == float_dtype\n assert tangents.dtype == float_dtype\n assert len(values.shape) == 1\n assert len(tangents.shape) == 1\n assert values.shape[0] == tangents.shape[0]\n\n x_lo = torch.floor(torch.clamp(x, torch.as_tensor(0),\n values.shape[0] - 2)).type(torch.int64)\n x_hi = x_lo + 1\n\n # Compute the relative distance between each `x` and the knot below it.\n t = x - x_lo.type(float_dtype)\n\n # Compute the cubic hermite expansion of `t`.\n t_sq = t**2\n t_cu = t * t_sq\n h01 = -2. * t_cu + 3. * t_sq\n h00 = 1. - h01\n h11 = t_cu - t_sq\n h10 = h11 - t_sq + t\n\n # Linearly extrapolate above and below the extents of the spline for all\n # values.\n value_before = tangents[0] * t + values[0]\n value_after = tangents[-1] * (t - 1.) 
+ values[-1]\n\n # Cubically interpolate between the knots below and above each query point.\n neighbor_values_lo = values[x_lo]\n neighbor_values_hi = values[x_hi]\n neighbor_tangents_lo = tangents[x_lo]\n neighbor_tangents_hi = tangents[x_hi]\n value_mid = (\n neighbor_values_lo * h00 + neighbor_values_hi * h01 +\n neighbor_tangents_lo * h10 + neighbor_tangents_hi * h11)\n\n # Return the interpolated or extrapolated values for each query point,\n # depending on whether or not the query lies within the span of the spline.\n return torch.where(t < 0., value_before,\n torch.where(t > 1., value_after, value_mid))\n",
"import math\nfrom os.path import dirname, join\n\nimport cv2\nimport numpy as np\nimport scipy\nimport scipy.io\nimport scipy.misc\nimport scipy.ndimage\nimport scipy.special\nfrom PIL import Image\n\ngamma_range = np.arange(0.2, 10, 0.001)\na = scipy.special.gamma(2.0/gamma_range)\na *= a\nb = scipy.special.gamma(1.0/gamma_range)\nc = scipy.special.gamma(3.0/gamma_range)\nprec_gammas = a/(b*c)\n\n\ndef aggd_features(imdata):\n # flatten imdata\n imdata.shape = (len(imdata.flat),)\n imdata2 = imdata*imdata\n left_data = imdata2[imdata < 0]\n right_data = imdata2[imdata >= 0]\n left_mean_sqrt = 0\n right_mean_sqrt = 0\n if len(left_data) > 0:\n left_mean_sqrt = np.sqrt(np.average(left_data))\n if len(right_data) > 0:\n right_mean_sqrt = np.sqrt(np.average(right_data))\n\n if right_mean_sqrt != 0:\n gamma_hat = left_mean_sqrt/right_mean_sqrt\n else:\n gamma_hat = np.inf\n # solve r-hat norm\n\n imdata2_mean = np.mean(imdata2)\n if imdata2_mean != 0:\n r_hat = (np.average(np.abs(imdata))**2) / (np.average(imdata2))\n else:\n r_hat = np.inf\n rhat_norm = r_hat * (((math.pow(gamma_hat, 3) + 1) *\n (gamma_hat + 1)) / math.pow(math.pow(gamma_hat, 2) + 1, 2))\n\n # solve alpha by guessing values that minimize ro\n pos = np.argmin((prec_gammas - rhat_norm)**2)\n alpha = gamma_range[pos]\n\n gam1 = scipy.special.gamma(1.0/alpha)\n gam2 = scipy.special.gamma(2.0/alpha)\n gam3 = scipy.special.gamma(3.0/alpha)\n\n aggdratio = np.sqrt(gam1) / np.sqrt(gam3)\n bl = aggdratio * left_mean_sqrt\n br = aggdratio * right_mean_sqrt\n\n # mean parameter\n N = (br - bl)*(gam2 / gam1) # *aggdratio\n return (alpha, N, bl, br, left_mean_sqrt, right_mean_sqrt)\n\n\ndef ggd_features(imdata):\n nr_gam = 1/prec_gammas\n sigma_sq = np.var(imdata)\n E = np.mean(np.abs(imdata))\n rho = sigma_sq/E**2\n pos = np.argmin(np.abs(nr_gam - rho))\n return gamma_range[pos], sigma_sq\n\n\ndef paired_product(new_im):\n shift1 = np.roll(new_im.copy(), 1, axis=1)\n shift2 = np.roll(new_im.copy(), 1, axis=0)\n shift3 = np.roll(np.roll(new_im.copy(), 1, axis=0), 1, axis=1)\n shift4 = np.roll(np.roll(new_im.copy(), 1, axis=0), -1, axis=1)\n\n H_img = shift1 * new_im\n V_img = shift2 * new_im\n D1_img = shift3 * new_im\n D2_img = shift4 * new_im\n\n return (H_img, V_img, D1_img, D2_img)\n\n\ndef gen_gauss_window(lw, sigma):\n sd = np.float32(sigma)\n lw = int(lw)\n weights = [0.0] * (2 * lw + 1)\n weights[lw] = 1.0\n sum = 1.0\n sd *= sd\n for ii in range(1, lw + 1):\n tmp = np.exp(-0.5 * np.float32(ii * ii) / sd)\n weights[lw + ii] = tmp\n weights[lw - ii] = tmp\n sum += 2.0 * tmp\n for ii in range(2 * lw + 1):\n weights[ii] /= sum\n return weights\n\n\ndef compute_image_mscn_transform(image, C=1, avg_window=None, extend_mode='constant'):\n if avg_window is None:\n avg_window = gen_gauss_window(3, 7.0/6.0)\n assert len(np.shape(image)) == 2\n h, w = np.shape(image)\n mu_image = np.zeros((h, w), dtype=np.float32)\n var_image = np.zeros((h, w), dtype=np.float32)\n image = np.array(image).astype('float32')\n scipy.ndimage.correlate1d(image, avg_window, 0, mu_image, mode=extend_mode)\n scipy.ndimage.correlate1d(mu_image, avg_window, 1,\n mu_image, mode=extend_mode)\n scipy.ndimage.correlate1d(image**2, avg_window, 0,\n var_image, mode=extend_mode)\n scipy.ndimage.correlate1d(var_image, avg_window,\n 1, var_image, mode=extend_mode)\n var_image = np.sqrt(np.abs(var_image - mu_image**2))\n return (image - mu_image)/(var_image + C), var_image, mu_image\n\n\ndef _niqe_extract_subband_feats(mscncoefs):\n # alpha_m, = 
extract_ggd_features(mscncoefs)\n alpha_m, N, bl, br, lsq, rsq = aggd_features(mscncoefs.copy())\n pps1, pps2, pps3, pps4 = paired_product(mscncoefs)\n alpha1, N1, bl1, br1, lsq1, rsq1 = aggd_features(pps1)\n alpha2, N2, bl2, br2, lsq2, rsq2 = aggd_features(pps2)\n alpha3, N3, bl3, br3, lsq3, rsq3 = aggd_features(pps3)\n alpha4, N4, bl4, br4, lsq4, rsq4 = aggd_features(pps4)\n return np.array([alpha_m, (bl+br)/2.0,\n alpha1, N1, bl1, br1, # (V)\n alpha2, N2, bl2, br2, # (H)\n alpha3, N3, bl3, bl3, # (D1)\n alpha4, N4, bl4, bl4, # (D2)\n ])\n\n\ndef get_patches_train_features(img, patch_size, stride=8):\n return _get_patches_generic(img, patch_size, 1, stride)\n\n\ndef get_patches_test_features(img, patch_size, stride=8):\n return _get_patches_generic(img, patch_size, 0, stride)\n\n\ndef extract_on_patches(img, patch_size):\n h, w = img.shape\n patch_size = np.int(patch_size)\n patches = []\n for j in range(0, h-patch_size+1, patch_size):\n for i in range(0, w-patch_size+1, patch_size):\n patch = img[j:j+patch_size, i:i+patch_size]\n patches.append(patch)\n\n patches = np.array(patches)\n\n patch_features = []\n for p in patches:\n patch_features.append(_niqe_extract_subband_feats(p))\n patch_features = np.array(patch_features)\n\n return patch_features\n\n\ndef _get_patches_generic(img, patch_size, is_train, stride):\n h, w = np.shape(img)\n if h < patch_size or w < patch_size:\n print(\"Input image is too small\")\n exit(0)\n\n # ensure that the patch divides evenly into img\n hoffset = (h % patch_size)\n woffset = (w % patch_size)\n\n if hoffset > 0:\n img = img[:-hoffset, :]\n if woffset > 0:\n img = img[:, :-woffset]\n\n img = img.astype(np.float32)\n # img2 = scipy.misc.imresize(img, 0.5, interp='bicubic', mode='F')\n img2 = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\n\n mscn1, var, mu = compute_image_mscn_transform(img)\n mscn1 = mscn1.astype(np.float32)\n\n mscn2, _, _ = compute_image_mscn_transform(img2)\n mscn2 = mscn2.astype(np.float32)\n\n feats_lvl1 = extract_on_patches(mscn1, patch_size)\n feats_lvl2 = extract_on_patches(mscn2, patch_size/2)\n\n feats = np.hstack((feats_lvl1, feats_lvl2)) # feats_lvl3))\n\n return feats\n\n\ndef niqe(inputImgData):\n\n patch_size = 96\n module_path = dirname(__file__)\n\n # TODO: memoize\n params = scipy.io.loadmat(\n join(module_path, '/weights/niqe_image_params.mat'))\n pop_mu = np.ravel(params[\"pop_mu\"])\n pop_cov = params[\"pop_cov\"]\n\n if inputImgData.ndim == 3:\n inputImgData = cv2.cvtColor(inputImgData, cv2.COLOR_BGR2GRAY)\n M, N = inputImgData.shape\n\n # assert C == 1, \"niqe called with videos containing %d channels. Please supply only the luminance channel\" % (C,)\n assert M > (patch_size*2+1), \"niqe called with small frame size, requires > 192x192 resolution video using current training parameters\"\n assert N > (patch_size*2+1), \"niqe called with small frame size, requires > 192x192 resolution video using current training parameters\"\n\n feats = get_patches_test_features(inputImgData, patch_size)\n sample_mu = np.mean(feats, axis=0)\n sample_cov = np.cov(feats.T)\n\n X = sample_mu - pop_mu\n covmat = ((pop_cov+sample_cov)/2.0)\n pinvmat = scipy.linalg.pinv(covmat)\n niqe_score = np.sqrt(np.dot(np.dot(X, pinvmat), X))\n\n return niqe_score",
"\"\"\"https://github.com/AlanChou/Truncated-Loss/blob/master/TruncatedLoss.py\nThis is the unofficial PyTorch implementation of the paper \n\"Generalized Cross Entropy Loss for Training Deep Neural Networks with Noisy Labels\" \nin NIPS 2018.\nhttps://arxiv.org/abs/1805.07836\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport numpy as np\n\n\nclass TruncatedLoss(nn.Module):\n def __init__(self, q=0.7, k=0.5, trainset_size=50000):\n \"\"\"'Generalized Cross Entropy Loss for Training Deep Neural Networks with Noisy Labels'\n\n Args:\n q (float, optional): Weight for labels. Defaults to 0.7.\n k (float, optional): Weight for mainly logits, but not only.. Defaults to 0.5.\n trainset_size (int, optional): Size of the internal weight tensor, \n needs shape [DS, 1] (DS = total samples being trained on). \n Defaults to 50000.\n \"\"\"\n super(TruncatedLoss, self).__init__()\n self.q = q\n self.k = k\n self.weight = torch.nn.Parameter(\n data=torch.ones(trainset_size, 1), requires_grad=False\n )\n\n def forward(self, logits, targets, indexes):\n p = F.softmax(logits, dim=1)\n Yg = torch.gather(p, 1, torch.unsqueeze(targets, 1))\n\n loss = ((1 - (Yg ** self.q)) / self.q) * self.weight[indexes] - (\n (1 - (self.k ** self.q)) / self.q\n ) * self.weight[indexes]\n loss = torch.mean(loss)\n\n return loss\n def update_weight(self, logits, targets, indexes):\n p = F.softmax(logits, dim=1)\n Yg = torch.gather(p, 1, torch.unsqueeze(targets, 1))\n Lq = (1 - (Yg ** self.q)) / self.q\n Lqk = np.repeat(((1 - (self.k ** self.q)) / self.q), targets.size(0))\n Lqk = torch.from_numpy(Lqk).type(torch.cuda.FloatTensor)\n Lqk = torch.unsqueeze(Lqk, 1)\n\n condition = torch.gt(Lqk, Lq)\n self.weight[indexes] = condition.type(torch.cuda.FloatTensor)\n"
] |
[
[
"torch.is_tensor",
"torch.as_tensor",
"torch.where"
],
[
"scipy.special.gamma",
"numpy.dot",
"numpy.argmin",
"numpy.mean",
"scipy.linalg.pinv",
"numpy.arange",
"numpy.sqrt",
"numpy.array",
"numpy.int",
"numpy.zeros",
"numpy.shape",
"numpy.float32",
"numpy.hstack",
"numpy.cov",
"scipy.ndimage.correlate1d",
"numpy.ravel",
"numpy.abs",
"numpy.average",
"numpy.var"
],
[
"torch.gt",
"torch.ones",
"torch.unsqueeze",
"torch.from_numpy",
"torch.nn.functional.softmax",
"torch.mean"
]
] |
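The Hermite basis used inside interpolate1d above can be checked in isolation: at t=0 the value comes entirely from the lower knot, at t=1 entirely from the upper knot, and the two value weights always sum to one. A small verification sketch, independent of the file above:

import torch

t = torch.tensor([0.0, 0.5, 1.0])
t_sq, t_cu = t**2, t**3
h01 = -2. * t_cu + 3. * t_sq
h00 = 1. - h01
h11 = t_cu - t_sq
h10 = h11 - t_sq + t

# partition of unity for the value weights, and endpoint behaviour
assert torch.allclose(h00 + h01, torch.ones_like(t))
assert h00[0] == 1.0 and h01[-1] == 1.0   # t=0 -> lower knot, t=1 -> upper knot
assert h10[0] == 0.0 and h11[0] == 0.0    # tangents contribute nothing at the knots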
carlosb1/jina-hub
|
[
"f298d0f136c8627dd720d7a4e3eb9031655f5ccb",
"f298d0f136c8627dd720d7a4e3eb9031655f5ccb"
] |
[
"crafters/numeric/ArrayStringReader/tests/test_arraystringreader.py",
"rankers/MinRanker/tests/test_minranker.py"
] |
[
"import numpy as np\n\nfrom .. import ArrayStringReader\n\n\ndef test_arraystringreader():\n \"\"\"here is my test code\n\n https://docs.pytest.org/en/stable/getting-started.html#create-your-first-test\n \"\"\"\n size = 8\n sample_array = np.random.rand(size).astype('float32')\n text = ','.join([str(x) for x in sample_array])\n\n reader = ArrayStringReader()\n crafted_doc = reader.craft(text, 0)\n\n assert crafted_doc['blob'].shape[0] == size\n np.testing.assert_array_equal(crafted_doc['blob'], sample_array)\n",
"import numpy as np\n\nfrom jina.executors.rankers import Chunk2DocRanker\n\nfrom .. import MinRanker\n\n\ndef create_data():\n query_chunk2match_chunk = {\n 100: [\n {'parent_id': 1, 'id': 10, 'score': 0.4, 'length': 200},\n ],\n 110: [\n {'parent_id': 1, 'id': 10, 'score': 0.3, 'length': 200},\n {'parent_id': 1, 'id': 11, 'score': 0.2, 'length': 200},\n {'parent_id': 4294967294, 'id': 20, 'score': 0.1, 'length': 300},\n ]\n }\n query_chunk_meta = {}\n match_chunk_meta = {}\n match_idx = []\n num_query_chunks = len(query_chunk2match_chunk)\n for query_chunk_id, matches in query_chunk2match_chunk.items():\n query_chunk_meta[query_chunk_id] = {'length': num_query_chunks}\n for c in matches:\n match_chunk_meta[c['id']] = {'length': c['length']}\n match_idx.append((\n c['parent_id'],\n c['id'],\n query_chunk_id,\n c['score'],\n ))\n\n match_idx_numpy = np.array(\n match_idx,\n dtype=[\n (Chunk2DocRanker.COL_MATCH_PARENT_ID, np.int64),\n (Chunk2DocRanker.COL_MATCH_ID, np.int64),\n (Chunk2DocRanker.COL_DOC_CHUNK_ID, np.int64),\n (Chunk2DocRanker.COL_SCORE, np.float64)\n ]\n )\n return match_idx_numpy, query_chunk_meta, match_chunk_meta\n\n\ndef test_minranker():\n ranker = MinRanker()\n match_idx, query_chunk_meta, match_chunk_meta = create_data()\n doc_idx = ranker.score(match_idx, query_chunk_meta, match_chunk_meta)\n # check the matched docs are in descending order of the scores\n assert doc_idx[0][1] > doc_idx[1][1]\n assert doc_idx[0][0] == 4294967294\n assert doc_idx[1][0] == 1\n # check the number of matched docs\n assert len(doc_idx) == 2\n\n\n"
] |
[
[
"numpy.testing.assert_array_equal",
"numpy.random.rand"
],
[
"numpy.array"
]
] |
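create_data in the MinRanker test above packs match records into a NumPy structured array so columns can be addressed by name; the pattern on its own (field names here are illustrative, not the Chunk2DocRanker constants):

import numpy as np

matches = np.array(
    [(1, 10, 100, 0.4), (4294967294, 20, 110, 0.1)],
    dtype=[('parent_id', np.int64), ('id', np.int64),
           ('chunk_id', np.int64), ('score', np.float64)],
)
print(matches['score'])                     # column access by field name
print(matches[matches['parent_id'] == 1])   # boolean selection still works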
matiasleize/tesis_licenciatura
|
[
"5df6e341314583702b466b8ed7977d410f0ee457",
"5df6e341314583702b466b8ed7977d410f0ee457",
"5df6e341314583702b466b8ed7977d410f0ee457",
"5df6e341314583702b466b8ed7977d410f0ee457"
] |
[
"Software/Funcionales/funciones_LambdaCDM_AGN.py",
"Software/Estadรญstica/MCMC/HS/Cosas_viejas/MCMC_cronometros_cosmicos_2params_valores_medios.py",
"Software/Estadรญstica/MCMC/Paper/EXP/CC+SN+AGN/derivar_params.py",
"Software/Estadรญstica/MCMC/Paper/HS/CC+SN/4params/derivar_params.py"
] |
[
"\"\"\"\nCreated on Sun Feb 2 13:28:48 2020\n\n@author: matias\n\"\"\"\nimport numpy as np\nfrom numpy.linalg import inv\nfrom matplotlib import pyplot as plt\nimport time\nimport camb\nfrom scipy.integrate import cumtrapz as cumtrapz\nfrom scipy.integrate import simps as simps\nfrom scipy.interpolate import interp1d\nfrom scipy.constants import c as c_luz #metros/segundos\nc_luz_km = c_luz/1000\n\nimport sys\nimport os\nfrom os.path import join as osjoin\nfrom pc_path import definir_path\npath_git, path_datos_global = definir_path()\nos.chdir(path_git)\nsys.path.append('./Software/Funcionales/')\n\nfrom funciones_int import Hubble_teorico\nfrom funciones_AGN import zs_2_logDlH0\n\n#%%\n'''\nDEPRECATED: Antes de eliminar este archivo copiar este ejemplo en otro .py\nen donde se grafiquen los datos.\n'''\nif __name__ == '__main__':\n from scipy.constants import c as c_luz #metros/segundos\n\n from matplotlib import pyplot as plt\n import sys\n import os\n from os.path import join as osjoin\n from pc_path import definir_path\n path_git, path_datos_global = definir_path()\n os.chdir(path_git)\n sys.path.append('./Software/Funcionales/')\n from funciones_data import leer_data_AGN\n\n #%%\n\n def leer_data_AGN(archivo_AGN):\n z, Fuv, eFuv, Fx, eFx = np.loadtxt(archivo_AGN,\n usecols=(3,4,5,6,7), unpack=True)\n arr1inds = z.argsort()\n sorted_z = z[arr1inds]\n sorted_Fuv = Fuv[arr1inds]\n sorted_eFuv = eFuv[arr1inds]\n sorted_Fx = Fx[arr1inds]\n sorted_eFx = eFx[arr1inds]\n return sorted_z, sorted_Fuv, sorted_eFuv, sorted_Fx, sorted_eFx\n\n #Data AGN\n os.chdir(path_git+'/Software/Estadรญstica/Datos/Datos_AGN')\n data_agn = leer_data_AGN('table3.dat')\n\n\n H_0 = 70\n omega_m = 0.99\n gamma = 0.64\n beta = 7\n delta = 0.3\n theta = [omega_m,beta,gamma,delta]\n\n #params_to_chi2_AGN_nuisance(theta, _, data_agn)/(len(z_data)-4)\n\n data_agn = leer_data_AGN('table3.dat')\n z_data_1, logFuv_1, eFuv_1, logFx_1, eFx_1 = data_agn\n\n zmin = 0\n zmax = 100\n mask = (z_data_1 > zmin) & (z_data_1 < zmax)\n\n z_data = z_data_1[mask]\n logFuv = logFuv_1[mask]\n logFx = logFx_1[mask]\n eFx = eFx_1[mask]\n eFuv = eFuv_1[mask]\n\n zs_modelo = np.linspace(0,30,10**6)\n Dl_teo = -np.log10(H_0) + zs_2_logDlH0(zs_modelo,omega_m,z_data)\n Dl_teo_cm = Dl_teo - np.log10(3.24) + 25\n psi = beta + gamma * logFuv + 2 * (gamma-1) * (Dl_teo_cm + 0.5 * np.log10(4*np.pi))\n\n si_2 = eFx**2 + (gamma * eFuv)**2 + np.exp(2*np.log(delta)) #El cuadrado de los errores\n #si_2 = eFx**2 + (gamma * eFuv)**2 + delta**2 #El cuadrado de los errores\n print(np.sum(si_2))\n chi2_AGN = np.sum( ((logFx-psi)**2/si_2) + np.log(2*np.pi*si_2)) # menos en el paper\n\n print(chi2_AGN)\n print(chi2_AGN/(len(z_data)-4))\n\n plt.figure()\n plt.xlabel('z (redshift)')\n plt.ylabel(r'$Fx$')\n plt.errorbar(z_data,psi,np.sqrt(si_2),marker='.',linestyle='')\n plt.plot(z_data,logFx,'.r')\n",
"\"\"\"\nCreated on Wed Feb 5 16:07:35 2020\n\n@author: matias\n\"\"\"\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.optimize import minimize\nimport emcee\nimport corner\nfrom scipy.interpolate import interp1d\n\n\nimport sys\nimport os\nfrom os.path import join as osjoin\nfrom pc_path import definir_path\npath_git, path_datos_global = definir_path()\n\nos.chdir(path_git)\nsys.path.append('./Software/Funcionales/')\nfrom funciones_data import leer_data_cronometros\nfrom funciones_cronometros import params_to_chi2_H0_fijo\n\n#%% Predeterminados:\nH_0 = 73.02 #Unidades de (km/seg)/Mpc\nn = 1\n#Coindiciones iniciales e intervalo\nx_0 = -0.339\ny_0 = 1.246\nv_0 = 1.64\nw_0 = 1 + x_0 + y_0 - v_0\nr_0 = 41\nci = [x_0, y_0, v_0, w_0, r_0] #Condiciones iniciales\n#%%\n\nos.chdir(path_git+'/Software/Estadรญstica/Datos/')\nz_data, H_data, dH = leer_data_cronometros('datos_cronometros.txt')\n\nb_true = 5.2\nomega_m_true = 0.3\n\nnp.random.seed(42)\nlog_likelihood = lambda theta: -0.5 * params_to_chi2_H0_fijo(ci,theta, [H_0,n],z_data,H_data,dH)\nnll = lambda *args: -log_likelihood(*args)\ninitial = np.array([omega_m_true,b_true]) + 0.01 * np.random.randn(2)\nsoln = minimize(nll, initial)#,bounds =([0,1],[-10,10]))\nomega_m_ml, b_ml = soln.x\nprint(omega_m_ml,b_ml)\n\nos.chdir(path_git + '/Software/Estadรญstica/Resultados_simulaciones')\nnp.savez('valores_medios_cronom_2params', sol=soln.x )\n",
"import numpy as np\nimport emcee\nimport sys\nimport os\nfrom os.path import join as osjoin\nfrom pc_path import definir_path\npath_git, path_datos_global = definir_path()\nos.chdir(path_git)\nsys.path.append('./Software/Funcionales/')\nfrom funciones_parametros_derivados import parametros_derivados\n\n#Rellenar acรก:\nmodel='EXP'\ndatasets = 'CC+SN+AGN'\nnum_params = '4params'\n#root_directory=path_datos_global+'/Resultados_cadenas/Paper/'+model\nroot_directory=path_datos_global+'/Resultados_cadenas'\nroot_directory\nos.chdir(root_directory)\nfilename = 'sample_'+model+'_'+datasets+'_'+num_params\nfilename_h5 = filename+'.h5'\nreader = emcee.backends.HDFBackend(filename_h5)\nnwalkers, ndim = reader.shape #Numero de caminantes y de parametros\n\n#%%%\nsamples = reader.get_chain()\nburnin= int(0.2*len(samples[:,0])) #Burnin del 20%\nthin = 1\n\n#%% Defino el burnin y el thin a partir de tau o los pongo a mano\ntau = reader.get_autocorr_time()\n#burnin = int(2 * np.max(tau))\n#thin = int(0.5 * np.min(tau))\n#%%\nsamples = reader.get_chain(discard=burnin, flat=True, thin=thin)\nprint(len(samples)) #numero de pasos efectivos\nprint('Tiempo estimado:{} min'.format(len(samples)/60))\nnew_samples = parametros_derivados(reader,discard=burnin,thin=thin,model=model)\n\n#%%\nnp.savez(filename+'_deriv', new_samples=new_samples)\n#dir = path_datos_global+'/Resultados_cadenas/posprocesado'\nos.chdir(root_directory)\nwith np.load(filename+'_deriv.npz') as data:\n ns = data['new_samples']\n",
"import numpy as np\nimport emcee\nimport sys\nimport os\nfrom pc_path import definir_path\npath_git, path_datos_global = definir_path()\nos.chdir(path_git+'/Software/Funcionales')\nfrom funciones_parametros_derivados import parametros_derivados\n\n#Rellenar acรก:\nmodel='HS'\ndatasets = 'CC+SN'\nnum_params = '4params'\nroot_directory=path_datos_global+'/Resultados_cadenas/'\nroot_directory\nos.chdir(root_directory)\nfilename = 'sample_'+model+'_'+datasets+'_'+num_params\nfilename_h5 = filename+'.h5'\nreader = emcee.backends.HDFBackend(filename_h5)\nnwalkers, ndim = reader.shape #Numero de caminantes y de parametros\n\n#%%%\n#burnin = 1000\n#thin = 50\n#%% Defino el burnin y el thin a partir de tau o los pongo a mano\ntau = reader.get_autocorr_time()\nburnin = int(2 * np.max(tau))\nthin = int(0.5 * np.min(tau))\n#%%\nsamples = reader.get_chain(discard=burnin, flat=True, thin=thin)\nprint(len(samples)) #numero de pasos efectivos\nprint('Tiempo estimado:{} min'.format(len(samples)/60))\nnew_samples = parametros_derivados(reader,discard=burnin,thin=thin,model=model)\nnp.savez(filename+'_deriv', new_samples=new_samples)\n#%%\n\nwith np.load(filename+'_deriv.npz') as data:\n ns = data['new_samples']\n"
] |
[
[
"numpy.log",
"matplotlib.pyplot.xlabel",
"numpy.sum",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"numpy.sqrt",
"numpy.log10",
"numpy.linspace"
],
[
"numpy.array",
"numpy.random.seed",
"numpy.random.randn",
"numpy.savez",
"scipy.optimize.minimize"
],
[
"numpy.savez",
"numpy.load"
],
[
"numpy.max",
"numpy.savez",
"numpy.load",
"numpy.min"
]
] |
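Both derivar_params.py scripts above derive burn-in and thinning from emcee's autocorrelation-time estimate; a minimal sketch of that heuristic, assuming an existing HDF backend file (the filename chain.h5 is hypothetical, and get_autocorr_time may raise for chains that are too short):

import numpy as np
import emcee

reader = emcee.backends.HDFBackend('chain.h5')  # hypothetical file
tau = reader.get_autocorr_time()                # one estimate per parameter
burnin = int(2 * np.max(tau))                   # discard ~2 autocorrelation times
thin = int(0.5 * np.min(tau))                   # keep roughly independent samples
samples = reader.get_chain(discard=burnin, flat=True, thin=thin)
print(samples.shape)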
garrettkatz/poppy-simulations
|
[
"cd4d132ab6f8b4e69f2edd89662980d252a27966"
] |
[
"ambulation/envs/poppy_humanoid_keep_standing/poppy_humanoid_keep_standing.py"
] |
[
"import numpy as np\nfrom gym.envs.mujoco import mujoco_env\nfrom gym import utils\n\ndef mass_center(model, sim):\n mass = np.expand_dims(model.body_mass, 1)\n xpos = sim.data.xipos\n return (np.sum(mass * xpos, 0) / np.sum(mass))[0]\n\nclass PoppyHumanoidKeepStandingEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self):\n mujoco_env.MujocoEnv.__init__(self, 'poppy_humanoid/poppy_keep_standing.xml', 5)\n utils.EzPickle.__init__(self)\n\n def _get_obs(self):\n data = self.sim.data\n return np.concatenate([data.qpos.flat[2:],\n data.qvel.flat,\n data.cinert.flat,\n data.cvel.flat,\n data.qfrc_actuator.flat,\n data.cfrc_ext.flat])\n\n def step(self, a):\n pos_before = mass_center(self.model, self.sim)\n self.do_simulation(a, self.frame_skip)\n pos_after = mass_center(self.model, self.sim)\n alive_bonus = 5.0\n data = self.sim.data\n lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt\n quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()\n quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()\n quad_impact_cost = min(quad_impact_cost, 10)\n reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus\n qpos = self.sim.data.qpos\n done = bool((qpos[2] < 0.2) or (qpos[2] > 2.0))\n return self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost, reward_quadctrl=-quad_ctrl_cost, reward_alive=alive_bonus, reward_impact=-quad_impact_cost)\n\n def reset_model(self):\n c = 0.01\n self.set_state(\n self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),\n self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv,)\n )\n return self._get_obs()\n\n def viewer_setup(self):\n self.viewer.cam.trackbodyid = 1\n self.viewer.cam.distance = self.model.stat.extent * 1.0\n self.viewer.cam.lookat[2] = 0.8\n self.viewer.cam.elevation = -20\n"
] |
[
[
"numpy.concatenate",
"numpy.sum",
"numpy.expand_dims",
"numpy.square"
]
] |
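mass_center in the environment above is a mass-weighted average of the body positions (with only the x component returned); the same computation on toy arrays, outside MuJoCo:

import numpy as np

body_mass = np.array([1.0, 2.0, 3.0])          # per-body masses
xipos = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [2.0, 0.0, 1.0]])            # per-body COM positions
mass = np.expand_dims(body_mass, 1)            # (3, 1) so it broadcasts over xyz
com = np.sum(mass * xipos, 0) / np.sum(mass)   # full 3D centre of mass
print(com[0])                                  # the env keeps only the x component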
pattonw/mouselight
|
[
"296e6df7d4e79776ed9f8533d17d937bb6866082"
] |
[
"synthetic/blobs/train.py"
] |
[
"from mahotas import cwatershed\nfrom mala.losses import ultrametric_loss_op\nfrom scipy.ndimage.filters import gaussian_filter\nfrom scipy.ndimage.filters import maximum_filter\nfrom scipy.ndimage.morphology import distance_transform_edt\nimport gunpowder as gp\nimport json\nimport numpy as np\nimport skelerator\nimport tensorflow as tf\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\nwith open(\"tensor_names.json\", \"r\") as f:\n tensor_names = json.load(f)\n\n\nclass Synthetic2DSource(gp.BatchProvider):\n def __init__(self, raw, gt, smoothness=1.0, n_objects=3, points_per_skeleton=10):\n\n self.raw = raw\n self.gt = gt\n self.smoothness = smoothness\n self.n_objects = n_objects\n self.points_per_skeleton = points_per_skeleton\n\n def setup(self):\n\n self.provides(\n self.raw,\n gp.ArraySpec(\n roi=gp.Roi((0, 0), (1000, 1000)),\n dtype=np.uint8,\n interpolatable=True,\n voxel_size=(1, 1),\n ),\n )\n self.provides(\n self.gt,\n gp.ArraySpec(\n roi=gp.Roi((0, 0), (1000, 1000)),\n dtype=np.uint64,\n interpolatable=False,\n voxel_size=(1, 1),\n ),\n )\n\n def provide(self, request):\n\n voxel_size = self.spec[self.raw].voxel_size\n shape = gp.Coordinate((1,) + request[self.raw].roi.get_shape())\n\n noise = np.abs(np.random.randn(*shape))\n smoothed_noise = gaussian_filter(noise, sigma=self.smoothness)\n\n seeds = np.zeros(shape, dtype=int)\n for i in range(self.n_objects):\n if i == 0:\n num_points = 100\n else:\n num_points = self.points_per_skeleton\n points = np.stack(\n [np.random.randint(0, shape[dim], num_points) for dim in range(3)],\n axis=1,\n )\n tree = skelerator.Tree(points)\n skeleton = skelerator.Skeleton(\n tree, [1, 1, 1], \"linear\", generate_graph=False\n )\n seeds = skeleton.draw(seeds, np.array([0, 0, 0]), i + 1)\n\n seeds[maximum_filter(seeds, size=4) != seeds] = 0\n seeds_dt = distance_transform_edt(seeds == 0) + 5.0 * smoothed_noise\n gt_data = cwatershed(seeds_dt, seeds).astype(np.uint64)[0] - 1\n\n labels = np.unique(gt_data)\n\n raw_data = np.zeros_like(gt_data, dtype=np.uint8)\n value = 0\n for label in labels:\n raw_data[gt_data == label] = value\n value += 255.0 / self.n_objects\n\n spec = request[self.raw].copy()\n spec.voxel_size = (1, 1)\n raw = gp.Array(raw_data, spec)\n\n spec = request[self.gt].copy()\n spec.voxel_size = (1, 1)\n gt_crop = (\n request[self.gt].roi - request[self.raw].roi.get_begin()\n ) / voxel_size\n gt_crop = gt_crop.to_slices()\n gt = gp.Array(gt_data[gt_crop], spec)\n\n batch = gp.Batch()\n batch[self.raw] = raw\n batch[self.gt] = gt\n\n return batch\n\n\nemst_name = \"PyFuncStateless:0\"\nedges_u_name = \"Gather:0\"\nedges_v_name = \"Gather_1:0\"\n\n\ndef add_loss(graph):\n\n # k, h, w\n embedding = graph.get_tensor_by_name(tensor_names[\"embedding\"])\n\n # h, w\n fg = graph.get_tensor_by_name(tensor_names[\"fg\"])\n\n # h, w\n gt_labels = graph.get_tensor_by_name(tensor_names[\"gt_labels\"])\n\n # h, w\n gt_fg = tf.greater(gt_labels, 0, name=\"gt_fg\")\n\n # h, w\n shape = tuple(fg.get_shape().as_list())\n\n # 1, 1, h, w\n maxima = tf.nn.pool(\n tf.reshape(fg, (1, 1) + shape),\n [10, 10],\n \"MAX\",\n \"SAME\",\n strides=[1, 1],\n data_format=\"NCHW\",\n )\n # h, w\n maxima = tf.reshape(tf.equal(fg, maxima), shape, name=\"maxima\")\n\n # 1, k, h, w\n embedding = tf.reshape(embedding, (1,) + tuple(embedding.get_shape().as_list()))\n # k, 1, h, w\n embedding = tf.transpose(embedding, perm=[1, 0, 2, 3])\n\n um_loss, emst, edges_u, edges_v, _ = ultrametric_loss_op(\n embedding, gt_labels, mask=maxima, 
coordinate_scale=0.01\n )\n\n assert emst.name == emst_name\n assert edges_u.name == edges_u_name\n assert edges_v.name == edges_v_name\n\n fg_loss = tf.losses.mean_squared_error(gt_fg, fg)\n\n # higher learning rate for fg network\n loss = um_loss + 10 * fg_loss\n\n opt = tf.train.AdamOptimizer(\n learning_rate=0.5e-5, beta1=0.95, beta2=0.999, epsilon=1e-8\n )\n\n optimizer = opt.minimize(loss)\n\n return (loss, optimizer)\n\n\ndef train(n_iterations):\n\n raw = gp.ArrayKey(\"RAW\")\n gt = gp.ArrayKey(\"GT\")\n gt_fg = gp.ArrayKey(\"GT_FP\")\n embedding = gp.ArrayKey(\"EMBEDDING\")\n fg = gp.ArrayKey(\"FG\")\n maxima = gp.ArrayKey(\"MAXIMA\")\n gradient_embedding = gp.ArrayKey(\"GRADIENT_EMBEDDING\")\n gradient_fg = gp.ArrayKey(\"GRADIENT_FG\")\n emst = gp.ArrayKey(\"EMST\")\n edges_u = gp.ArrayKey(\"EDGES_U\")\n edges_v = gp.ArrayKey(\"EDGES_V\")\n\n request = gp.BatchRequest()\n request.add(raw, (200, 200))\n request.add(gt, (160, 160))\n\n snapshot_request = gp.BatchRequest()\n snapshot_request[embedding] = request[gt]\n snapshot_request[fg] = request[gt]\n snapshot_request[gt_fg] = request[gt]\n snapshot_request[maxima] = request[gt]\n snapshot_request[gradient_embedding] = request[gt]\n snapshot_request[gradient_fg] = request[gt]\n snapshot_request[emst] = gp.ArraySpec()\n snapshot_request[edges_u] = gp.ArraySpec()\n snapshot_request[edges_v] = gp.ArraySpec()\n\n pipeline = (\n Synthetic2DSource(raw, gt)\n + gp.Normalize(raw)\n + gp.tensorflow.Train(\n \"train_net\",\n optimizer=add_loss,\n loss=None,\n inputs={tensor_names[\"raw\"]: raw, tensor_names[\"gt_labels\"]: gt},\n outputs={\n tensor_names[\"embedding\"]: embedding,\n tensor_names[\"fg\"]: fg,\n \"maxima:0\": maxima,\n \"gt_fg:0\": gt_fg,\n emst_name: emst,\n edges_u_name: edges_u,\n edges_v_name: edges_v,\n },\n gradients={\n tensor_names[\"embedding\"]: gradient_embedding,\n tensor_names[\"fg\"]: gradient_fg,\n },\n )\n + gp.Snapshot(\n output_filename=\"{iteration}.hdf\",\n dataset_names={\n raw: \"volumes/raw\",\n gt: \"volumes/gt\",\n embedding: \"volumes/embedding\",\n fg: \"volumes/fg\",\n maxima: \"volumes/maxima\",\n gt_fg: \"volumes/gt_fg\",\n gradient_embedding: \"volumes/gradient_embedding\",\n gradient_fg: \"volumes/gradient_fg\",\n emst: \"emst\",\n edges_u: \"edges_u\",\n edges_v: \"edges_v\",\n },\n dataset_dtypes={maxima: np.float32, gt_fg: np.float32},\n every=100,\n additional_request=snapshot_request,\n )\n )\n\n with gp.build(pipeline):\n for i in range(n_iterations):\n pipeline.request_batch(request)\n\n\nif __name__ == \"__main__\":\n train(1000000)\n"
] |
[
[
"numpy.zeros_like",
"numpy.array",
"tensorflow.train.AdamOptimizer",
"numpy.zeros",
"tensorflow.equal",
"scipy.ndimage.filters.gaussian_filter",
"numpy.random.randn",
"tensorflow.transpose",
"tensorflow.reshape",
"scipy.ndimage.filters.maximum_filter",
"numpy.random.randint",
"tensorflow.losses.mean_squared_error",
"tensorflow.greater",
"numpy.unique",
"scipy.ndimage.morphology.distance_transform_edt"
]
] |
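The seed-thinning line seeds[maximum_filter(seeds, size=4) != seeds] = 0 in provide() keeps only cells that are the maximum of their neighbourhood; the idiom in isolation on a toy array (the modern scipy.ndimage import path is used here instead of the deprecated scipy.ndimage.filters):

import numpy as np
from scipy.ndimage import maximum_filter

a = np.array([[1, 3, 2],
              [0, 5, 1],
              [2, 1, 4]])
peaks = a.copy()
# zero out every cell that is not the maximum of its local window
peaks[maximum_filter(a, size=2) != a] = 0
print(peaks)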
marioliu/AutonomousQuadblade
|
[
"08fe54fe37df89ffc7e6378125bb14ad5bead421"
] |
[
"Camera/camera.py"
] |
[
"'''\nAdapted from https://github.com/IntelligentQuadruped, with permission\nDescription: Module to connect to camera and retrieve RGB and depth data. Currently supports the Intel RealSense R200 Camera.\n'''\n\nimport numpy as np\nimport logging\nimport time\nimport cv2\nimport matplotlib.pyplot as plt\nfrom skimage.transform import rescale\nfrom file_support import ensureDir\nfrom os import path, makedirs\n\ntry:\n import pyrealsense as pyrs\nexcept ImportError as error:\n logging.warning(\"cam.py: \" + str(error))\n\nclass Camera:\n \"\"\"\n Object to get data from R200\n \"\"\"\n def __init__(self, max_depth = 4.0, save_images = False, \\\n t_buffer = 5, output_dir = './Trials/'):\n \"\"\"\n Intitalizes Camera object \n \"\"\"\n self.max_depth = max_depth\n self.save_images = save_images\n self.clock = time.time()\n self.t_buffer = t_buffer\n self.output_dir = output_dir\n self.data_dir = path.join(self.output_dir,\"{}\".format(time.strftime(\"%d_%b_%Y_%H:%M\", time.localtime())))\n\n if self.save_images:\t\n ensureDir(self.data_dir)\n pass\n\n np.warnings.filterwarnings('ignore')\n\n def connect(self):\n \"\"\"\n Establishes connection to R200 camera\n \"\"\"\n logging.info(\"Cam.py: connecting components\")\n self.serv = pyrs.Service()\n self.dev = self.serv.Device(device_id=0, \n streams=[\\\n pyrs.stream.DepthStream(fps=60), pyrs.stream.ColorStream(fps=60)])\n\n def disconnect(self):\n \"\"\"\n Disconnects from R200 camera\n \"\"\"\n self.dev.stop()\n self.serv.stop()\n logging.info(\"Cam.py: camera disconnected\")\n\n def getFrames(self, frames = 5, rgb = False):\n \"\"\"\n Retrieves depth frames (and RGB if true) from R200 input, cleans and averages depth images\n \"\"\"\n self.dev.wait_for_frames()\n\n # Convert depth to meters\n depth = self.dev.depth * self.dev.depth_scale\n col = self.dev.color\n\n if self.save_images and (time.time() - self.clock > self.t_buffer):\n np.save(path.join(self.data_dir,str(time.time())+\"_d\"),depth)\n np.save(path.join(self.data_dir,str(time.time())+\"_c\"),col)\n self.clock = time.time()\n\n for _ in range(frames-1):\n self.dev.wait_for_frames()\n # Convert depth to meters\n curr = self.dev.depth * self.dev.depth_scale\n depth = np.dstack((depth, curr))\n\n if frames != 1:\n depth = np.nanmean(depth, 2)\n\n depth[depth <= 0] = np.nan\n depth[depth > self.max_depth] = np.nan\n\n if rgb:\n return depth, col\n\n return depth\n\n def reduceFrame(self, depth, height_ratio = 0.5, sub_sample = 0.3, reduce_to = 'lower'):\n \"\"\"\n Takes in a depth image and rescales it\n\n Args:\n height_ratio: Determines fraction of rows to keep\n sub_sample: Scaling factor for image\n \"\"\"\n if (height_ratio > 1.0) or (height_ratio < 0.0)\\\n or (sub_sample > 1.0) or (sub_sample < 0.0):\n print('height_ratio and sub_sample must be between 0 and 1')\n exit(1)\n \n depth_copy = depth.copy()\n height = depth_copy.shape[0]\n h = int(height_ratio*(height))\n cols_to_cut = 0\n\n # catches the case when all rows are kept\n if height_ratio == 1:\n d_short = depth_copy\n\n elif reduce_to == 'lower':\n d_short = depth_copy[(height - h):,\\\n cols_to_cut:-(cols_to_cut+1)]\n\n elif reduce_to == 'middle_lower':\n upper_brdr = int(3*(height/4.0) - h/2)\n lower_brdr = upper_brdr + h\n d_short = depth_copy[upper_brdr:lower_brdr,\\\n cols_to_cut:-(cols_to_cut+1)]\n\n elif reduce_to == 'middle':\n upper_brdr = int((height - h)/2.0)\n lower_brdr = upper_brdr + h\n d_short = depth_copy[upper_brdr:lower_brdr,\\\n cols_to_cut:-(cols_to_cut+1)]\n\n elif reduce_to == 'middle_upper':\n 
upper_brdr = int((height/4.0) - h/2)\n lower_brdr = upper_brdr + h\n d_short = depth_copy[upper_brdr:lower_brdr,\\\n cols_to_cut:-(cols_to_cut+1)]\n\n elif reduce_to == 'upper':\n d_short = depth_copy[:h, cols_to_cut:-(cols_to_cut+1)]\n\n d_short[d_short <= 0] = np.nan\n d_short[d_short > self.max_depth] = np.nan\n \n rescaled = rescale(d_short, sub_sample, mode='reflect', multichannel=False, anti_aliasing=True)\n\n return rescaled\n\ndef main():\n \"\"\"\n Unit tests\n \"\"\"\n max_depth = 4.0\n numFrames = 10\n # height_ratio of 0 crops 0 rows away\n height_ratio = 0.5\n sub_sample = 1\n # reduce_to argument can be: 'lower', 'middle_lower', 'middle', 'middle_upper', and 'upper'\n reduce_to = 'middle_lower'\n\n print('Program settings:')\n print('\\tmax_depth: ' + str(max_depth))\n print('\\tnumFrames: ' + str(numFrames))\n print('\\theight_ratio: ' + str(height_ratio))\n print('\\tsub_sample: ' + str(sub_sample))\n print('\\treduce_to: ' + reduce_to)\n\n cam = Camera(max_depth = max_depth)\n cam.connect()\n time.sleep(2.5)\n\n t1 = time.time()\n d = cam.getFrames(numFrames)\n t2 = time.time()\n printStmt = 'Time to get {0} frames: ' + str(t2 - t1)\n print(printStmt.format(numFrames))\n d_small = cam.reduceFrame(d, height_ratio = height_ratio, sub_sample = sub_sample, reduce_to = reduce_to)\n\n # colormap:\n # https://matplotlib.org/tutorials/colors/colormaps.html\n\n # scaled depth\n plt.figure(figsize = (6, 7)) # figsize = width, height\n ax2 = plt.subplot(2, 1, 2)\n plt.imshow(d_small, cmap='gist_rainbow')\n plt.colorbar()\n plt.title('Scaled (height_ratio = {0}, sub_sample = {1})'.format(height_ratio, sub_sample))\n plt.grid()\n\n # original depth\n # plt.subplot(2, 1, 1, sharex=ax2, sharey=ax2)\n plt.subplot(2, 1, 1)\n plt.imshow(d, cmap='gist_rainbow')\n plt.colorbar()\n plt.title('Original')\n plt.grid()\n\n plt.subplots_adjust(hspace = 0.3)\n\n plt.show()\n cam.disconnect()\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"matplotlib.pyplot.colorbar",
"numpy.warnings.filterwarnings",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.nanmean",
"numpy.dstack",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplot"
]
] |
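getFrames above averages several depth frames per pixel while ignoring dropouts, then masks readings beyond max_depth; the core NumPy idiom with synthetic 2x2 frames (values illustrative):

import numpy as np

f1 = np.array([[1.0, np.nan], [2.0, 4.0]])
f2 = np.array([[3.0, 6.0], [np.nan, 4.0]])
stack = np.dstack((f1, f2))        # shape (2, 2, n_frames)
avg = np.nanmean(stack, 2)         # per-pixel mean, NaNs ignored
avg[avg > 4.0] = np.nan            # clip beyond max_depth, as in the class
print(avg)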
joebartusek/graf
|
[
"80e1014a1def2660a44188c69021f0c498b6cef9"
] |
[
"submodules/GAN_stability/gan_training/checkpoints.py"
] |
[
"\nimport os\nimport urllib\nimport torch\nfrom torch.utils import model_zoo\n\n\nclass CheckpointIO(object):\n ''' CheckpointIO class.\n\n It handles saving and loading checkpoints.\n\n Args:\n checkpoint_dir (str): path where checkpoints are saved\n '''\n def __init__(self, checkpoint_dir='./chkpts', **kwargs):\n self.module_dict = kwargs\n self.checkpoint_dir = checkpoint_dir\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n def register_modules(self, **kwargs):\n ''' Registers modules in current module dictionary.\n '''\n self.module_dict.update(kwargs)\n\n def save(self, filename, **kwargs):\n ''' Saves the current module dictionary.\n\n Args:\n filename (str): name of output file\n '''\n if not os.path.isabs(filename):\n filename = os.path.join(self.checkpoint_dir, filename)\n\n outdict = kwargs\n for k, v in self.module_dict.items():\n outdict[k] = v.state_dict()\n torch.save(outdict, filename)\n\n def load(self, filename):\n '''Loads a module dictionary from local file or url.\n \n Args:\n filename (str): name of saved module dictionary\n '''\n if is_url(filename):\n return self.load_url(filename)\n else:\n return self.load_file(filename)\n\n def load_file(self, filename):\n '''Loads a module dictionary from file.\n \n Args:\n filename (str): name of saved module dictionary\n '''\n\n if not os.path.isabs(filename):\n filename = os.path.join(self.checkpoint_dir, filename)\n\n if os.path.exists(filename):\n print(filename)\n print('=> Loading checkpoint from local file...')\n state_dict = torch.load(filename)\n scalars = self.parse_state_dict(state_dict)\n return scalars\n else:\n raise FileNotFoundError\n\n def load_url(self, url):\n '''Load a module dictionary from url.\n \n Args:\n url (str): url to saved model\n '''\n print(url)\n print('=> Loading checkpoint from url...')\n state_dict = model_zoo.load_url(url, progress=True)\n scalars = self.parse_state_dict(state_dict)\n return scalars\n\n def parse_state_dict(self, state_dict):\n '''Parse state_dict of model and return scalars.\n \n Args:\n state_dict (dict): State dict of model\n '''\n\n for k, v in self.module_dict.items():\n if k in state_dict:\n v.load_state_dict(state_dict[k])\n else:\n print('Warning: Could not find %s in checkpoint!' % k)\n scalars = {k: v for k, v in state_dict.items()\n if k not in self.module_dict}\n return scalars\n\ndef is_url(url):\n scheme = urllib.parse.urlparse(url).scheme\n return scheme in ('http', 'https')"
] |
[
[
"torch.save",
"torch.utils.model_zoo.load_url",
"torch.load"
]
] |
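CheckpointIO.load above dispatches on the URL scheme via is_url; the check on its own, runnable with only the standard library:

import urllib.parse

def is_url(path):
    # only http(s) counts; local paths have an empty (or drive-letter) scheme
    return urllib.parse.urlparse(path).scheme in ('http', 'https')

assert is_url('https://example.com/model.pt')
assert not is_url('./chkpts/model.pt')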
VV123/NLIDB_gradient
|
[
"f42a6f383d2d4ac41c354cf55df2a21507577b02"
] |
[
"main.py"
] |
[
"# coding=utf-8\nimport sys\nimport argparse\n\nimport os\nfrom tensorflow.python.platform import gfile\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.layers.core import Dense\nfrom utils.data_manager import load_data, load_data_one\nfrom collections import defaultdict\nfrom argparse import ArgumentParser\nfrom decode_helper import decode_one\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nfrom tf_helper import train, evaluate, decode_data, decode_data_recover\nfrom model1 import construct_graph\n\n\ndef init_args():\n arg_parser = argparse.ArgumentParser()\n\n arg_parser.add_argument(\n '--data_path',\n default=os.path.dirname(os.path.abspath(__file__)) + '/data',\n type=str,\n help='Data path.')\n arg_parser.add_argument(\n '--load_data', default=False, type=bool, help='Load data.')\n arg_parser.add_argument(\n '--data',\n choices=['wikisql', 'spider', 'overnight', 'overnight_set'],\n default='wikisql',\n help='data to train & test')\n #arg_parser.add_argument('--tran_data', choices=['wikisql', 'spider', 'overnight'], default='overnight', help='data to transfer')\n arg_parser.add_argument(\n '--subset', choices=['all'], default='all', help='Subset of data.')\n arg_parser.add_argument(\n '--maxlen', default=60, type=int, help='Data record max length.')\n\n arg_parser.add_argument(\n '--annotation_path',\n default=os.path.dirname(os.path.abspath(__file__)) +\n '/data/DATA/wiki/',\n type=str,\n help='Data annotation path.')\n arg_parser.add_argument(\n '--mode',\n choices=['train', 'infer', 'transfer','txt'],\n default='infer',\n help='Run mode')\n #### Model configuration ####\n arg_parser.add_argument(\n '--cell',\n choices=['gru'],\n default='gru',\n help='Type of cell used, currently only standard GRU cell is supported'\n )\n arg_parser.add_argument(\n '--output_vocab_size',\n default=20637,\n #default=20452,\n type=int,\n help='Output vocabulary size.')\n # Embedding sizes\n arg_parser.add_argument(\n '--embedding_dim',\n default=300,\n type=int,\n help='Size of word embeddings')\n\n #Hidden sizes\n arg_parser.add_argument(\n '--dim', default=400, type=int, help='Size of GRU hidden states')\n arg_parser.add_argument(\n '--hidden_size',\n default=256,\n type=int,\n help='Size of LSTM hidden states')\n\n arg_parser.add_argument(\n '--no_copy',\n default=False,\n action='store_true',\n help='Do not use copy mechanism')\n\n #### Training ####\n arg_parser.add_argument(\n '--vocab', type=str, help='Path of the serialized vocabulary')\n arg_parser.add_argument(\n '--glove_embed_path',\n default=None,\n type=str,\n help='Path to pretrained Glove mebedding')\n\n arg_parser.add_argument(\n '--batch_size', default=128, type=int, help='Batch size')\n arg_parser.add_argument(\n '--in_drop', default=0., type=float, help='In dropout rate')\n arg_parser.add_argument(\n '--out_drop', default=0., type=float, help='Out dropout rate')\n\n # training details\n arg_parser.add_argument(\n '--valid_epoch_interval',\n default=1,\n type=int,\n help='Perform validation every x epoch')\n arg_parser.add_argument(\n '--clip_grad', default=5., type=float, help='Clip gradients')\n arg_parser.add_argument(\n '--total_epochs', default=40, type=int, help='# of training epoches')\n arg_parser.add_argument(\n '--epochs', default=1, type=int, help='Record per x epoches')\n arg_parser.add_argument(\n '--lr', default=0.0001, type=float, help='Learning rate')\n arg_parser.add_argument(\n '--lr_decay',\n default=0.5,\n type=float,\n help='decay 
learning rate if the validation performance drops')\n\n #### decoding/validation/testing ####\n arg_parser.add_argument(\n '--load_model', default=False, type=bool, help='Whether to load model')\n arg_parser.add_argument(\n '--beam_width', default=5, type=int, help='Beam size for beam search')\n arg_parser.add_argument(\n '--decode_max_time_step',\n default=100,\n type=int,\n help='Maximum number of time steps used '\n 'in decoding and sampling')\n args = arg_parser.parse_args()\n return args\n\ndef model(args, train_env, infer_env):\n\n tf.reset_default_graph()\n train_graph = tf.Graph()\n infer_graph = tf.Graph()\n\n with train_graph.as_default():\n train_env.x = tf.placeholder(\n tf.int32, shape=[None, args.maxlen], name='x')\n train_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')\n train_env.training = tf.placeholder_with_default(\n False, (), name='train_mode')\n train_env.train_op, train_env.loss, train_env.acc, sample_ids, logits = construct_graph(\n \"train\", train_env, args)\n train_env.saver = tf.train.Saver()\n #[print(n.name) for n in tf.get_default_graph().as_graph_def().node if 'xxxxx' in n.name]\n\n with infer_graph.as_default():\n infer_env.x = tf.placeholder(\n tf.int32, shape=[None, args.maxlen], name='x')\n infer_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')\n infer_env.training = tf.placeholder_with_default(\n False, (), name='train_mode')\n _, infer_env.loss, infer_env.acc, infer_env.pred_ids, _ = construct_graph(\n \"infer\", infer_env, args)\n infer_env.infer_saver = tf.train.Saver()\n\n return train_graph, infer_graph\n\n\ndef inferrence(args):\n args.load_model = True\n\n class Dummy:\n pass\n\n train_env = Dummy()\n infer_env = Dummy()\n _, infer_graph = model(args, train_env, infer_env)\n\n args.data = 'wikisql'\n args.load_data = True\n X_train, y_train = load_data(maxlen=args.maxlen,load=args.load_data, s='train')\n X_test, y_test = load_data(maxlen=args.maxlen,load=args.load_data, s='test')\n X_dev, y_dev = load_data(maxlen=args.maxlen,load=args.load_data, s='dev') \n #X_train, y_train, X_test, y_test, X_dev, y_dev = load_data(args)\n model2load = 'model/{}'.format(args.subset)\n\n sess = tf.InteractiveSession(graph=infer_graph)\n infer_env.infer_saver.restore(sess, model2load)\n print('===========dev set============')\n decode_data(sess, infer_env, X_dev, y_dev)\n em = decode_data_recover(sess, infer_env, X_dev, y_dev, 'dev')\n print('==========test set===========')\n decode_data(sess, infer_env, X_test, y_test)\n test_em = decode_data_recover(sess, infer_env, X_test, y_test,\n 'test')\n\n return\n\ndef infer_one(args):\n args.load_model = True\n\n class Dummy:\n pass\n\n train_env = Dummy()\n infer_env = Dummy()\n _, infer_graph = model(args, train_env, infer_env)\n\n args.data = 'wikisql'\n args.load_data = True\n model2load = 'model/{}'.format(args.subset)\n\n sess = tf.InteractiveSession(graph=infer_graph)\n infer_env.infer_saver.restore(sess, model2load)\n print('===========decode============')\n X_one = load_data_one(args.maxlen, 'qs.txt')\n decode_one(sess, infer_env, X_one)\n\n return\n\ndef train_model(args):\n class Dummy:\n pass\n\n train_env = Dummy()\n infer_env = Dummy()\n\n train_graph, infer_graph = model(args, train_env, infer_env)\n\n args.data = 'wikisql'\n args.load_data = True\n args.load_model = False\n X_train, y_train = load_data(maxlen=args.maxlen,load=args.load_data, s='train')\n X_test, y_test = load_data(maxlen=args.maxlen,load=args.load_data, s='test')\n X_dev, y_dev = 
load_data(maxlen=args.maxlen,load=args.load_data, s='dev') \n #X_train, y_train, X_test, y_test, X_dev, y_dev = load_data(args)\n model2load = 'model/{}'.format(args.subset)\n max_em, global_test_em, best_base = -1, -1, -1\n acc = 0\n sess1 = tf.InteractiveSession(graph=train_graph)\n sess1.run(tf.global_variables_initializer())\n sess1.run(tf.local_variables_initializer())\n sess2 = tf.InteractiveSession(graph=infer_graph)\n sess2.run(tf.global_variables_initializer())\n sess2.run(tf.global_variables_initializer())\n for base in range(args.total_epochs / args.epochs):\n print('\\nIteration: %d (%d epochs)' % (base, args.epochs))\n model2load = train(\n sess1,\n train_env,\n X_train,\n y_train,\n epochs=args.epochs,\n load=args.load_model,\n name=args.subset,\n batch_size=args.batch_size,\n base=base,\n model2Bload=model2load)\n args.load_model = True\n infer_env.infer_saver.restore(sess2, model2load)\n\n print('===========dev set============')\n dev_em = decode_data(sess2, infer_env, X_dev, y_dev)\n dev_em = decode_data_recover(sess2, infer_env, X_dev, y_dev,\n 'dev')\n print('==========test set===========')\n test_em = decode_data(sess2, infer_env, X_test, y_test)\n test_em = decode_data_recover(sess2, infer_env, X_test, y_test,\n 'test')\n\n if dev_em > max_em:\n max_em = dev_em\n global_test_em = test_em\n best_base = base\n print('\\n Saving model for best testing')\n train_env.saver.save(sess1, 'best_model/{0}-{1}-{2:.2f}'.format(args.subset, base, max_em))\n print('Max EM acc: %.4f during %d iteration.' % (max_em, best_base))\n print('test EM acc: %.4f ' % global_test_em)\n return\n\n\ndef transfer(args):\n\n load_model = args.load_model if args.mode == 'train' else True\n\n class Dummy:\n pass\n\n train_env = Dummy()\n infer_env = Dummy()\n\n _, infer_graph = model(args, train_env, infer_env)\n\n args.data = 'overnight'\n args.load_data = True\n #X_tran, y_tran = load_data(args)\n X_tran, y_tran = load_data(maxlen=args.maxlen,load=args.load_data, s='overnight')\n args.data = 'overnight_set'\n #tran_sets = load_data(args)\n tran_sets = load_data(maxlen=args.maxlen,load=args.load_data, s='overnight_set')\n model2load = 'model/{}'.format(args.subset)\n\n sess = tf.InteractiveSession(graph=infer_graph)\n infer_env.infer_saver.restore(sess, model2load)\n\n print('========subset transfer set========')\n subsets = ['basketball', 'calendar', 'housing', 'recipes', 'restaurants']\n for subset, (X_tran_subset, y_tran_subset) in zip(subsets, tran_sets):\n print('---------' + subset + '---------')\n tran_em = decode_data(\n sess,\n infer_env,\n X_tran_subset,\n y_tran_subset,\n filename=str(subset + '.txt'))\n print('===========transfer set============')\n tran_em = decode_data(sess, infer_env, X_tran, y_tran)\n return\n\n\nif __name__ == '__main__':\n args = init_args()\n print(args)\n if args.mode == 'train':\n print('\\nTrain model.')\n train_model(args)\n elif args.mode == 'infer':\n print('\\nInference.')\n inferrence(args)\n elif args.mode == 'txt':\n print('\\nInference from txt.')\n infer_one(args)\n elif args.mode == 'transfer':\n print('\\nTransfer.')\n transfer(args)\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.reset_default_graph",
"tensorflow.train.Saver",
"tensorflow.placeholder",
"tensorflow.local_variables_initializer",
"tensorflow.global_variables_initializer",
"tensorflow.placeholder_with_default",
"tensorflow.InteractiveSession"
]
] |
JasonKeirstead/kestrel-analytics
|
[
"4b8ab9b43ff3f73616e5a1a902f8c46bb00b83c0"
] |
[
"template/analytics.py"
] |
[
"#!/usr/bin/env python3\n\nimport pandas as pd\n\n# Kestrel analytics default paths (single input variable)\nINPUT_DATA_PATH = \"/data/input/0.parquet.gz\"\nOUTPUT_DATA_PATH = \"/data/output/0.parquet.gz\"\nOUTPUT_DISPLAY = \"/data/display/ret.html\"\n\ndef analytics(dataframe):\n # analyze data in dataframe\n\n # provide insights or additional knowledge\n newattr = [\"newval\" + str(i) for i in range(dataframe.shape[0])]\n dataframe[\"x_new_attr\"] = newattr\n\n display = \"<p>Hello World! -- a Kestrel analytics</p>\"\n\n # return the updated Kestrel variable\n return dataframe, display\n\nif __name__ == \"__main__\":\n dfi = pd.read_parquet(INPUT_DATA_PATH)\n dfo, disp = analytics(dfi)\n dfo.to_parquet(OUTPUT_DATA_PATH, compression=\"gzip\")\n with open(OUTPUT_DISPLAY, \"w\") as o:\n o.write(disp)\n"
] |
[
[
"pandas.read_parquet"
]
] |
InferLO/inferlo
|
[
"a65efce721d7f99d2f274dd94a1aaf7ca159e944"
] |
[
"inferlo/generic/libdai_bp.py"
] |
[
"# Copyright (c) 2020, The InferLO authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 - see LICENSE file.\nfrom __future__ import annotations\n\nimport random\nimport time\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, List, Callable, Dict\n\nimport numpy as np\n\nfrom inferlo.base.factors.discrete_factor import DiscreteFactor\nfrom inferlo.base import InferenceResult\n\nif TYPE_CHECKING:\n from inferlo import GraphModel\n\nrecordSentMessages = True\n\n\nclass Prob:\n \"\"\"Equivalent of dai::Prob.\n\n Wrapper around a vector - represents probability distribution.\n \"\"\"\n\n @staticmethod\n def uniform(n):\n \"\"\"Creates unifom probability distribution.\"\"\"\n return Prob.same_value(n, 1.0 / n)\n\n @staticmethod\n def same_value(n: int, val: float):\n \"\"\"Creates vector filled with the same value.\"\"\"\n return Prob(np.ones(n, dtype=np.float64) * val)\n\n def __init__(self, p: np.ndarray):\n self.p = p\n\n def fill(self, x):\n \"\"\"Sets all entries to x.\"\"\"\n self.p = np.ones_like(self.p) * x\n\n def clone(self):\n \"\"\"Makes a copy.\"\"\"\n return Prob(np.array(self.p))\n\n def __imul__(self, other):\n self.p *= other.p\n return self\n\n def __iadd__(self, other):\n self.p += other.p\n return self\n\n def normalize(self):\n \"\"\"Normalize distribution.\"\"\"\n self.p /= np.sum(self.p)\n\n def entropy(self) -> float:\n \"\"\"Calculate entropy of the distribution.\"\"\"\n return - np.sum(self.p * np.log(self.p))\n\n def __str__(self):\n return str(self.p)\n\n\ndef dist_kl(p: Prob, q: Prob):\n \"\"\"Kullback-Leibler divergence between two probability distributions.\"\"\"\n kl_div = p.p * (np.log(p.p + (p == 0)) - np.log(q.p + (p.p == 0)))\n return np.sum(kl_div)\n\n\ndef dist_linf(p: Prob, q: Prob):\n \"\"\"Distance between two probability distributions in L_infinity norm.\"\"\"\n return np.max(np.abs(p.p - q.p))\n\n\n@dataclass\nclass Neighbor:\n \"\"\"Describes the neighbor relationship of two nodes in a graph.\n\n Corresponds to dai::Neighbor.\n \"\"\"\n # Corresponds to the index of this Neighbor entry in the vector of\n # neighbors.\n iter: int\n # Contains the absolute index of the neighboring node.\n node: int\n # Contains the \"dual\" index (i.e., the index of this node in the Neighbors\n # vector of the neighboring node)\n dual: int\n\n\n@dataclass\nclass EdgeProp:\n \"\"\"Type used for storing edge properties.\"\"\"\n index: np.ndarray # Index cached for this edge.\n message: Prob # Old message living on this edge.\n new_message: Prob # New message living on this edge\n residual: float # Residual for this edge\n\n\nclass LDFactor:\n \"\"\"Equivalent of dai::Factor.\n\n Consists of set of variables and flattened values assigned to all var\n combinations. 
Variables are assigned like in Inferlo, but tensor is\n transposed before flattening.\n \"\"\"\n\n def __init__(self, model: GraphModel, var_idx: List[int], p: Prob):\n self.model = model\n self.var_idx = var_idx\n self.p = p\n\n @staticmethod\n def uniform(model: GraphModel, var_idx: List[int]):\n \"\"\"Creates factor defining uniform distribution.\"\"\"\n total_domain_size = 1\n for i in var_idx:\n total_domain_size *= model.get_variable(i).domain.size()\n return LDFactor(model, var_idx, Prob.uniform(total_domain_size))\n\n @staticmethod\n def from_inferlo_factor(f: DiscreteFactor):\n \"\"\"Converts inferlo.DiscreteFactor to LDFactor.\"\"\"\n rev_perm = list(range(len(f.var_idx)))[::-1]\n prob = f.values.transpose(rev_perm).reshape(-1)\n return LDFactor(f.model, f.var_idx, Prob(prob))\n\n def to_inferlo_factor(self) -> DiscreteFactor:\n \"\"\"Converts LDFactor to inferlo.DiscreteFactor.\"\"\"\n sizes = [self.model.get_variable(i).domain.size()\n for i in self.var_idx[::-1]]\n libdai_tensor = self.p.p.reshape(sizes)\n rev_perm = list(range(len(self.var_idx)))[::-1]\n inferlo_tensor = libdai_tensor.transpose(rev_perm)\n return DiscreteFactor(self.model, self.var_idx, inferlo_tensor)\n\n def combine_with_factor(self, other: LDFactor,\n func: Callable[[float, float], float]):\n \"\"\"Applies binary function to two factors.\"\"\"\n # Check that variables of the other factor are subset of variables of\n # the given factor.\n for i in other.var_idx:\n assert i in self.var_idx\n\n # Now, update every value of given factor with corresponding value of\n # the other factor.\n for idx in range(len(self.p.p)):\n j = other._encode_value_index(self._decode_value_index(idx))\n self.p.p[idx] = func(self.p.p[idx], other.p.p[j])\n return self\n\n def __iadd__(self, other: LDFactor):\n return self.combine_with_factor(other, lambda x, y: x + y)\n\n def __imul__(self, other: LDFactor):\n return self.combine_with_factor(other, lambda x, y: x * y)\n\n def marginal(self, new_var_idx, normed=True) -> LDFactor:\n \"\"\"Sums factor over some variables.\"\"\"\n result = self.to_inferlo_factor().marginal(new_var_idx)\n result = LDFactor.from_inferlo_factor(result)\n if normed:\n result.p.normalize()\n return result\n\n def max_marginal(self, new_var_idx, normed=True) -> LDFactor:\n \"\"\"Eleiminates certain variables by finding maximum.\"\"\"\n result = self.to_inferlo_factor().max_marginal(new_var_idx)\n result = LDFactor.from_inferlo_factor(result)\n if normed:\n result.p.normalize()\n return result\n\n def clone(self):\n \"\"\"Makes a copy of this factor.\"\"\"\n return LDFactor(self.model, self.var_idx, self.p.clone())\n\n def _decode_value_index(self, idx):\n \"\"\"Returns dict from variable id to variable value.\"\"\"\n ans = dict()\n for var_id in self.var_idx:\n size = self.model.get_variable(var_id).domain.size()\n ans[var_id] = idx % size\n idx //= size\n return ans\n\n def _encode_value_index(self, var_values: Dict[int, int]):\n ans = 0\n base = 1\n for var_id in self.var_idx:\n size = self.model.get_variable(var_id).domain.size()\n ans += base * var_values[var_id]\n base *= size\n return ans\n\n def __str__(self):\n return \"%s %s\" % (self.var_idx, self.p.p)\n\n\nclass BP:\n \"\"\"Belief propagation algorithm, equivalent to dai::BP.\n\n This class is ported from libDAI's dai::BP class. 
It runs belief\n propagation algorithm for graphical model with discrete variables with\n arbitrary factor graph.\n\n At the moment MAXPROD algorithm (for finding MAP state) is not supported.\n\n Use BP.infer() to perform inference.\n \"\"\"\n\n @staticmethod\n def infer(model, options=None):\n \"\"\"Runs inference BP algorithm for given model.\n\n Supports all options which libdai::BP supports. Refer to libDAI\n documentation for options descritpion.\n \"\"\"\n if options is None:\n options = {'tol': 1e-9, 'logdomain': 0, 'updates': 'SEQRND'}\n inf_alg = BP(model, options)\n inf_alg.init()\n inf_alg.run()\n return InferenceResult(inf_alg.log_z(), inf_alg.marg_prob())\n\n def __init__(self, model: GraphModel, props: Dict[str, str]):\n # Stores all edge properties\n self._edges: List[List[EdgeProp]] = []\n # Maximum difference between variable beliefs encountered so far\n self._maxdiff = 0.0\n # Number of iterations needed\n self._iters = 0\n # The history of message updates (only recorded if \\a\n # recordSentMessages is \\c true)\n self._sentMessages = []\n # Stores variable beliefs of previous iteration\n self._oldBeliefsV: List[LDFactor] = []\n # Stores factor beliefs of previous iteration\n self._old_beliefs_f: List[LDFactor] = []\n # Stores the update schedule\n self._update_seq = []\n\n self.model = model\n self.factors = [\n LDFactor.from_inferlo_factor(\n DiscreteFactor.from_factor(f)) for f in model.get_factors()]\n self.nrVars = model.num_variables\n self.nrFactors = len(self.factors)\n\n # Prepare Neighbors.\n # For every variable - factors, referencing it.\n self.nbV: List[List[Neighbor]] = [[] for _ in range(self.nrVars)]\n # For every factor - variables it references.\n self.nbF: List[List[Neighbor]] = [[] for _ in range(self.nrFactors)]\n for factor_id in range(len(self.factors)):\n factor = self.factors[factor_id]\n for var_iter_index in range(len(factor.var_idx)):\n var_id = factor.var_idx[var_iter_index]\n nbv_len = len(self.nbV[var_id])\n nbf_len = len(self.nbF[factor_id])\n assert var_iter_index == nbf_len\n self.nbV[var_id].append(\n Neighbor(\n iter=nbv_len,\n node=factor_id,\n dual=nbf_len))\n self.nbF[factor_id].append(\n Neighbor(\n iter=nbf_len,\n node=var_id,\n dual=nbv_len))\n\n # Parse properties.\n self.logdomain = bool(int(props.get('logdomain', 0)))\n self.updates = props['updates']\n self.inference = props.get('inference', 'SUMPROD')\n self.verbose = int(props.get('verbose', 0))\n self.damping = float(props.get('damping', 0.0))\n self.maxiter = int(props.get('maxiter', 10000))\n self.maxtime = float(props.get('maxtime', np.inf))\n self.tol = float(props['tol'])\n\n self._construct()\n\n def _construct(self):\n \"\"\"Helper function for constructors.\"\"\"\n # Create edge properties\n self._edges = []\n for i in range(self.nrVars):\n self._edges.append([])\n for _ in self.nbV[i]:\n size = self._var_size(i)\n new_ep = EdgeProp(\n index=None,\n message=Prob.uniform(size),\n new_message=Prob.uniform(size),\n residual=0.0)\n self._edges[i].append(new_ep)\n\n # Create old beliefs\n self._oldBeliefsV = []\n for i in range(self.nrVars):\n self._oldBeliefsV.append(LDFactor.uniform(self.model, [i]))\n self._old_beliefs_f = []\n for ii in range(self.nrFactors):\n self._old_beliefs_f.append(\n LDFactor.uniform(\n self.model,\n self.factors[ii].var_idx))\n\n # Create update sequence\n self._update_seq = []\n for ii in range(self.nrFactors):\n for i in self.nbF[ii]:\n self._update_seq.append((i.node, i.dual))\n\n def init(self):\n \"\"\"Initializes messages awith default 
values.\"\"\"\n c = 0.0 if self.logdomain else 1.0\n for i in range(self.nrVars):\n for ii in self.nbV[i]:\n self._edges[i][ii.iter].message.fill(c)\n self._edges[i][ii.iter].new_message.fill(c)\n if self.updates == 'SEQMAX':\n self._update_residual(i, ii.iter, 0.0)\n self._iters = 0\n\n def find_max_residual(self):\n \"\"\"Find max residual.\"\"\"\n # TODO: optimize with a lookup table.\n max_r = -np.inf\n best_edge = None\n for i in range(self.nrVars):\n for _I in range(len(self.nbV[i])):\n if self._edges[i][_I].residual > max_r:\n max_r = self._edges[i][_I].residual\n best_edge = i, _I\n return best_edge\n\n def _calc_incoming_message_product(\n self,\n ii: int,\n without_i: bool,\n i: int) -> Prob:\n \"\"\"Calculate the product of factor \\a I and the incoming messages.\n\n If without_i == True, the message coming from variable i is omitted\n from the product.\n\n This function is used by calc_new_message and calc_belief_f.\n \"\"\"\n f_prod = self.factors[ii].clone()\n if self.logdomain:\n f_prod.p.p = np.log(f_prod.p.p)\n\n # Calculate product of incoming messages and factor I\n for j in self.nbF[ii]:\n if without_i and (j.node == i):\n continue\n\n # prod_j will be the product of messages coming into j\n size = self._var_size(j.node)\n default_val = 0.0 if self.logdomain else 1.0\n prod_j = Prob.same_value(size, default_val)\n for J in self.nbV[j.node]:\n if J.node != ii: # for all J in nb(j) \\ I\n if self.logdomain:\n prod_j += self._edges[j.node][J.iter].message\n else:\n prod_j *= self._edges[j.node][J.iter].message\n\n # multiply prod with prod_j\n if self.logdomain:\n f_prod += LDFactor(self.model, [j.node], prod_j)\n else:\n f_prod *= LDFactor(self.model, [j.node], prod_j)\n return f_prod.p\n\n def _calc_new_message(self, i: int, _I: int):\n # calculate updated message I->i\n ii = self.nbV[i][_I].node\n\n if len(self.factors[ii].var_idx) == 1: # optimization\n marg = self.factors[ii].p.clone()\n else:\n Fprod = self.factors[ii].clone()\n Fprod.p = self._calc_incoming_message_product(ii, True, i)\n\n if self.logdomain:\n Fprod.p.p = np.exp(Fprod.p.p - np.max(Fprod.p.p))\n\n # Marginalize onto i\n if self.inference == 'SUMPROD':\n marg = Fprod.marginal([i]).p\n else:\n marg = Fprod.max_marginal([i]).p\n\n # Store result\n if self.logdomain:\n self._edges[i][_I].new_message = Prob(np.log(marg.p))\n else:\n self._edges[i][_I].new_message = marg\n\n # Update the residual if necessary\n if self.updates == 'SEQMAX':\n self._update_residual(\n i,\n _I,\n dist_linf(\n self._edges[i][_I].new_message,\n self._edges[i][_I].message))\n\n # BP::run does not check for NANs for performance reasons\n # Somehow NaNs do not often occur in BP...\n def run(self):\n \"\"\"Runs BP algorithm.\"\"\"\n tic = time.time()\n\n # Do several passes over the network until maximum number of iterations\n # has been reached or until the maximum belief difference is smaller\n # than tolerance.\n max_diff = np.inf\n while (self._iters < self.maxiter) and (\n max_diff > self.tol) and (time.time() - tic) < self.maxtime:\n if self.updates == 'SEQMAX':\n if self._iters == 0:\n # do the first pass\n for i in range(self.nrVars):\n for ii in self.nbV[i]:\n self._calc_new_message(i, ii.iter)\n # Maximum-Residual BP [\\ref EMK06]\n for _ in range(len(self._update_seq)):\n # Update the message with the largest residual.\n i, _I = self.find_max_residual()\n self._update_message(i, _I)\n\n # I->i has been updated, which means that residuals for all\n # J->j with J in nb[i]\\I and j in nb[J]\\i have to be\n # updated\n for J 
in self.nbV[i]:\n if J.iter != _I:\n for j in self.nbF[J.node]:\n _J = j.dual\n if j != i:\n self._calc_new_message(j.node, _J)\n elif self.updates == 'PARALL':\n # Parallel updates\n for i in range(self.nrVars):\n for ii in self.nbV[i]:\n self._calc_new_message(i, ii.iter)\n\n for i in range(self.nrVars):\n for ii in self.nbV[i]:\n self._update_message(i, ii.iter)\n else:\n # Sequential updates\n if self.updates == 'SEQRND':\n random.shuffle(self._update_seq)\n\n for e in self._update_seq:\n self._calc_new_message(e[0], e[1])\n self._update_message(e[0], e[1])\n\n # Calculate new beliefs and compare with old ones\n max_diff = -np.inf\n for i in range(self.nrVars):\n b = self._belief_v(i).clone()\n max_diff = max(max_diff,\n dist_linf(b.p, self._oldBeliefsV[i].p))\n self._oldBeliefsV[i] = b\n for ii in range(self.nrFactors):\n b = self._belief_f(ii).clone()\n max_diff = max(max_diff,\n dist_linf(b.p, self._old_beliefs_f[ii].p))\n self._old_beliefs_f[ii] = b\n self._iters += 1\n\n if max_diff > self._maxdiff:\n self._maxdiff = max_diff\n return max_diff\n\n def _calc_belief_v(self, i: int) -> Prob:\n p = Prob.same_value(self.model.get_variable(i).domain.size(),\n 0.0 if self.logdomain else 1.0)\n for ii in self.nbV[i]:\n if self.logdomain:\n p += self._edges[i][ii.iter].new_message\n else:\n p *= self._edges[i][ii.iter].new_message\n return p\n\n def _belief_v(self, i: int) -> LDFactor:\n p = self._calc_belief_v(i)\n\n if self.logdomain:\n p.p = np.exp(p.p - np.max(p.p))\n p.normalize()\n return LDFactor(self.model, [i], p)\n\n def _belief_f(self, ii) -> LDFactor:\n p = self._calc_belief_f(ii)\n\n if self.logdomain:\n p.p = np.exp(p.p - np.max(p.p))\n p.normalize()\n\n return LDFactor(self.model, self.factors[ii].var_idx, p)\n\n def _calc_belief_f(self, ii: int) -> Prob:\n return self._calc_incoming_message_product(ii, False, 0)\n\n def log_z(self) -> float:\n \"\"\"Calculates logarithm of the partition function.\"\"\"\n ans = 0.0\n for i in range(self.nrVars):\n ans += (1.0 - len(self.nbV[i])) * self._belief_v(i).p.entropy()\n for ii in range(self.nrFactors):\n ans -= dist_kl(self._belief_f(ii).p, self.factors[ii].p)\n return ans\n\n def marg_prob(self) -> np.ndarray:\n \"\"\"Calculates marginal probabilities.\"\"\"\n max_domain_size = np.max([self._var_size(i)\n for i in range(self.nrVars)])\n ans = np.zeros((self.nrVars, max_domain_size), dtype=np.float64)\n for var_id in range(self.nrVars):\n ans[var_id, 0:self._var_size(var_id)] = self._belief_v(var_id).p.p\n return ans\n\n def _var_size(self, var_idx):\n return self.model.get_variable(var_idx).domain.size()\n\n def _update_message(self, i: int, _I: int):\n if recordSentMessages:\n self._sentMessages.append((i, _I))\n if self.damping == 0.0:\n self._edges[i][_I].message = self._edges[i][_I].new_message.clone()\n if self.updates == 'SEQMAX':\n self._update_residual(i, _I, 0.0)\n else:\n d = self.damping\n old_msg = self._edges[i][_I].message.p\n new_msg = self._edges[i][_I].new_message.p\n if self.logdomain:\n self._edges[i][_I].message.p = (\n (old_msg * d) + (new_msg * (1.0 - d)))\n else:\n self._edges[i][_I].message.p = (\n (old_msg ** d) * (new_msg ** (1.0 - d)))\n if self.updates == 'SEQMAX':\n new_res = dist_linf(\n self._edges[i][_I].new_message,\n self._edges[i][_I].message)\n self._update_residual(i, _I, new_res)\n\n def _update_residual(self, i, _I, r):\n self._edges[i][_I].residual = r\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.ones_like",
"numpy.zeros",
"numpy.log",
"numpy.sum",
"numpy.ones",
"numpy.abs"
]
] |
jaimeenahn/COVID-sentence-bert
|
[
"2f47d116f7d9b774946fbf3c0724b721d1b88225"
] |
[
"sentence_transformers/losses/TripleSoftmaxLoss.py"
] |
[
"import torch\nfrom torch import nn, Tensor\nfrom typing import Union, Tuple, List, Iterable, Dict\nfrom ..SentenceTransformer import SentenceTransformer\nimport logging\n\nclass TripleSoftmaxLoss(nn.Module):\n def __init__(self,\n model: SentenceTransformer,\n sentence_embedding_dimension: int,\n num_labels: int,\n vocab,\n document_coef: float = 0.4,\n concatenation_sent_rep: bool = True,\n concatenation_sent_difference: bool = True,\n concatenation_sent_multiplication: bool = False):\n super(TripleSoftmaxLoss, self).__init__()\n self.model = model\n self.num_labels = num_labels\n self.hidden = 1000\n self.concatenation_sent_rep = concatenation_sent_rep\n self.concatenation_sent_difference = concatenation_sent_difference\n self.concatenation_sent_multiplication = concatenation_sent_multiplication\n self.document_coef = document_coef\n\n num_vectors_concatenated = 0\n if concatenation_sent_rep:\n num_vectors_concatenated += 2\n if concatenation_sent_difference:\n num_vectors_concatenated += 2\n\n logging.info(\"Softmax loss: #Vectors concatenated: {}\".format(num_vectors_concatenated))\n self.relu = nn.ReLU()\n self.document2hidden = nn.Linear(291868, self.hidden)\n self.hidden2output = nn.Linear(self.hidden, 768)\n self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)\n\n def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, document_rep: Tensor):\n reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]\n rep_a, rep_b = reps\n document_rep = self.relu(self.hidden2output(self.relu(self.document2hidden(document_rep.float()))))\n vectors_concat = []\n if self.concatenation_sent_rep:\n vectors_concat.append(rep_a)\n vectors_concat.append(rep_b)\n\n if self.concatenation_sent_difference:\n vectors_concat.append(torch.abs(rep_a - rep_b))\n vectors_concat.append(torch.abs(rep_a - document_rep))\n\n\n features = torch.cat(vectors_concat, 1)\n\n output = self.classifier(features)\n loss_fct = nn.CrossEntropyLoss()\n\n if labels is not None:\n loss = (1.0 - self.document_coef) * loss_fct(output, labels.view(-1))\n loss -= self.document_coef * torch.sum(torch.cosine_similarity(document_rep, rep_b)) # todo: MMI๊ฐ ๋ค์ด๊ฐ๋ฉด ์ข๊ธดํ๊ฒ ๋ค.\n return loss\n else:\n return reps, output"
] |
[
[
"torch.nn.Linear",
"torch.cosine_similarity",
"torch.cat",
"torch.nn.ReLU",
"torch.abs",
"torch.nn.CrossEntropyLoss"
]
] |
cisaacstern/hrpyzon
|
[
"10050b5286045f8a9a9d1338b5f4d418b19df39d",
"10050b5286045f8a9a9d1338b5f4d418b19df39d",
"10050b5286045f8a9a9d1338b5f4d418b19df39d"
] |
[
"datashader_nb.py",
"_plot.py",
"tcorrect_std/tc_funcs.py"
] |
[
"# +\nimport numpy as np\nimport holoviews as hv\nfrom holoviews import opts\nimport matplotlib.pyplot as plt\n\nfrom plotsun import plot_sun\n\nhv.extension('bokeh', 'matplotlib')\n# -\n\n# # Load data\n\ndata = np.load('npz_timeseries/subset.npz')\narr = data['arr']\nstack = data['stack']\nsun = data['sun']\nprint(arr.shape, stack.shape, sun.shape)\n\nstack[:,:,25]\n\nplt.imshow(stack[:,:,25], cmap='binary')\n\n# +\nstack = hv.Dataset((np.arange(stack.shape[2]),\n np.arange(stack.shape[0]), \n np.arange(stack.shape[1]), \n stack),\n ['Time', 'x', 'y'], 'Shadows')\n\nstack\n# -\n\narr = hv.Dataset((np.arange(arr.shape[0]),\n np.arange(arr.shape[1]), \n arr),\n ['x', 'y'], 'Elevation')\narr\n\n# # View\n\nopts.defaults(\n opts.GridSpace(shared_xaxis=True, shared_yaxis=True),\n opts.Image(cmap='viridis', invert_yaxis=True, width=400, height=400),\n opts.Labels(text_color='white', text_font_size='8pt', \n text_align='left', text_baseline='bottom'),\n opts.Path(color='white'),\n opts.Spread(width=600),\n opts.Overlay(show_legend=False))\n\nelevation = arr.to(hv.Image, ['x', 'y'])\n\nshadows = stack.to(hv.Image, ['x', 'y'])\n\nelevation\n\ndims = {'figsize':(4,5), 'top':1, 'bottom':0, 'left':0.2, 'right':0.95}\nplot_sun(sunposition=sun, d=dims)\n\nelevation * shadows\n\nstack[:,:,24]\n\n\n",
"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_sun(sunposition, d):\n '''\n\n '''\n fig = plt.figure(figsize=d['figsize'])\n\n tks = [np.deg2rad(a) for a in np.linspace(0,360,8,endpoint=False)]\n xlbls = np.array(['N','45','E','135','S','225','W','315'])\n\n ax = fig.add_subplot(111, projection='polar')\n ax.set_theta_zero_location('N')\n ax.set_xticks((tks))\n ax.set_xticklabels(xlbls, rotation=\"vertical\", size=12)\n ax.tick_params(axis='x', pad=0.5)\n ax.set_theta_direction(-1)\n ax.set_rmin(0)\n ax.set_rmax(90)\n ax.set_rlabel_position(90)\n ax.set_title('Sun Position')\n\n xs = np.deg2rad(sunposition[0,:])\n ys = 90 - sunposition[1,:]\n\n ax.scatter(xs, ys, s=10, c='orange', alpha=0.5)\n \n plt.subplots_adjust(top=d['top'], bottom=d['bottom'], \n left=d['left'], right=d['right'])\n plt.close()\n return fig\n",
"import numpy as np\n\ndef calc_correction(grids, sunpos):\n '''\n The argument `grids` should be a tuple of `length = 2` for which\n `grids[0]` is an array of slope values in radians, and grids[1]\n is an array of aspect values in degrees, with south = 0, and east positive.\n '''\n slope, aspect = grids\n azi, alt = sunpos\n\n T0 = np.deg2rad(alt)\n P0 = np.deg2rad(180 - azi)\n\n S = np.deg2rad(slope) \n A = np.deg2rad(aspect)\n\n cosT0 = np.cos(T0)\n cosS = np.cos(S)\n sinT0 = np.sin(T0)\n sinS = np.sin(S)\n cosP0A = np.cos(P0 - A)\n\n cosT = (cosT0*cosS) + (sinT0*sinS*cosP0A)\n\n return cosT/cosT0"
] |
[
[
"numpy.arange",
"numpy.load",
"matplotlib.pyplot.imshow"
],
[
"numpy.array",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.deg2rad",
"numpy.linspace",
"matplotlib.pyplot.subplots_adjust"
],
[
"numpy.deg2rad",
"numpy.sin",
"numpy.cos"
]
] |
yijingru/ObjGuided-Instance-Segmentation
|
[
"71e39f84aada581743a5d65f103e63ba0fcc8a9a"
] |
[
"models/layers.py"
] |
[
"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\n\nclass CombinationModule(nn.Module):\n def __init__(self, c_low, c_up, batch_norm=False, group_norm=False, instance_norm=False):\n super(CombinationModule, self).__init__()\n if batch_norm:\n self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),\n nn.BatchNorm2d(c_up),\n nn.ReLU(inplace=True))\n self.cat_conv = nn.Sequential(nn.Conv2d(c_up*2, c_up, kernel_size=1, stride=1),\n nn.BatchNorm2d(c_up),\n nn.ReLU(inplace=True))\n elif group_norm:\n self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),\n nn.GroupNorm(num_groups=32, num_channels=c_up),\n nn.ReLU(inplace=True))\n self.cat_conv = nn.Sequential(nn.Conv2d(c_up * 2, c_up, kernel_size=1, stride=1),\n nn.GroupNorm(num_groups=32, num_channels=c_up),\n nn.ReLU(inplace=True))\n elif instance_norm:\n self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),\n nn.InstanceNorm2d(num_features=c_up),#track_running_stats=True),\n nn.ReLU(inplace=True))\n self.cat_conv = nn.Sequential(nn.Conv2d(c_up * 2, c_up, kernel_size=1, stride=1),\n nn.InstanceNorm2d(num_features=c_up),# track_running_stats=True),\n nn.ReLU(inplace=True))\n else:\n self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1),\n nn.ReLU(inplace=True))\n self.cat_conv = nn.Sequential(nn.Conv2d(c_up*2, c_up, kernel_size=1, stride=1),\n nn.ReLU(inplace=True))\n\n def forward(self, x_low, x_up):\n x_low = self.up(F.interpolate(x_low, x_up.shape[2:], mode='bilinear', align_corners=False))\n # if self.up[1].running_mean is not None:\n # print(self.up[1].running_mean.shape)\n return self.cat_conv(torch.cat((x_up, x_low), 1))\n"
] |
[
[
"torch.cat",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.GroupNorm",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d"
]
] |
hadleyhzy34/mpc_python_traj
|
[
"48451533c7ecd473e949c3a680a166fb046447bf"
] |
[
"cubic_spline_planner.py"
] |
[
"\"\"\"\nCubic spline planner\nAuthor: Atsushi Sakai(@Atsushi_twi)\n\"\"\"\nimport math\nimport numpy as np\nimport bisect\n\n\nclass Spline:\n \"\"\"\n Cubic Spline class\n \"\"\"\n\n def __init__(self, x, y):\n self.b, self.c, self.d, self.w = [], [], [], []\n\n self.x = x\n self.y = y\n\n self.nx = len(x) # dimension of x\n h = np.diff(x)\n\n # calc coefficient c\n self.a = [iy for iy in y]\n\n # calc coefficient c\n A = self.__calc_A(h)\n B = self.__calc_B(h)\n self.c = np.linalg.solve(A, B)\n # print(self.c1)\n\n # calc spline coefficient b and d\n for i in range(self.nx - 1):\n self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))\n tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \\\n (self.c[i + 1] + 2.0 * self.c[i]) / 3.0\n self.b.append(tb)\n\n def calc(self, t):\n \"\"\"\n Calc position\n if t is outside of the input x, return None\n \"\"\"\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = self.a[i] + self.b[i] * dx + \\\n self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0\n\n return result\n\n def calcd(self, t):\n \"\"\"\n Calc first derivative\n if t is outside of the input x, return None\n \"\"\"\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0\n return result\n\n def calcdd(self, t):\n \"\"\"\n Calc second derivative\n \"\"\"\n\n if t < self.x[0]:\n return None\n elif t > self.x[-1]:\n return None\n\n i = self.__search_index(t)\n dx = t - self.x[i]\n result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx\n return result\n\n def __search_index(self, x):\n \"\"\"\n search data segment index\n \"\"\"\n return bisect.bisect(self.x, x) - 1\n\n def __calc_A(self, h):\n \"\"\"\n calc matrix A for spline coefficient c\n \"\"\"\n A = np.zeros((self.nx, self.nx))\n A[0, 0] = 1.0\n for i in range(self.nx - 1):\n if i != (self.nx - 2):\n A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])\n A[i + 1, i] = h[i]\n A[i, i + 1] = h[i]\n\n A[0, 1] = 0.0\n A[self.nx - 1, self.nx - 2] = 0.0\n A[self.nx - 1, self.nx - 1] = 1.0\n # print(A)\n return A\n\n def __calc_B(self, h):\n \"\"\"\n calc matrix B for spline coefficient c\n \"\"\"\n B = np.zeros(self.nx)\n for i in range(self.nx - 2):\n B[i + 1] = 3.0 * (self.a[i + 2] - self.a[i + 1]) / \\\n h[i + 1] - 3.0 * (self.a[i + 1] - self.a[i]) / h[i]\n return B\n\n\nclass Spline2D:\n \"\"\"\n 2D Cubic Spline class\n \"\"\"\n\n def __init__(self, x, y):\n self.s = self.__calc_s(x, y)\n self.sx = Spline(self.s, x)\n self.sy = Spline(self.s, y)\n\n def __calc_s(self, x, y):\n dx = np.diff(x)\n dy = np.diff(y)\n self.ds = np.hypot(dx, dy)\n s = [0]\n s.extend(np.cumsum(self.ds))\n return s\n\n def calc_position(self, s):\n \"\"\"\n calc position\n \"\"\"\n x = self.sx.calc(s)\n y = self.sy.calc(s)\n\n return x, y\n\n def calc_curvature(self, s):\n \"\"\"\n calc curvature\n \"\"\"\n dx = self.sx.calcd(s)\n ddx = self.sx.calcdd(s)\n dy = self.sy.calcd(s)\n ddy = self.sy.calcdd(s)\n k = (ddy * dx - ddx * dy) / ((dx ** 2 + dy ** 2)**(3 / 2))\n return k\n\n def calc_yaw(self, s):\n \"\"\"\n calc yaw\n \"\"\"\n dx = self.sx.calcd(s)\n dy = self.sy.calcd(s)\n yaw = math.atan2(dy, dx)\n return yaw\n\n\ndef calc_spline_course(x, y, ds=0.1):\n sp = Spline2D(x, y)\n s = list(np.arange(0, sp.s[-1], ds))\n\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = sp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n 
ryaw.append(sp.calc_yaw(i_s))\n rk.append(sp.calc_curvature(i_s))\n\n return rx, ry, ryaw, rk, s\n\n\ndef main(): # pragma: no cover\n print(\"Spline 2D test\")\n import matplotlib.pyplot as plt\n x = [-2.5, 0.0, 2.5, 5.0, 7.5, 3.0, -1.0]\n y = [0.7, -6, 5, 6.5, 0.0, 5.0, -2.0]\n ds = 0.1 # [m] distance of each interpolated points\n\n sp = Spline2D(x, y)\n s = np.arange(0, sp.s[-1], ds)\n \n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = sp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(sp.calc_yaw(i_s))\n rk.append(sp.calc_curvature(i_s))\n \n plt.plot(rx,ry)\n plt.show()\n plt.close()\n\n plt.subplots(1)\n plt.plot(x, y, \"xb\", label=\"input\")\n plt.plot(rx, ry, \"-r\", label=\"spline\")\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel(\"x[m]\")\n plt.ylabel(\"y[m]\")\n plt.legend()\n\n plt.subplots(1)\n plt.plot(s, [np.rad2deg(iyaw) for iyaw in ryaw], \"-r\", label=\"yaw\")\n plt.grid(True)\n plt.legend()\n plt.xlabel(\"line length[m]\")\n plt.ylabel(\"yaw angle[deg]\")\n\n plt.subplots(1)\n plt.plot(s, rk, \"-r\", label=\"curvature\")\n plt.grid(True)\n plt.legend()\n plt.xlabel(\"line length[m]\")\n plt.ylabel(\"curvature [1/m]\")\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.zeros",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"numpy.diff",
"numpy.rad2deg",
"numpy.arange",
"numpy.hypot",
"matplotlib.pyplot.ylabel",
"numpy.linalg.solve",
"numpy.cumsum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis"
]
] |
pietroepis/PyCTBN
|
[
"235c85c8fad8a85f1243dac8162dda60bf45291b"
] |
[
"PyCTBN/tests/structure_graph/test_networkgraph.py"
] |
[
"\n# License: MIT License\n\n\nimport unittest\nimport glob\nimport os\nimport networkx as nx\nimport numpy as np\nimport itertools\n\nfrom ...PyCTBN.structure_graph.sample_path import SamplePath\nfrom ...PyCTBN.structure_graph.network_graph import NetworkGraph\nfrom ...PyCTBN.utility.json_importer import JsonImporter\n\n\nclass TestNetworkGraph(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.read_files = glob.glob(os.path.join('./PyCTBN/test_data', \"*.json\"))\n cls.importer = JsonImporter(cls.read_files[2], 'samples', 'dyn.str', 'variables', 'Time', 'Name')\n cls.importer.import_data(0)\n cls.s1 = SamplePath(cls.importer)\n cls.s1.build_trajectories()\n cls.s1.build_structure()\n\n def test_init(self):\n g1 = NetworkGraph(self.s1.structure)\n self.assertEqual(self.s1.structure, g1._graph_struct)\n self.assertIsInstance(g1._graph, nx.DiGraph)\n self.assertIsNone(g1.time_scalar_indexing_strucure)\n self.assertIsNone(g1.transition_scalar_indexing_structure)\n self.assertIsNone(g1.transition_filtering)\n self.assertIsNone(g1.p_combs)\n\n def test_add_nodes(self):\n g1 = NetworkGraph(self.s1.structure)\n g1.add_nodes(self.s1.structure.nodes_labels)\n for n1, n2 in zip(g1.nodes, self.s1.structure.nodes_labels):\n self.assertEqual(n1, n2)\n\n def test_add_edges(self):\n g1 = NetworkGraph(self.s1.structure)\n g1.add_edges(self.s1.structure.edges)\n for e in self.s1.structure.edges:\n self.assertIn(tuple(e), g1.edges)\n\n def test_fast_init(self):\n g1 = NetworkGraph(self.s1.structure)\n for node in self.s1.structure.nodes_labels:\n g1.fast_init(node)\n self.assertIsNotNone(g1._graph.nodes)\n self.assertIsNotNone(g1._graph.edges)\n self.assertIsInstance(g1._time_scalar_indexing_structure, np.ndarray)\n self.assertIsInstance(g1._transition_scalar_indexing_structure, np.ndarray)\n self.assertIsInstance(g1._time_filtering, np.ndarray)\n self.assertIsInstance(g1._transition_filtering, np.ndarray)\n self.assertIsInstance(g1._p_combs_structure, np.ndarray)\n self.assertIsInstance(g1._aggregated_info_about_nodes_parents, tuple)\n\n def test_get_ordered_by_indx_set_of_parents(self):\n g1 = NetworkGraph(self.s1.structure)\n g1.add_nodes(self.s1.structure.nodes_labels)\n g1.add_edges(self.s1.structure.edges)\n for node in self.s1.structure.nodes_labels:\n aggr_info = g1.get_ordered_by_indx_set_of_parents(node)\n for indx in range(len(aggr_info[0]) - 1 ):\n self.assertLess(g1.get_node_indx(aggr_info[0][indx]), g1.get_node_indx(aggr_info[0][indx + 1]))\n for par, par_indx in zip(aggr_info[0], aggr_info[1]):\n self.assertEqual(g1.get_node_indx(par), par_indx)\n for par, par_val in zip(aggr_info[0], aggr_info[2]):\n self.assertEqual(g1._graph_struct.get_states_number(par), par_val)\n\n def test_build_time_scalar_indexing_structure_for_a_node(self):\n g1 = NetworkGraph(self.s1.structure)\n g1.add_nodes(self.s1.structure.nodes_labels)\n g1.add_edges(self.s1.structure.edges)\n for node in self.s1.structure.nodes_labels:\n aggr_info = g1.get_ordered_by_indx_set_of_parents(node)\n self.aux_build_time_scalar_indexing_structure_for_a_node(g1, node, aggr_info[1],\n aggr_info[0], aggr_info[2])\n\n def aux_build_time_scalar_indexing_structure_for_a_node(self, graph, node_id, parents_indxs, parents_labels, parents_vals):\n node_states = graph.get_states_number(node_id)\n time_scalar_indexing = NetworkGraph.build_time_scalar_indexing_structure_for_a_node(node_states, parents_vals)\n self.assertEqual(len(time_scalar_indexing), len(parents_indxs) + 1)\n merged_list = parents_labels[:]\n 
merged_list.insert(0, node_id)\n vals_list = []\n for node in merged_list:\n vals_list.append(graph.get_states_number(node))\n t_vec = np.array(vals_list)\n t_vec = t_vec.cumprod()\n self.assertTrue(np.array_equal(time_scalar_indexing, t_vec))\n\n def test_build_transition_scalar_indexing_structure_for_a_node(self):\n g1 = NetworkGraph(self.s1.structure)\n g1.add_nodes(self.s1.structure.nodes_labels)\n g1.add_edges(self.s1.structure.edges)\n for node in self.s1.structure.nodes_labels:\n aggr_info = g1.get_ordered_by_indx_set_of_parents(node)\n self.aux_build_transition_scalar_indexing_structure_for_a_node(g1, node, aggr_info[1],\n aggr_info[0], aggr_info[2])\n\n def aux_build_transition_scalar_indexing_structure_for_a_node(self, graph, node_id, parents_indxs, parents_labels,\n parents_values):\n node_states = graph.get_states_number(node_id)\n transition_scalar_indexing = graph.build_transition_scalar_indexing_structure_for_a_node(node_states,\n parents_values)\n self.assertEqual(len(transition_scalar_indexing), len(parents_indxs) + 2)\n merged_list = parents_labels[:]\n merged_list.insert(0, node_id)\n merged_list.insert(0, node_id)\n vals_list = []\n for node_id in merged_list:\n vals_list.append(graph.get_states_number(node_id))\n m_vec = np.array([vals_list])\n m_vec = m_vec.cumprod()\n self.assertTrue(np.array_equal(transition_scalar_indexing, m_vec))\n\n def test_build_time_columns_filtering_structure_for_a_node(self):\n g1 = NetworkGraph(self.s1.structure)\n g1.add_nodes(self.s1.structure.nodes_labels)\n g1.add_edges(self.s1.structure.edges)\n for node in self.s1.structure.nodes_labels:\n aggr_info = g1.get_ordered_by_indx_set_of_parents(node)\n self.aux_build_time_columns_filtering_structure_for_a_node(g1, node, aggr_info[1])\n\n def aux_build_time_columns_filtering_structure_for_a_node(self, graph, node_id, p_indxs):\n graph.build_time_columns_filtering_for_a_node(graph.get_node_indx(node_id), p_indxs)\n single_filter = []\n single_filter.append(graph.get_node_indx(node_id))\n single_filter.extend(p_indxs)\n self.assertTrue(np.array_equal(graph.build_time_columns_filtering_for_a_node(graph.get_node_indx(node_id),\n p_indxs),np.array(single_filter)))\n def test_build_transition_columns_filtering_structure(self):\n g1 = NetworkGraph(self.s1.structure)\n g1.add_nodes(self.s1.structure.nodes_labels)\n g1.add_edges(self.s1.structure.edges)\n for node in self.s1.structure.nodes_labels:\n aggr_info = g1.get_ordered_by_indx_set_of_parents(node)\n self.aux_build_time_columns_filtering_structure_for_a_node(g1, node, aggr_info[1])\n\n def aux_build_transition_columns_filtering_structure(self, graph, node_id, p_indxs):\n single_filter = []\n single_filter.append(graph.get_node_indx(node_id) + graph._graph_struct.total_variables_number)\n single_filter.append(graph.get_node_indx(node_id))\n single_filter.extend(p_indxs)\n self.assertTrue(np.array_equal(graph.build_transition_filtering_for_a_node(graph.get_node_indx(node_id),\n\n p_indxs), np.array(single_filter)))\n def test_build_p_combs_structure(self):\n g1 = NetworkGraph(self.s1.structure)\n g1.add_nodes(self.s1.structure.nodes_labels)\n g1.add_edges(self.s1.structure.edges)\n for node in self.s1.structure.nodes_labels:\n aggr_info = g1.get_ordered_by_indx_set_of_parents(node)\n self.aux_build_p_combs_structure(g1, aggr_info[2])\n\n def aux_build_p_combs_structure(self, graph, p_vals):\n p_combs = graph.build_p_comb_structure_for_a_node(p_vals)\n p_possible_vals = []\n for val in p_vals:\n vals = [v for v in range(val)]\n 
p_possible_vals.extend(vals)\n comb_struct = set(itertools.product(p_possible_vals,repeat=len(p_vals)))\n for comb in comb_struct:\n self.assertIn(np.array(comb), p_combs)\n\n def test_get_parents_by_id(self):\n g1 = NetworkGraph(self.s1.structure)\n g1.add_nodes(self.s1.structure.nodes_labels)\n g1.add_edges(self.s1.structure.edges)\n for node in g1.nodes:\n self.assertListEqual(g1.get_parents_by_id(node), list(g1._graph.predecessors(node)))\n\n def test_get_states_number(self):\n g1 = NetworkGraph(self.s1.structure)\n g1.add_nodes(self.s1.structure.nodes_labels)\n g1.add_edges(self.s1.structure.edges)\n for node, val in zip(g1.nodes, g1.nodes_values):\n self.assertEqual(val, g1.get_states_number(node))\n\n def test_get_node_indx(self):\n g1 = NetworkGraph(self.s1.structure)\n g1.add_nodes(self.s1.structure.nodes_labels)\n g1.add_edges(self.s1.structure.edges)\n for node, indx in zip(g1.nodes, g1.nodes_indexes):\n self.assertEqual(indx, g1.get_node_indx(node))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.array",
"numpy.array_equal"
]
] |
mtaillefumier/SIRIUS
|
[
"f4b5c4810af2a3ea1e67992d65750535227da84b",
"f4b5c4810af2a3ea1e67992d65750535227da84b"
] |
[
"python_module/sirius/ot/ot_precondition.py",
"python_module/sirius/edft/free_energy.py"
] |
[
"from ..coefficient_array import PwCoeffs\nfrom scipy.sparse import dia_matrix\nimport numpy as np\n\n\ndef make_kinetic_precond(kpointset, c0, eps=0.1, asPwCoeffs=True):\n \"\"\"\n Preconditioner\n P = 1 / (||k|| + ฮต)\n\n Keyword Arguments:\n kpointset --\n \"\"\"\n\n nk = len(kpointset)\n nc = kpointset.ctx().num_spins()\n if nc == 1 and nk == 1 and not asPwCoeffs:\n # return as np.matrix\n kp = kpointset[0]\n gkvec = kp.gkvec()\n assert (gkvec.num_gvec() == gkvec.count())\n N = gkvec.count()\n d = np.array([\n 1 / (np.sum((np.array(gkvec.gkvec(i)))**2) + eps)\n for i in range(N)\n ])\n return DiagonalPreconditioner(\n D=dia_matrix((d, 0), shape=(N, N)), c0=c0)\n else:\n P = PwCoeffs(dtype=np.float64, ctype=dia_matrix)\n for k in range(nk):\n kp = kpointset[k]\n gkvec = kp.gkvec()\n assert (gkvec.num_gvec() == gkvec.count())\n N = gkvec.count()\n d = np.array([\n 1 / (np.sum(\n (np.array(gkvec.gkvec_cart(i)))**2) + eps)\n for i in range(N)\n ])\n for ispn in range(nc):\n P[k, ispn] = dia_matrix((d, 0), shape=(N, N))\n return DiagonalPreconditioner(P, c0)\n\n\nclass Preconditioner:\n def __init__(self):\n pass\n\n\nclass DiagonalPreconditioner(Preconditioner):\n \"\"\"\n Apply diagonal preconditioner and project resulting gradient to satisfy the constraint.\n \"\"\"\n\n def __init__(self, D, c0):\n super().__init__()\n self.c0 = c0\n self.D = D\n\n def __matmul__(self, other):\n \"\"\"\n \"\"\"\n from ..coefficient_array import CoefficientArray\n from .ot_transformations import lagrangeMult\n\n out = type(other)(dtype=other.dtype)\n if isinstance(other, CoefficientArray):\n for key, Dl in self.D.items():\n out[key] = Dl * other[key]\n else:\n raise ValueError('wrong type given')\n ll = lagrangeMult(other, self.c0, self)\n return out + ll\n\n def __mul__(self, s):\n \"\"\"\n\n \"\"\"\n from ..coefficient_array import CoefficientArray\n import numpy as np\n\n if np.isscalar(s):\n for key, Dl in self.D.items():\n self.D[key] = s*Dl\n elif isinstance(s, CoefficientArray):\n out = type(s)(dtype=s.dtype)\n for key in s.keys():\n out[key] = self.D[key] * s[key]\n return out\n\n __lmul__ = __mul__\n __rmul__ = __mul__\n\n def __neg__(self):\n \"\"\"\n \"\"\"\n from ..coefficient_array import CoefficientArray\n if isinstance(self.D, CoefficientArray):\n out_data = type(self.D)(dtype=self.D.dtype, ctype=self.D.ctype)\n out = DiagonalPreconditioner(out_data, self.c0)\n for k, v in self.D.items():\n out.D[k] = -v\n return out\n else:\n out = DiagonalPreconditioner(self.D, self.c0)\n out.D = -self.D\n return out\n\n def __getitem__(self, key):\n return self.D[key]\n\n\nclass IdentityPreconditioner(Preconditioner):\n\n def __init__(self, c0, _f=1):\n super().__init__()\n self.c0 = c0\n self._f = _f\n\n def __matmul__(self, other):\n from .ot_transformations import lagrangeMult\n\n ll = lagrangeMult(other, self.c0, self)\n return self._f * other + ll\n\n def __mul__(self, s):\n return self._f * s\n\n def __neg__(self):\n return IdentityPreconditioner(self.c0, _f=-self._f)\n\n def __getitem__(self, key):\n return self._f\n\n __lmul__ = __mul__\n __rmul__ = __mul__\n",
"import numpy as np\nfrom scipy.constants import physical_constants\nfrom ..coefficient_array import CoefficientArray\nfrom .smearing import Smearing\n\n\ndef _s(x):\n \"\"\"\n entropy term\n \"\"\"\n x = np.array(x)\n out = np.zeros_like(x)\n ind = np.logical_or(np.isclose(x, 1), np.isclose(x, 0))\n z = x[~ind]\n out[~ind] = z**2 * np.log(z**2) + (1 - z**2) * np.log(1 - z**2)\n return out\n\n\ndef s(x):\n \"\"\"\n entropy term\n \"\"\"\n if isinstance(x, CoefficientArray):\n out = type(x)(dtype=x.dtype, ctype=np.array)\n for key, val in x._data.items():\n out[key] = _s(x[key])\n return out\n else:\n return _s(x)\n\n\nclass FreeEnergy:\n \"\"\"\n copied from Baarman implementation\n \"\"\"\n\n def __init__(self, E, T, smearing):\n \"\"\"\n Keyword Arguments:\n energy -- total energy object\n temperature -- temperature in Kelvin\n H -- Hamiltonian\n smearing --\n \"\"\"\n self.energy = E\n self.T = T\n assert isinstance(smearing, Smearing)\n self.smearing = smearing\n if self.energy.kpointset.ctx().num_mag_dims() == 0:\n self.scale = 0.5\n else:\n self.scale = 1\n\n def __call__(self, cn, fn):\n \"\"\"\n Keyword Arguments:\n cn -- PW coefficients\n fn -- occupations numbers\n \"\"\"\n\n self.energy.kpointset.fn = fn\n E, HX = self.energy.compute(cn)\n entropy = self.smearing.entropy(fn)\n return E + entropy, HX\n"
] |
[
[
"numpy.isscalar",
"scipy.sparse.dia_matrix"
],
[
"numpy.zeros_like",
"numpy.array",
"numpy.isclose",
"numpy.log"
]
] |
rcorona/R2R-EnvDrop
|
[
"e91c21283ffc309bedfe49596b4066afa338fde6"
] |
[
"r2r_src/agent.py"
] |
[
"\nimport json\nimport os\nimport sys\nimport numpy as np\nimport random\nimport math\nimport time\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\n\nfrom env import R2RBatch\nfrom utils import padding_idx, add_idx, Tokenizer\nimport utils\nimport model\nimport param\nfrom param import args\nfrom collections import defaultdict\n\n\nclass BaseAgent(object):\n ''' Base class for an R2R agent to generate and save trajectories. '''\n\n def __init__(self, env, results_path):\n self.env = env\n self.results_path = results_path\n random.seed(1)\n self.results = {}\n self.losses = [] # For learning agents\n \n def write_results(self):\n output = [{'instr_id':k, 'trajectory': v} for k,v in self.results.items()]\n with open(self.results_path, 'w') as f:\n json.dump(output, f)\n\n def get_results(self):\n output = [{'instr_id': k, 'trajectory': v} for k, v in self.results.items()]\n return output\n\n def rollout(self, **args):\n ''' Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)] '''\n raise NotImplementedError\n\n @staticmethod\n def get_agent(name):\n return globals()[name+\"Agent\"]\n\n def test(self, iters=None, **kwargs):\n self.env.reset_epoch(shuffle=(iters is not None)) # If iters is not none, shuffle the env batch\n self.losses = []\n self.results = {}\n # We rely on env showing the entire batch before repeating anything\n looped = False\n self.loss = 0\n if iters is not None:\n # For each time, it will run the first 'iters' iterations. (It was shuffled before)\n for i in range(iters):\n for traj in self.rollout(**kwargs):\n self.loss = 0\n self.results[traj['instr_id']] = traj['path']\n else: # Do a full round\n while True:\n for traj in self.rollout(**kwargs):\n if traj['instr_id'] in self.results:\n looped = True\n else:\n self.loss = 0\n self.results[traj['instr_id']] = traj['path']\n if looped:\n break\n\nclass Seq2SeqAgent(BaseAgent):\n ''' An agent based on an LSTM seq2seq model with attention. 
'''\n\n # For now, the agent can't pick which forward move to make - just the one in the middle\n env_actions = {\n 'left': (0,-1, 0), # left\n 'right': (0, 1, 0), # right\n 'up': (0, 0, 1), # up\n 'down': (0, 0,-1), # down\n 'forward': (1, 0, 0), # forward\n '<end>': (0, 0, 0), # <end>\n '<start>': (0, 0, 0), # <start>\n '<ignore>': (0, 0, 0) # <ignore>\n }\n\n def __init__(self, env, results_path, tok, episode_len=20):\n super(Seq2SeqAgent, self).__init__(env, results_path)\n self.tok = tok\n self.episode_len = episode_len\n self.feature_size = self.env.feature_size\n\n # Models\n enc_hidden_size = args.rnn_dim//2 if args.bidir else args.rnn_dim\n self.encoder = model.EncoderLSTM(tok.vocab_size(), args.wemb, enc_hidden_size, padding_idx,\n args.dropout, bidirectional=args.bidir).cuda()\n self.decoder = model.AttnDecoderLSTM(args.aemb, args.rnn_dim, args.dropout, feature_size=self.feature_size + args.angle_feat_size).cuda()\n self.critic = model.Critic().cuda()\n self.models = (self.encoder, self.decoder, self.critic)\n\n # Optimizers\n self.encoder_optimizer = args.optimizer(self.encoder.parameters(), lr=args.lr)\n self.decoder_optimizer = args.optimizer(self.decoder.parameters(), lr=args.lr)\n self.critic_optimizer = args.optimizer(self.critic.parameters(), lr=args.lr)\n self.optimizers = (self.encoder_optimizer, self.decoder_optimizer, self.critic_optimizer)\n\n # Evaluations\n self.losses = []\n self.criterion = nn.CrossEntropyLoss(ignore_index=args.ignoreid, size_average=False)\n\n # Logs\n sys.stdout.flush()\n self.logs = defaultdict(list)\n\n\n def _sort_batch(self, obs):\n ''' Extract instructions from a list of observations and sort by descending\n sequence length (to enable PyTorch packing). '''\n\n seq_tensor = np.array([ob['instr_encoding'] for ob in obs])\n seq_lengths = np.argmax(seq_tensor == padding_idx, axis=1)\n seq_lengths[seq_lengths == 0] = seq_tensor.shape[1] # Full length\n\n seq_tensor = torch.from_numpy(seq_tensor)\n seq_lengths = torch.from_numpy(seq_lengths)\n\n # Sort sequences by lengths\n seq_lengths, perm_idx = seq_lengths.sort(0, True) # True -> descending\n sorted_tensor = seq_tensor[perm_idx]\n mask = (sorted_tensor == padding_idx)[:,:seq_lengths[0]] # seq_lengths[0] is the Maximum length\n\n return Variable(sorted_tensor, requires_grad=False).long().cuda(), \\\n mask.byte().cuda(), \\\n list(seq_lengths), list(perm_idx)\n\n def _feature_variable(self, obs):\n ''' Extract precomputed features into variable. 
'''\n features = np.empty((len(obs), args.views, self.feature_size + args.angle_feat_size), dtype=np.float32)\n for i, ob in enumerate(obs):\n features[i, :, :] = ob['feature'] # Image feat\n return Variable(torch.from_numpy(features), requires_grad=False).cuda()\n\n def _candidate_variable(self, obs):\n candidate_leng = [len(ob['candidate']) + 1 for ob in obs] # +1 is for the end\n candidate_feat = np.zeros((len(obs), max(candidate_leng), self.feature_size + args.angle_feat_size), dtype=np.float32)\n # Note: The candidate_feat at len(ob['candidate']) is the feature for the END\n # which is zero in my implementation\n for i, ob in enumerate(obs):\n for j, c in enumerate(ob['candidate']):\n candidate_feat[i, j, :] = c['feature'] # Image feat\n return torch.from_numpy(candidate_feat).cuda(), candidate_leng\n\n def get_input_feat(self, obs):\n input_a_t = np.zeros((len(obs), args.angle_feat_size), np.float32)\n for i, ob in enumerate(obs):\n input_a_t[i] = utils.angle_feature(ob['heading'], ob['elevation'])\n input_a_t = torch.from_numpy(input_a_t).cuda()\n\n f_t = self._feature_variable(obs) # Image features from obs\n candidate_feat, candidate_leng = self._candidate_variable(obs)\n\n return input_a_t, f_t, candidate_feat, candidate_leng\n\n def _teacher_action(self, obs, ended):\n \"\"\"\n Extract teacher actions into variable.\n :param obs: The observation.\n :param ended: Whether the action seq is ended\n :return:\n \"\"\"\n a = np.zeros(len(obs), dtype=np.int64)\n for i, ob in enumerate(obs):\n if ended[i]: # Just ignore this index\n a[i] = args.ignoreid\n else:\n for k, candidate in enumerate(ob['candidate']):\n if candidate['viewpointId'] == ob['teacher']: # Next view point\n a[i] = k\n break\n else: # Stop here\n assert ob['teacher'] == ob['viewpoint'] # The teacher action should be \"STAY HERE\"\n a[i] = len(ob['candidate'])\n return torch.from_numpy(a).cuda()\n\n def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None):\n \"\"\"\n Interface between Panoramic view and Egocentric view \n It will convert the action panoramic view action a_t to equivalent egocentric view actions for the simulator\n \"\"\"\n def take_action(i, idx, name):\n if type(name) is int: # Go to the next view\n self.env.env.sims[idx].makeAction(name, 0, 0)\n else: # Adjust\n self.env.env.sims[idx].makeAction(*self.env_actions[name])\n state = self.env.env.sims[idx].getState()\n if traj is not None:\n traj[i]['path'].append((state.location.viewpointId, state.heading, state.elevation))\n if perm_idx is None:\n perm_idx = range(len(perm_obs))\n for i, idx in enumerate(perm_idx):\n action = a_t[i]\n if action != -1: # -1 is the <stop> action\n select_candidate = perm_obs[i]['candidate'][action]\n src_point = perm_obs[i]['viewIndex']\n trg_point = select_candidate['pointId']\n src_level = (src_point ) // 12 # The point idx started from 0\n trg_level = (trg_point ) // 12\n while src_level < trg_level: # Tune up\n take_action(i, idx, 'up')\n src_level += 1\n while src_level > trg_level: # Tune down\n take_action(i, idx, 'down')\n src_level -= 1\n while self.env.env.sims[idx].getState().viewIndex != trg_point: # Turn right until the target\n take_action(i, idx, 'right')\n assert select_candidate['viewpointId'] == \\\n self.env.env.sims[idx].getState().navigableLocations[select_candidate['idx']].viewpointId\n take_action(i, idx, select_candidate['idx'])\n\n def rollout(self, train_ml=None, train_rl=True, reset=True, speaker=None):\n \"\"\"\n :param train_ml: The weight to train with maximum likelihood\n :param 
train_rl: whether use RL in training\n :param reset: Reset the environment\n :param speaker: Speaker used in back translation.\n If the speaker is not None, use back translation.\n O.w., normal training\n :return:\n \"\"\"\n if self.feedback == 'teacher' or self.feedback == 'argmax':\n train_rl = False\n\n if reset:\n # Reset env\n obs = np.array(self.env.reset())\n else:\n obs = np.array(self.env._get_obs())\n\n batch_size = len(obs)\n\n if speaker is not None: # Trigger the self_train mode!\n noise = self.decoder.drop_env(torch.ones(self.feature_size).cuda())\n batch = self.env.batch.copy()\n speaker.env = self.env\n insts = speaker.infer_batch(featdropmask=noise) # Use the same drop mask in speaker\n\n # Create fake environments with the generated instruction\n boss = np.ones((batch_size, 1), np.int64) * self.tok.word_to_index['<BOS>'] # First word is <BOS>\n insts = np.concatenate((boss, insts), 1)\n for i, (datum, inst) in enumerate(zip(batch, insts)):\n if inst[-1] != self.tok.word_to_index['<PAD>']: # The inst is not ended!\n inst[-1] = self.tok.word_to_index['<EOS>']\n datum.pop('instructions')\n datum.pop('instr_encoding')\n datum['instructions'] = self.tok.decode_sentence(inst)\n datum['instr_encoding'] = inst\n obs = np.array(self.env.reset(batch))\n\n # Reorder the language input for the encoder (do not ruin the original code)\n seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)\n perm_obs = obs[perm_idx]\n\n ctx, h_t, c_t = self.encoder(seq, seq_lengths)\n ctx_mask = seq_mask\n\n # Init the reward shaping\n last_dist = np.zeros(batch_size, np.float32)\n for i, ob in enumerate(perm_obs): # The init distance from the view point to the target\n last_dist[i] = ob['distance']\n\n # Record starting point\n traj = [{\n 'instr_id': ob['instr_id'],\n 'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]\n } for ob in perm_obs]\n\n # For test result submission\n visited = [set() for _ in perm_obs]\n\n # Initialization the tracking state\n ended = np.array([False] * batch_size) # Indices match permuation of the model, not env\n\n # Init the logs\n rewards = []\n hidden_states = []\n policy_log_probs = []\n masks = []\n entropys = []\n ml_loss = 0.\n\n h1 = h_t\n for t in range(self.episode_len):\n\n input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)\n if speaker is not None: # Apply the env drop mask to the feat\n candidate_feat[..., :-args.angle_feat_size] *= noise\n f_t[..., :-args.angle_feat_size] *= noise\n\n h_t, c_t, logit, h1 = self.decoder(input_a_t, f_t, candidate_feat,\n h_t, h1, c_t,\n ctx, ctx_mask,\n already_dropfeat=(speaker is not None))\n\n hidden_states.append(h_t)\n\n # Mask outputs where agent can't move forward\n # Here the logit is [b, max_candidate]\n candidate_mask = utils.length2mask(candidate_leng)\n if args.submit: # Avoding cyclic path\n for ob_id, ob in enumerate(perm_obs):\n visited[ob_id].add(ob['viewpoint'])\n for c_id, c in enumerate(ob['candidate']):\n if c['viewpointId'] in visited[ob_id]:\n candidate_mask[ob_id][c_id] = 1\n logit.masked_fill_(candidate_mask, -float('inf'))\n\n # Supervised training\n target = self._teacher_action(perm_obs, ended)\n ml_loss += self.criterion(logit, target)\n\n # Determine next model inputs\n if self.feedback == 'teacher':\n a_t = target # teacher forcing\n elif self.feedback == 'argmax': \n _, a_t = logit.max(1) # student forcing - argmax\n a_t = a_t.detach()\n log_probs = F.log_softmax(logit, 1) # Calculate the log_prob here\n policy_log_probs.append(log_probs.gather(1, 
a_t.unsqueeze(1))) # Gather the log_prob for each batch\n elif self.feedback == 'sample':\n probs = F.softmax(logit, 1) # sampling an action from model\n c = torch.distributions.Categorical(probs)\n self.logs['entropy'].append(c.entropy().sum().item()) # For log\n entropys.append(c.entropy()) # For optimization\n a_t = c.sample().detach()\n policy_log_probs.append(c.log_prob(a_t))\n else:\n print(self.feedback)\n sys.exit('Invalid feedback option')\n\n # Prepare environment action\n # NOTE: Env action is in the perm_obs space\n cpu_a_t = a_t.cpu().numpy()\n for i, next_id in enumerate(cpu_a_t):\n if next_id == (candidate_leng[i]-1) or next_id == args.ignoreid or ended[i]: # The last action is <end>\n cpu_a_t[i] = -1 # Change the <end> and ignore action to -1\n\n # Make action and get the new state\n self.make_equiv_action(cpu_a_t, perm_obs, perm_idx, traj)\n obs = np.array(self.env._get_obs())\n perm_obs = obs[perm_idx] # Perm the obs for the resu\n\n # Calculate the mask and reward\n dist = np.zeros(batch_size, np.float32)\n reward = np.zeros(batch_size, np.float32)\n mask = np.ones(batch_size, np.float32)\n for i, ob in enumerate(perm_obs):\n dist[i] = ob['distance']\n if ended[i]: # If the action is already finished BEFORE THIS ACTION.\n reward[i] = 0.\n mask[i] = 0.\n else: # Calculate the reward\n action_idx = cpu_a_t[i]\n if action_idx == -1: # If the action now is end\n if dist[i] < 3: # Correct\n reward[i] = 2.\n else: # Incorrect\n reward[i] = -2.\n else: # The action is not end\n reward[i] = - (dist[i] - last_dist[i]) # Change of distance\n if reward[i] > 0: # Quantification\n reward[i] = 1\n elif reward[i] < 0:\n reward[i] = -1\n else:\n raise NameError(\"The action doesn't change the move\")\n rewards.append(reward)\n masks.append(mask)\n last_dist[:] = dist\n\n # Update the finished actions\n # -1 means ended or ignored (already ended)\n ended[:] = np.logical_or(ended, (cpu_a_t == -1))\n\n # Early exit if all ended\n if ended.all(): \n break\n\n if train_rl:\n # Last action in A2C\n input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)\n if speaker is not None:\n candidate_feat[..., :-args.angle_feat_size] *= noise\n f_t[..., :-args.angle_feat_size] *= noise\n last_h_, _, _, _ = self.decoder(input_a_t, f_t, candidate_feat,\n h_t, h1, c_t,\n ctx, ctx_mask,\n speaker is not None)\n rl_loss = 0.\n\n # NOW, A2C!!!\n # Calculate the final discounted reward\n last_value__ = self.critic(last_h_).detach() # The value esti of the last state, remove the grad for safety\n discount_reward = np.zeros(batch_size, np.float32) # The inital reward is zero\n for i in range(batch_size):\n if not ended[i]: # If the action is not ended, use the value function as the last reward\n discount_reward[i] = last_value__[i]\n\n length = len(rewards)\n total = 0\n for t in range(length-1, -1, -1):\n discount_reward = discount_reward * args.gamma + rewards[t] # If it ended, the reward will be 0\n mask_ = Variable(torch.from_numpy(masks[t]), requires_grad=False).cuda()\n clip_reward = discount_reward.copy()\n r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).cuda()\n v_ = self.critic(hidden_states[t])\n a_ = (r_ - v_).detach()\n\n # r_: The higher, the better. 
-ln(p(action)) * (discount_reward - value)\n rl_loss += (-policy_log_probs[t] * a_ * mask_).sum()\n rl_loss += (((r_ - v_) ** 2) * mask_).sum() * 0.5 # 1/2 L2 loss\n if self.feedback == 'sample':\n rl_loss += (- 0.01 * entropys[t] * mask_).sum()\n self.logs['critic_loss'].append((((r_ - v_) ** 2) * mask_).sum().item())\n\n total = total + np.sum(masks[t])\n self.logs['total'].append(total)\n\n # Normalize the loss function\n if args.normalize_loss == 'total':\n rl_loss /= total\n elif args.normalize_loss == 'batch':\n rl_loss /= batch_size\n else:\n assert args.normalize_loss == 'none'\n\n self.loss += rl_loss\n\n if train_ml is not None:\n self.loss += ml_loss * train_ml / batch_size\n\n if type(self.loss) is int: # For safety, it will be activated if no losses are added\n self.losses.append(0.)\n else:\n self.losses.append(self.loss.item() / self.episode_len) # This argument is useless.\n\n return traj\n\n def _dijkstra(self):\n \"\"\"\n The dijkstra algorithm.\n Was called beam search to be consistent with existing work.\n But it actually finds the Exact K paths with smallest listener log_prob.\n :return:\n [{\n \"scan\": XXX\n \"instr_id\":XXX,\n 'instr_encoding\": XXX\n 'dijk_path': [v1, v2, ..., vn] (The path used for find all the candidates)\n \"paths\": {\n \"trajectory\": [viewpoint_id1, viewpoint_id2, ..., ],\n \"action\": [act_1, act_2, ..., ],\n \"listener_scores\": [log_prob_act1, log_prob_act2, ..., ],\n \"visual_feature\": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)\n }\n }]\n \"\"\"\n def make_state_id(viewpoint, action): # Make state id\n return \"%s_%s\" % (viewpoint, str(action))\n def decompose_state_id(state_id): # Make state id\n viewpoint, action = state_id.split(\"_\")\n action = int(action)\n return viewpoint, action\n\n # Get first obs\n obs = self.env._get_obs()\n\n # Prepare the state id\n batch_size = len(obs)\n results = [{\"scan\": ob['scan'],\n \"instr_id\": ob['instr_id'],\n \"instr_encoding\": ob[\"instr_encoding\"],\n \"dijk_path\": [ob['viewpoint']],\n \"paths\": []} for ob in obs]\n\n # Encoder\n seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)\n recover_idx = np.zeros_like(perm_idx)\n for i, idx in enumerate(perm_idx):\n recover_idx[idx] = i\n ctx, h_t, c_t = self.encoder(seq, seq_lengths)\n ctx, h_t, c_t, ctx_mask = ctx[recover_idx], h_t[recover_idx], c_t[recover_idx], seq_mask[recover_idx] # Recover the original order\n\n # Dijk Graph States:\n id2state = [\n {make_state_id(ob['viewpoint'], -95):\n {\"next_viewpoint\": ob['viewpoint'],\n \"running_state\": (h_t[i], h_t[i], c_t[i]),\n \"location\": (ob['viewpoint'], ob['heading'], ob['elevation']),\n \"feature\": None,\n \"from_state_id\": None,\n \"score\": 0,\n \"scores\": [],\n \"actions\": [],\n }\n }\n for i, ob in enumerate(obs)\n ] # -95 is the start point\n visited = [set() for _ in range(batch_size)]\n finished = [set() for _ in range(batch_size)]\n graphs = [utils.FloydGraph() for _ in range(batch_size)] # For the navigation path\n ended = np.array([False] * batch_size)\n\n # Dijk Algorithm\n for _ in range(300):\n # Get the state with smallest score for each batch\n # If the batch is not ended, find the smallest item.\n # Else use a random item from the dict (It always exists)\n smallest_idXstate = [\n max(((state_id, state) for state_id, state in id2state[i].items() if state_id not in visited[i]),\n key=lambda item: item[1]['score'])\n if not ended[i]\n else\n next(iter(id2state[i].items()))\n for i in range(batch_size)\n ]\n\n # Set the visited and the end seqs\n for 
i, (state_id, state) in enumerate(smallest_idXstate):\n assert (ended[i]) or (state_id not in visited[i])\n if not ended[i]:\n viewpoint, action = decompose_state_id(state_id)\n visited[i].add(state_id)\n if action == -1:\n finished[i].add(state_id)\n if len(finished[i]) >= args.candidates: # Get enough candidates\n ended[i] = True\n\n # Gather the running state in the batch\n h_ts, h1s, c_ts = zip(*(idXstate[1]['running_state'] for idXstate in smallest_idXstate))\n h_t, h1, c_t = torch.stack(h_ts), torch.stack(h1s), torch.stack(c_ts)\n\n # Recover the env and gather the feature\n for i, (state_id, state) in enumerate(smallest_idXstate):\n next_viewpoint = state['next_viewpoint']\n scan = results[i]['scan']\n from_viewpoint, heading, elevation = state['location']\n self.env.env.sims[i].newEpisode(scan, next_viewpoint, heading, elevation) # Heading, elevation is not used in panoramic\n obs = self.env._get_obs()\n\n # Update the floyd graph\n # Only used to shorten the navigation length\n # Will not effect the result\n for i, ob in enumerate(obs):\n viewpoint = ob['viewpoint']\n if not graphs[i].visited(viewpoint): # Update the Graph\n for c in ob['candidate']:\n next_viewpoint = c['viewpointId']\n dis = self.env.distances[ob['scan']][viewpoint][next_viewpoint]\n graphs[i].add_edge(viewpoint, next_viewpoint, dis)\n graphs[i].update(viewpoint)\n results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], viewpoint))\n\n input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(obs)\n\n # Run one decoding step\n h_t, c_t, alpha, logit, h1 = self.decoder(input_a_t, f_t, candidate_feat,\n h_t, h1, c_t,\n ctx, ctx_mask,\n False)\n\n # Update the dijk graph's states with the newly visited viewpoint\n candidate_mask = utils.length2mask(candidate_leng)\n logit.masked_fill_(candidate_mask, -float('inf'))\n log_probs = F.log_softmax(logit, 1) # Calculate the log_prob here\n _, max_act = log_probs.max(1)\n\n for i, ob in enumerate(obs):\n current_viewpoint = ob['viewpoint']\n candidate = ob['candidate']\n current_state_id, current_state = smallest_idXstate[i]\n old_viewpoint, from_action = decompose_state_id(current_state_id)\n assert ob['viewpoint'] == current_state['next_viewpoint']\n if from_action == -1 or ended[i]: # If the action is <end> or the batch is ended, skip it\n continue\n for j in range(len(ob['candidate']) + 1): # +1 to include the <end> action\n # score + log_prob[action]\n modified_log_prob = log_probs[i][j].detach().cpu().item() \n new_score = current_state['score'] + modified_log_prob\n if j < len(candidate): # A normal action\n next_id = make_state_id(current_viewpoint, j)\n next_viewpoint = candidate[j]['viewpointId']\n trg_point = candidate[j]['pointId']\n heading = (trg_point % 12) * math.pi / 6\n elevation = (trg_point // 12 - 1) * math.pi / 6\n location = (next_viewpoint, heading, elevation)\n else: # The end action\n next_id = make_state_id(current_viewpoint, -1) # action is -1\n next_viewpoint = current_viewpoint # next viewpoint is still here\n location = (current_viewpoint, ob['heading'], ob['elevation'])\n\n if next_id not in id2state[i] or new_score > id2state[i][next_id]['score']:\n id2state[i][next_id] = {\n \"next_viewpoint\": next_viewpoint,\n \"location\": location,\n \"running_state\": (h_t[i], h1[i], c_t[i]),\n \"from_state_id\": current_state_id,\n \"feature\": (f_t[i].detach().cpu(), candidate_feat[i][j].detach().cpu()),\n \"score\": new_score,\n \"scores\": current_state['scores'] + [modified_log_prob],\n \"actions\": 
current_state['actions'] + [len(candidate)+1],\n }\n\n # The active state is zero after the updating, then setting the ended to True\n for i in range(batch_size):\n if len(visited[i]) == len(id2state[i]): # It's the last active state\n ended[i] = True\n\n # End?\n if ended.all():\n break\n\n # Move back to the start point\n for i in range(batch_size):\n results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], results[i]['dijk_path'][0]))\n \"\"\"\n \"paths\": {\n \"trajectory\": [viewpoint_id1, viewpoint_id2, ..., ],\n \"action\": [act_1, act_2, ..., ],\n \"listener_scores\": [log_prob_act1, log_prob_act2, ..., ],\n \"visual_feature\": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)\n }\n \"\"\"\n # Gather the Path\n for i, result in enumerate(results):\n assert len(finished[i]) <= args.candidates\n for state_id in finished[i]:\n path_info = {\n \"trajectory\": [],\n \"action\": [],\n \"listener_scores\": id2state[i][state_id]['scores'],\n \"listener_actions\": id2state[i][state_id]['actions'],\n \"visual_feature\": []\n }\n viewpoint, action = decompose_state_id(state_id)\n while action != -95:\n state = id2state[i][state_id]\n path_info['trajectory'].append(state['location'])\n path_info['action'].append(action)\n path_info['visual_feature'].append(state['feature'])\n state_id = id2state[i][state_id]['from_state_id']\n viewpoint, action = decompose_state_id(state_id)\n state = id2state[i][state_id]\n path_info['trajectory'].append(state['location'])\n for need_reverse_key in [\"trajectory\", \"action\", \"visual_feature\"]:\n path_info[need_reverse_key] = path_info[need_reverse_key][::-1]\n result['paths'].append(path_info)\n\n return results\n\n def beam_search(self, speaker):\n \"\"\"\n :param speaker: The speaker to be used in searching.\n :return:\n {\n \"scan\": XXX\n \"instr_id\":XXX,\n \"instr_encoding\": XXX\n \"dijk_path\": [v1, v2, ...., vn]\n \"paths\": [{\n \"trajectory\": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],\n \"action\": [act_1, act_2, ..., ],\n \"listener_scores\": [log_prob_act1, log_prob_act2, ..., ],\n \"speaker_scores\": [log_prob_word1, log_prob_word2, ..., ],\n }]\n }\n \"\"\"\n self.env.reset()\n results = self._dijkstra()\n \"\"\"\n return from self._dijkstra()\n [{\n \"scan\": XXX\n \"instr_id\":XXX,\n \"instr_encoding\": XXX\n \"dijk_path\": [v1, v2, ...., vn]\n \"paths\": [{\n \"trajectory\": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],\n \"action\": [act_1, act_2, ..., ],\n \"listener_scores\": [log_prob_act1, log_prob_act2, ..., ],\n \"visual_feature\": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)\n }]\n }]\n \"\"\"\n\n # Compute the speaker scores:\n for result in results:\n lengths = []\n num_paths = len(result['paths'])\n for path in result['paths']:\n assert len(path['trajectory']) == (len(path['visual_feature']) + 1)\n lengths.append(len(path['visual_feature']))\n max_len = max(lengths)\n img_feats = torch.zeros(num_paths, max_len, 36, self.feature_size + args.angle_feat_size)\n can_feats = torch.zeros(num_paths, max_len, self.feature_size + args.angle_feat_size)\n for j, path in enumerate(result['paths']):\n for k, feat in enumerate(path['visual_feature']):\n img_feat, can_feat = feat\n img_feats[j][k] = img_feat\n can_feats[j][k] = can_feat\n img_feats, can_feats = img_feats.cuda(), can_feats.cuda()\n features = ((img_feats, can_feats), lengths)\n insts = np.array([result['instr_encoding'] for _ in range(num_paths)])\n seq_lengths = np.argmax(insts == self.tok.word_to_index['<EOS>'], axis=1) # 
len(seq + 'BOS') == len(seq + 'EOS')\n insts = torch.from_numpy(insts).cuda()\n speaker_scores = speaker.teacher_forcing(train=True, features=features, insts=insts, for_listener=True)\n for j, path in enumerate(result['paths']):\n path.pop(\"visual_feature\")\n path['speaker_scores'] = -speaker_scores[j].detach().cpu().numpy()[:seq_lengths[j]]\n return results\n\n def beam_search_test(self, speaker):\n self.encoder.eval()\n self.decoder.eval()\n self.critic.eval()\n\n looped = False\n self.results = {}\n while True:\n for traj in self.beam_search(speaker):\n if traj['instr_id'] in self.results:\n looped = True\n else:\n self.results[traj['instr_id']] = traj\n if looped:\n break\n\n def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, iters=None):\n ''' Evaluate once on each instruction in the current environment '''\n self.feedback = feedback\n if use_dropout:\n self.encoder.train()\n self.decoder.train()\n self.critic.train()\n else:\n self.encoder.eval()\n self.decoder.eval()\n self.critic.eval()\n super(Seq2SeqAgent, self).test(iters)\n\n def zero_grad(self):\n self.loss = 0.\n self.losses = []\n for model, optimizer in zip(self.models, self.optimizers):\n model.train()\n optimizer.zero_grad()\n\n def accumulate_gradient(self, feedback='teacher', **kwargs):\n if feedback == 'teacher':\n self.feedback = 'teacher'\n self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs)\n elif feedback == 'sample':\n self.feedback = 'teacher'\n self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs)\n self.feedback = 'sample'\n self.rollout(train_ml=None, train_rl=True, **kwargs)\n else:\n assert False\n\n def optim_step(self):\n self.loss.backward()\n\n torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)\n torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)\n\n self.encoder_optimizer.step()\n self.decoder_optimizer.step()\n self.critic_optimizer.step()\n\n def train(self, n_iters, feedback='teacher', **kwargs):\n ''' Train for a given number of iterations '''\n self.feedback = feedback\n\n self.encoder.train()\n self.decoder.train()\n self.critic.train()\n\n self.losses = []\n for iter in tqdm(range(1, n_iters + 1)):\n\n self.encoder_optimizer.zero_grad()\n self.decoder_optimizer.zero_grad()\n self.critic_optimizer.zero_grad()\n\n self.loss = 0\n if feedback == 'teacher':\n self.feedback = 'teacher'\n self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs)\n elif feedback == 'sample':\n if args.ml_weight != 0:\n self.feedback = 'teacher'\n self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs)\n self.feedback = 'sample'\n self.rollout(train_ml=None, train_rl=True, **kwargs)\n else:\n assert False\n\n self.loss.backward()\n\n torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)\n torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)\n\n self.encoder_optimizer.step()\n self.decoder_optimizer.step()\n self.critic_optimizer.step()\n\n def save(self, epoch, path):\n ''' Snapshot models '''\n the_dir, _ = os.path.split(path)\n os.makedirs(the_dir, exist_ok=True)\n states = {}\n def create_state(name, model, optimizer):\n states[name] = {\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n all_tuple = [(\"encoder\", self.encoder, self.encoder_optimizer),\n (\"decoder\", self.decoder, self.decoder_optimizer),\n (\"critic\", self.critic, self.critic_optimizer)]\n for param in all_tuple:\n create_state(*param)\n torch.save(states, path)\n\n def load(self, path):\n 
''' Loads parameters (but not training state) '''\n        states = torch.load(path)\n        def recover_state(name, model, optimizer):\n            state = model.state_dict()\n            model_keys = set(state.keys())\n            load_keys = set(states[name]['state_dict'].keys())\n            if model_keys != load_keys:\n                print(\"NOTICE: DIFFERENT KEYS IN THE LISTENER\")\n            state.update(states[name]['state_dict'])\n            model.load_state_dict(state)\n            if args.loadOptim:\n                optimizer.load_state_dict(states[name]['optimizer'])\n        all_tuple = [(\"encoder\", self.encoder, self.encoder_optimizer),\n                     (\"decoder\", self.decoder, self.decoder_optimizer),\n                     (\"critic\", self.critic, self.critic_optimizer)]\n        for param in all_tuple:\n            recover_state(*param)\n        return states['encoder']['epoch'] - 1\n\n"
] |
[
[
"torch.distributions.Categorical",
"torch.stack",
"torch.ones",
"torch.load",
"torch.nn.CrossEntropyLoss",
"numpy.concatenate",
"numpy.zeros_like",
"torch.autograd.Variable",
"numpy.argmax",
"torch.zeros",
"numpy.logical_or",
"numpy.array",
"numpy.zeros",
"torch.save",
"torch.nn.functional.log_softmax",
"torch.nn.functional.softmax",
"numpy.sum",
"numpy.ones",
"torch.from_numpy"
]
] |
seanschneeweiss/RoSeMotion
|
[
"4ef7997c8976a8489798a427c768af5114f6b31e"
] |
[
"app/resources/pymo/pymo/parsers.py"
] |
[
"'''\nBVH Parser Class\n\nBy Omid Alemi\nCreated: June 12, 2017\n\nBased on: https://gist.github.com/johnfredcee/2007503\n\n'''\nimport re\nimport numpy as np\nfrom data import Joint, MocapData\n\n\nclass BVHScanner:\n '''\n A wrapper class for re.Scanner\n '''\n def __init__(self):\n\n def identifier(scanner, token):\n return 'IDENT', token\n\n def operator(scanner, token):\n return 'OPERATOR', token\n\n def digit(scanner, token):\n return 'DIGIT', token\n\n def open_brace(scanner, token):\n return 'OPEN_BRACE', token\n\n def close_brace(scanner, token):\n return 'CLOSE_BRACE', token\n\n self.scanner = re.Scanner([\n (r'[a-zA-Z_]\\w*', identifier),\n #(r'-*[0-9]+(\\.[0-9]+)?', digit), # won't work for .34\n #(r'[-+]?[0-9]*\\.?[0-9]+', digit), # won't work for 4.56e-2\n #(r'[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?', digit),\n (r'-*[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?', digit),\n (r'}', close_brace),\n (r'}', close_brace),\n (r'{', open_brace),\n (r':', None),\n (r'\\s+', None)\n ])\n\n def scan(self, stuff):\n return self.scanner.scan(stuff)\n\n\n\nclass BVHParser():\n '''\n A class to parse a BVH file.\n \n Extracts the skeleton and channel values\n '''\n def __init__(self, filename=None):\n self.reset()\n\n def reset(self): \n self._skeleton = {}\n self.bone_context = []\n self._motion_channels = []\n self._motions = []\n self.current_token = 0\n self.framerate = 0.0\n self.root_name = ''\n\n self.scanner = BVHScanner()\n \n self.data = MocapData()\n\n\n def parse(self, filename):\n self.reset()\n\n with open(filename, 'r') as bvh_file:\n raw_contents = bvh_file.read()\n tokens, remainder = self.scanner.scan(raw_contents)\n self._parse_hierarchy(tokens)\n self.current_token = self.current_token + 1\n self._parse_motion(tokens)\n \n self.data.skeleton = self._skeleton\n self.data.channel_names = self._motion_channels\n self.data.values = self._to_DataFrame()\n self.data.root_name = self.root_name\n self.data.framerate = self.framerate\n\n return self.data\n \n def _to_DataFrame(self):\n '''Returns all of the channels parsed from the file as a pandas DataFrame'''\n\n import pandas as pd\n time_index = pd.to_timedelta([f[0] for f in self._motions], unit='s')\n frames = [f[1] for f in self._motions]\n channels = np.asarray([[channel[2] for channel in frame] for frame in frames])\n column_names = ['%s_%s'%(c[0], c[1]) for c in self._motion_channels]\n\n return pd.DataFrame(data=channels, index=time_index, columns=column_names)\n\n\n def _new_bone(self, parent, name):\n bone = {'parent': parent, 'channels': [], 'offsets': [],'children': []}\n return bone\n\n def _push_bone_context(self,name):\n self.bone_context.append(name)\n\n def _get_bone_context(self):\n return self.bone_context[len(self.bone_context)-1]\n\n def _pop_bone_context(self):\n self.bone_context = self.bone_context[:-1]\n return self.bone_context[len(self.bone_context)-1]\n\n def _read_offset(self, bvh, token_index):\n if bvh[token_index] != ('IDENT', 'OFFSET'):\n return None, None\n token_index = token_index + 1\n offsets = [0.0] * 3\n for i in range(3):\n offsets[i] = float(bvh[token_index][1])\n token_index = token_index + 1\n return offsets, token_index\n \n def _read_channels(self, bvh, token_index):\n if bvh[token_index] != ('IDENT', 'CHANNELS'):\n return None, None\n token_index = token_index + 1\n channel_count = int(bvh[token_index][1])\n token_index = token_index + 1\n channels = [\"\"] * channel_count\n for i in range(channel_count):\n channels[i] = bvh[token_index][1]\n token_index = token_index + 1\n return channels, 
token_index\n\n def _parse_joint(self, bvh, token_index):\n end_site = False\n joint_id = bvh[token_index][1]\n token_index = token_index + 1\n joint_name = bvh[token_index][1]\n token_index = token_index + 1\n \n parent_name = self._get_bone_context()\n\n if (joint_id == \"End\"):\n joint_name = parent_name+ '_Nub'\n end_site = True\n joint = self._new_bone(parent_name, joint_name)\n if bvh[token_index][0] != 'OPEN_BRACE':\n print('Was expecting brance, got ', bvh[token_index])\n return None\n token_index = token_index + 1\n offsets, token_index = self._read_offset(bvh, token_index)\n joint['offsets'] = offsets\n if not end_site:\n channels, token_index = self._read_channels(bvh, token_index)\n joint['channels'] = channels\n for channel in channels:\n self._motion_channels.append((joint_name, channel))\n\n self._skeleton[joint_name] = joint\n self._skeleton[parent_name]['children'].append(joint_name)\n\n while (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'JOINT') or (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'End'):\n self._push_bone_context(joint_name)\n token_index = self._parse_joint(bvh, token_index)\n self._pop_bone_context()\n\n if bvh[token_index][0] == 'CLOSE_BRACE':\n return token_index + 1\n\n print('Unexpected token ', bvh[token_index])\n\n def _parse_hierarchy(self, bvh):\n self.current_token = 0\n if bvh[self.current_token] != ('IDENT', 'HIERARCHY'):\n return None\n self.current_token = self.current_token + 1\n if bvh[self.current_token] != ('IDENT', 'ROOT'):\n return None\n self.current_token = self.current_token + 1\n if bvh[self.current_token][0] != 'IDENT':\n return None\n\n root_name = bvh[self.current_token][1]\n root_bone = self._new_bone(None, root_name)\n self.current_token = self.current_token + 2 #skipping open brace\n offsets, self.current_token = self._read_offset(bvh, self.current_token)\n channels, self.current_token = self._read_channels(bvh, self.current_token)\n root_bone['offsets'] = offsets\n root_bone['channels'] = channels\n self._skeleton[root_name] = root_bone\n self._push_bone_context(root_name)\n \n for channel in channels:\n self._motion_channels.append((root_name, channel))\n\n while bvh[self.current_token][1] == 'JOINT':\n self.current_token = self._parse_joint(bvh, self.current_token)\n \n self.root_name = root_name\n\n def _parse_motion(self, bvh):\n if bvh[self.current_token][0] != 'IDENT':\n print('Unexpected text')\n return None\n if bvh[self.current_token][1] != 'MOTION':\n print('No motion section')\n return None\n self.current_token = self.current_token + 1\n if bvh[self.current_token][1] != 'Frames':\n return None\n self.current_token = self.current_token + 1\n frame_count = int(bvh[self.current_token][1])\n self.current_token = self.current_token + 1\n if bvh[self.current_token][1] != 'Frame':\n return None\n self.current_token = self.current_token + 1\n if bvh[self.current_token][1] != 'Time':\n return None\n self.current_token = self.current_token + 1\n frame_rate = float(bvh[self.current_token][1])\n\n self.framerate = frame_rate\n \n self.current_token = self.current_token + 1\n \n frame_time = 0.0\n self._motions = [()] * frame_count\n for i in range(frame_count):\n channel_values = []\n for channel in self._motion_channels:\n channel_values.append((channel[0], channel[1], float(bvh[self.current_token][1])))\n self.current_token = self.current_token + 1\n self._motions[i] = (frame_time, channel_values)\n frame_time = frame_time + frame_rate\n"
] |
[
[
"pandas.DataFrame",
"pandas.to_timedelta",
"numpy.asarray"
]
] |
li012589/NeuralWavelet
|
[
"6e593ded5cb4ae80579cbf56eb9c346d808669cb"
] |
[
"test/test_cdf.py"
] |
[
"\nimport os\nimport sys\nsys.path.append(os.getcwd())\n\nimport numpy as np\nimport torch\nimport flow\nfrom utils import cdfDiscreteLogitstic, cdfMixDiscreteLogistic\nfrom utils import logDiscreteLogistic, logMixDiscreteLogistic\n\nnbins = 4096\n_bins = torch.arange(-nbins // 2, nbins // 2).reshape(-1, 1, 1, 1, 1)\ndecimal = flow.ScalingNshifting(256, -128)\n\n\ndef test_disLogisticCDF():\n logscale = torch.tensor(\n [[[[-3.6826, -3.0157, -3.6032],\n [-3.7063, -3.0269, -3.5338],\n [-3.5311, -2.9907, -3.3516],\n [-3.9300, -3.3121, -3.8110]],\n\n [[-3.1022, -3.0692, -3.2039],\n [-2.9466, -3.0006, -3.2969],\n [-2.7636, -2.5691, -2.9628],\n [-3.3657, -3.2948, -3.5318]],\n\n [[-3.9748, -3.0670, -3.2399],\n [-3.9312, -3.0055, -3.1729],\n [-3.8588, -2.9139, -3.1794],\n [-4.1534, -3.2404, -3.5665]]]]\n )\n\n mean = torch.tensor(\n [[[[ 0.0191, 0.0459, 0.0131],\n [-0.0059, 0.0254, -0.0100],\n [ 0.0359, 0.0406, 0.0242],\n [ 0.0331, 0.0438, 0.0255]],\n\n [[ 0.0214, 0.0502, 0.0622],\n [ 0.0371, 0.0368, 0.0517],\n [ 0.0217, 0.0855, 0.0874],\n [ 0.0144, 0.0475, 0.0470]],\n\n [[-0.0602, -0.0791, -0.0784],\n [-0.0443, -0.0765, -0.0701],\n [-0.0654, -0.0709, -0.0788],\n [-0.0608, -0.0721, -0.0688]]]]\n )\n\n bins = _bins - 1 + torch.round(decimal.forward_(mean))\n\n cdf = cdfDiscreteLogitstic(bins, mean, logscale, decimal=decimal).detach().numpy()\n\n pList = []\n for i in range(bins.shape[0]):\n logp = logDiscreteLogistic(bins[i: i + 1], mean, logscale, decimal=decimal).detach().numpy()\n pList.append(np.exp(logp).reshape(mean.shape))\n pList = np.array(pList)\n\n _cdf = np.cumsum(pList, 0)\n\n assert np.allclose(cdf, _cdf)\n\n\ndef test_mixDixLogisticCDF():\n mean = torch.tensor(\n [[[[-0.2414, 0.2089, -0.0209, -0.1279]],\n [[ 0.7791, 0.1031, 0.0940, 0.1678]],\n [[ 0.0095, 0.0391, -0.0318, -0.2183]]],\n [[[-0.1466, 0.2090, -0.0594, -0.0837]],\n [[ 0.8711, 0.0540, 0.0940, 0.0859]],\n [[-0.0683, -0.0204, -0.0340, -0.0587]]],\n [[[-0.1994, -0.0442, -0.0307, -0.0823]],\n [[ 1.0158, 0.0636, 0.0832, 0.0717]],\n [[-0.1863, -0.0177, -0.0293, -0.0708]]],\n [[[-0.3517, 0.1062, -0.0362, -0.1661]],\n [[ 0.6567, 0.1452, 0.0294, 0.0864]],\n [[-0.1384, -0.0171, -0.0195, -0.0710]]],\n [[[-0.3158, 0.2068, 0.1114, -0.1251]],\n [[ 0.5600, 0.1987, 0.1891, 0.1754]],\n [[-0.2758, -0.1032, -0.0435, -0.1156]]]])\n logscale = torch.tensor(\n [[[[-3.1292, -4.0168, -3.2886, -2.5948]],\n [[-2.8226, -2.3489, -2.8613, -2.3892]],\n [[-3.3502, -3.4929, -2.9572, -2.7060]]],\n [[[-3.4556, -4.0166, -2.7471, -3.1203]],\n [[-2.6906, -3.6062, -2.8620, -3.0673]],\n [[-3.2775, -3.3661, -3.2897, -4.0553]]],\n [[[-3.4652, -3.3828, -3.3053, -3.6945]],\n [[-2.7657, -2.9172, -3.4067, -3.7734]],\n [[-3.4817, -3.0397, -2.8021, -3.1398]]],\n [[[-2.7246, -3.7798, -4.1237, -2.8605]],\n [[-3.0524, -2.6628, -2.4833, -3.0913]],\n [[-4.0249, -3.8364, -3.7608, -2.7111]]],\n [[[-3.5460, -4.0208, -2.9837, -3.1288]],\n [[-3.2062, -2.1702, -2.2238, -2.6122]],\n [[-3.1754, -3.0892, -2.3359, -2.4321]]]])\n mixing = torch.tensor(\n [[[[ 1.3161, 0.8664, 1.7648, -0.7598, -0.8658],\n [-3.7472, -3.6553, 5.2783, 0.2242, -3.6304],\n [-0.7378, 0.2730, 1.8044, 0.7450, -1.6218],\n [-0.8105, 1.8833, 1.8243, -0.7879, -1.1211]]],\n [[[ 1.3952, -0.8232, -1.0135, 1.8041, 0.9846],\n [-0.4372, 1.1296, 1.5473, -0.0661, -0.5995],\n [-0.5167, 1.5559, 1.2607, -0.3227, -0.8687],\n [-0.6226, 1.5024, 1.4221, 1.4741, -0.4409]]],\n [[[ 1.3045, 1.8551, 0.1755, -0.6253, -1.2045],\n [-0.9858, 1.5529, -0.6332, 1.4569, -1.1089],\n [-0.5954, 1.2305, 1.4068, 0.7919, -0.3811],\n [-0.2997, 
0.6804, 2.0660, 1.1353, -0.9155]]]])\n\n bins = _bins - 1 + torch.round(decimal.forward_(mean.permute([1, 2, 3, 0])) * mixing).sum(-1).reshape(1, *mean.shape[1:])\n cdf = cdfMixDiscreteLogistic(bins, mean, logscale, mixing, decimal=decimal)\n\n pList = []\n for i in range(bins.shape[0]):\n logp = logMixDiscreteLogistic(bins[i: i + 1], mean, logscale, mixing, decimal=decimal).detach().numpy()\n pList.append(np.exp(logp).reshape(logp.shape[1:]))\n pList = np.array(pList)\n\n _cdf = np.cumsum(pList, 0)\n\n assert np.allclose(cdf, _cdf)\n\n\nif __name__ == \"__main__\":\n\n test_disLogisticCDF()\n test_mixDixLogisticCDF()"
] |
[
[
"numpy.array",
"torch.arange",
"numpy.exp",
"numpy.allclose",
"torch.tensor",
"numpy.cumsum"
]
] |
nickcanz/chime
|
[
"cb03218ee5cc71b92704c8be379924ac459259d7"
] |
[
"src/penn_chime/charts.py"
] |
[
"\nfrom math import ceil\nimport datetime\n\nfrom altair import Chart # type: ignore\nimport pandas as pd # type: ignore\nimport numpy as np\n\nfrom .parameters import Parameters\nfrom .utils import add_date_column\nfrom .presentation import DATE_FORMAT\n\n\ndef new_admissions_chart(\n alt, projection_admits: pd.DataFrame, parameters: Parameters\n) -> Chart:\n \"\"\"docstring\"\"\"\n plot_projection_days = parameters.n_days - 10\n max_y_axis = parameters.max_y_axis\n as_date = parameters.as_date\n\n y_scale = alt.Scale()\n\n if max_y_axis is not None:\n y_scale.domain = (0, max_y_axis)\n\n tooltip_dict = {False: \"day\", True: \"date:T\"}\n if as_date:\n projection_admits = add_date_column(projection_admits)\n x_kwargs = {\"shorthand\": \"date:T\", \"title\": \"Date\", \"axis\": alt.Axis(format=(DATE_FORMAT))}\n else:\n x_kwargs = {\"shorthand\": \"day\", \"title\": \"Days from today\"}\n\n # TODO fix the fold to allow any number of dispositions\n\n ceiled_admits = projection_admits.copy()\n\n ceiled_admits.hospitalized = np.ceil(ceiled_admits.hospitalized)\n ceiled_admits.icu = np.ceil(ceiled_admits.icu)\n ceiled_admits.ventilated = np.ceil(ceiled_admits.ventilated)\n\n return (\n alt.Chart(ceiled_admits.head(plot_projection_days))\n .transform_fold(fold=[\"hospitalized\", \"icu\", \"ventilated\"])\n .mark_line(point=True)\n .encode(\n x=alt.X(**x_kwargs),\n y=alt.Y(\"value:Q\", title=\"Daily admissions\", scale=y_scale),\n color=\"key:N\",\n tooltip=[\n tooltip_dict[as_date],\n alt.Tooltip(\"value:Q\", format=\".0f\", title=\"Admissions\"),\n \"key:N\",\n ],\n )\n .interactive()\n )\n\n\ndef admitted_patients_chart(\n alt, census: pd.DataFrame, parameters: Parameters\n) -> Chart:\n \"\"\"docstring\"\"\"\n\n plot_projection_days = parameters.n_days - 10\n max_y_axis = parameters.max_y_axis\n as_date = parameters.as_date\n if as_date:\n census = add_date_column(census)\n x_kwargs = {\"shorthand\": \"date:T\", \"title\": \"Date\", \"axis\": alt.Axis(format=(DATE_FORMAT))}\n idx = \"date:T\"\n else:\n x_kwargs = {\"shorthand\": \"day\", \"title\": \"Days from today\"}\n idx = \"day\"\n\n y_scale = alt.Scale()\n\n if max_y_axis:\n y_scale.domain = (0, max_y_axis)\n\n # TODO fix the fold to allow any number of dispositions\n return (\n alt.Chart(census.head(plot_projection_days))\n .transform_fold(fold=[\"hospitalized\", \"icu\", \"ventilated\"])\n .mark_line(point=True)\n .encode(\n x=alt.X(**x_kwargs),\n y=alt.Y(\"value:Q\", title=\"Census\", scale=y_scale),\n color=\"key:N\",\n tooltip=[\n idx,\n alt.Tooltip(\"value:Q\", format=\".0f\", title=\"Census\"),\n \"key:N\",\n ],\n )\n .interactive()\n )\n\n\ndef additional_projections_chart(\n alt, model, parameters\n) -> Chart:\n\n # TODO use subselect of df_raw instead of creating a new df\n raw_df = model.raw_df\n dat = pd.DataFrame({\n \"infected\": raw_df.infected,\n \"recovered\": raw_df.recovered\n })\n dat[\"day\"] = dat.index\n\n as_date = parameters.as_date\n max_y_axis = parameters.max_y_axis\n\n if as_date:\n dat = add_date_column(dat)\n x_kwargs = {\"shorthand\": \"date:T\", \"title\": \"Date\", \"axis\": alt.Axis(format=(DATE_FORMAT))}\n else:\n x_kwargs = {\"shorthand\": \"day\", \"title\": \"Days from today\"}\n\n y_scale = alt.Scale()\n\n if max_y_axis is not None:\n y_scale.domain = (0, max_y_axis)\n\n return (\n alt.Chart(dat)\n .transform_fold(fold=[\"infected\", \"recovered\"])\n .mark_line()\n .encode(\n x=alt.X(**x_kwargs),\n y=alt.Y(\"value:Q\", title=\"Case Volume\", scale=y_scale),\n tooltip=[\"key:N\", \"value:Q\"],\n 
color=\"key:N\",\n )\n .interactive()\n )\n\n\ndef chart_descriptions(chart: Chart, labels, suffix: str = \"\"):\n \"\"\"\n\n :param chart: Chart: The alt chart to be used in finding max points\n :param suffix: str: The assumption is that the charts have similar column names.\n The census chart adds \" Census\" to the column names.\n Make sure to include a space or underscore as appropriate\n :return: str: Returns a multi-line string description of the results\n \"\"\"\n messages = []\n\n cols = [\"hospitalized\", \"icu\", \"ventilated\"]\n asterisk = False\n day = \"date\" if \"date\" in chart.data.columns else \"day\"\n\n for col in cols:\n if chart.data[col].idxmax() + 1 == len(chart.data):\n asterisk = True\n\n on = chart.data[day][chart.data[col].idxmax()]\n if day == \"date\":\n on = datetime.datetime.strftime(on, \"%b %d\") # todo: bring this to an optional arg / i18n\n else:\n on += 1 # 0 index issue\n\n messages.append(\n \"{}{} peaks at {:,} on day {}{}\".format(\n labels[col],\n suffix,\n ceil(chart.data[col].max()),\n on,\n \"*\" if asterisk else \"\",\n )\n )\n\n if asterisk:\n messages.append(\"_* The max is at the upper bound of the data, and therefore may not be the actual max_\")\n return \"\\n\\n\".join(messages)\n"
] |
[
[
"pandas.DataFrame",
"numpy.ceil"
]
] |
ahijevyc/NSC_objects
|
[
"322728a71ec011b681b0038e9dcd86df1f73b2fd"
] |
[
"neural_network_lrp.py"
] |
[
"#!/usr/bin/env python\n\nimport numpy as np\nimport datetime as dt\nimport sys, os, pickle, time\nfrom keras.models import Model, save_model, load_model\nfrom keras.regularizers import l2\nfrom keras.optimizers import SGD, Adam\nimport keras.backend as K\nimport tensorflow as tf\nimport pandas as pd\nimport innvestigate\nimport innvestigate.utils as iutils\n\nfrom ml_functions import read_csv_files, normalize_multivariate_data, log, get_features\n\ndef brier_score_keras(obs, preds):\n return K.mean((preds - obs) ** 2)\n\ndef brier_skill_score_keras(obs, preds):\n climo = K.mean((obs - K.mean(obs)) ** 2)\n bs = brier_score_keras(obs, preds)\n ratio = (bs / climo)\n return climo\n\ndef auc(obs, preds):\n auc = tf.metrics.auc(obs, preds)[1]\n K.get_session().run(tf.local_variables_initializer())\n return auc\n\ndef log(msg):\n print( time.ctime(time.time()), msg ) \n\n### NEURAL NETWORK PARAMETERS ###\nnn_params = { 'num_layers': 1, 'num_neurons': [ 1024 ], 'dropout': 0.1, 'lr': 0.001, 'num_epochs': 30, \\\n 'report_window_space':[ int(sys.argv[1]) ], 'report_window_time':[ int(sys.argv[2]) ] }\n\ndataset = 'RT2020'\nscaling_dataset = 'NSC3km-12sec'\nscaling_file = '/glade/work/sobash/NSC_objects/scaling_values_all_%s.pk'%scaling_dataset\n\ntrained_models_dir = '/glade/work/sobash/NSC_objects/trained_models_paper'\n\nsdate = dt.datetime(2020,5,1,0,0,0)\nedate = dt.datetime(2020,5,10,0,0,0)\ndateinc = dt.timedelta(days=1)\n\nfeatures = get_features('basic')\n\nlog('Reading Data')\n# read data and reassign data types to float32 to save memory\ntype_dict = {}\nfor f in features: type_dict[f]='float32'\n\ndf, numfcsts = read_csv_files(sdate, edate, dataset)\nprint(numfcsts)\n\nscaling_values = pickle.load(open(scaling_file, 'rb'))\nnorm_in_data, scaling_values = normalize_multivariate_data(df[features].values.astype(np.float32), features, scaling_values=scaling_values)\n\ndense_model = None\nmodel_fname = '%s/neural_network_2016_120km_2hr_nn%d_drop%.1f_basic.h5'%(trained_models_dir,nn_params['num_neurons'][0],nn_params['dropout'])\ndense_model = load_model(model_fname, custom_objects={'brier_score_keras': brier_score_keras, 'brier_skill_score_keras':brier_skill_score_keras, 'auc':auc })\n\nprint(norm_in_data.shape)\n\nanalyzer = innvestigate.create_analyzer('lrp.alpha_2_beta_1', dense_model, neuron_selection_mode='index')\na = analyzer.analyze(norm_in_data, 0)\n\na /= np.max(np.abs(a))\n\na = a.reshape((36,1298,-1))\na = np.mean(a[24,:,:], axis=0)\nprint(a.shape)\n\nfor i,f in enumerate(features): \n print(f, a[i])\n\nlog('Finished')\n"
] |
[
[
"tensorflow.local_variables_initializer",
"numpy.abs",
"tensorflow.metrics.auc",
"numpy.mean"
]
] |
MahendraSondagar/STMicroelectronics
|
[
"1b3cab9da8e9a23b2372573b08f6a55ea4424668"
] |
[
"SensorTile/STM32CubeFunctionPack_SENSING1_V4.0.2/Middlewares/ST/STM32_AI_AudioPreprocessing_Library/Python/MFCC.py"
] |
[
"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# This software component is licensed by ST under BSD 3-Clause license,\r\n# the \"License\"; You may not use this file except in compliance with the\r\n# License. You may obtain a copy of the License at:\r\n# https://opensource.org/licenses/BSD-3-Clause\r\n\r\n\r\n\"\"\"KWS Feature Extraction example.\"\"\"\r\n\r\nimport numpy as np\r\nimport librosa\r\nimport scipy\r\nfrom scipy.signal import hann\r\nfrom scipy.fftpack import dct\r\n\r\n\r\ndef mfcc_col(buff_test):\r\n\r\n window = 2048\r\n half_window = int(window / 2)\r\n n_mels = 128\r\n n_coeff = 13\r\n\r\n assert buff_test.shape == (window,)\r\n\r\n hann_asym_f32 = hann(window, sym=False).astype('float32')\r\n assert hann_asym_f32.shape == (window,), hann_asym_f32.shape\r\n\r\n buff_hann = buff_test * hann_asym_f32\r\n assert buff_hann.shape == (window,), buff_hann.shape\r\n\r\n fft = np.fft.fft(buff_hann, window)[:half_window + 1]\r\n assert fft.shape == (half_window + 1,), fft.shape\r\n\r\n ps = np.abs(fft)**2\r\n assert ps.shape == (half_window + 1,)\r\n\r\n mel = librosa.filters.mel(sr, window, n_mels)\r\n assert mel.shape == (n_mels, half_window + 1)\r\n\r\n energy = np.dot(mel, ps)\r\n assert energy.shape == (n_mels,)\r\n\r\n logamplitude = 10 * np.log10(energy)\r\n assert logamplitude.shape == (n_mels,)\r\n\r\n dct_out = dct(logamplitude, type=3)\r\n assert dct_out.shape == (n_mels,)\r\n\r\n return(dct_out[1:(n_coeff + 1)])\r\n\r\n\r\n# buffer_bus_01 is made of first 2048 samples of \"bus.wav\" file\r\nsr, ys = scipy.io.wavfile.read(\"bus.wav\")\r\n\r\nbuffer_01 = ys[0:2048]\r\n\r\nmfcc_col = mfcc_col(buffer_01)\r\n\r\nprint('mfcc = ', mfcc_col[:])\r\n"
] |
[
[
"scipy.io.wavfile.read",
"numpy.dot",
"scipy.signal.hann",
"numpy.fft.fft",
"numpy.abs",
"numpy.log10",
"scipy.fftpack.dct"
]
] |
sourcery-ai-bot/datasets
|
[
"b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28",
"b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28"
] |
[
"tensorflow_datasets/image_classification/imagenet2012_real.py",
"tensorflow_datasets/summarization/samsum.py"
] |
[
"# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Imagenet val. annotated by ReaL labels (https://arxiv.org/abs/2006.07159).\"\"\"\n\nimport json\nimport os\nimport tarfile\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets.public_api as tfds\n\n\n_DESCRIPTION = '''\\\nThis dataset contains ILSVRC-2012 (ImageNet) validation images augmented with a\nnew set of \"Re-Assessed\" (ReaL) labels from the \"Are we done with ImageNet\"\npaper, see https://arxiv.org/abs/2006.07159. These labels are collected using\nthe enhanced protocol, resulting in multi-label and more accurate annotations.\n\nImportant note: about 3500 examples contain no label, these should be [excluded\nfrom the averaging when computing the accuracy](https://github.com/google-research/reassessed-imagenet#numpy).\nOne possible way of doing this is with the following NumPy code:\n\n```python\nis_correct = [pred in real_labels[i] for i, pred in enumerate(predictions) if real_labels[i]]\nreal_accuracy = np.mean(is_correct)\n```\n'''\n\n_CITATION = '''\\\n@article{beyer2020imagenet,\n title={Are we done with ImageNet?},\n author={Lucas Beyer and Olivier J. Henaff and Alexander Kolesnikov and Xiaohua Zhai and Aaron van den Oord},\n journal={arXiv preprint arXiv:2002.05709},\n year={2020}\n}\n@article{ILSVRC15,\n Author={Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. 
Berg and Li Fei-Fei},\n Title={{ImageNet Large Scale Visual Recognition Challenge}},\n Year={2015},\n journal={International Journal of Computer Vision (IJCV)},\n doi={10.1007/s11263-015-0816-y},\n volume={115},\n number={3},\n pages={211-252}\n}\n'''\n\n_VALIDATION_LABELS_FNAME = 'image_classification/imagenet2012_validation_labels.txt'\n_LABELS_FNAME = 'image_classification/imagenet2012_labels.txt'\n\n_REAL_LABELS_URL = 'https://raw.githubusercontent.com/google-research/reassessed-imagenet/master/real.json'\n\n\nclass Imagenet2012Real(tfds.core.GeneratorBasedBuilder):\n \"\"\"ImageNet validation images with ReaL labels.\"\"\"\n\n VERSION = tfds.core.Version('1.0.0')\n RELEASE_NOTES = {\n '1.0.0': 'Initial release',\n }\n\n MANUAL_DOWNLOAD_INSTRUCTIONS = \"\"\"\\\n manual_dir should contain `ILSVRC2012_img_val.tar` file.\n You need to register on http://www.image-net.org/download-images in order\n to get the link to download the dataset.\n \"\"\"\n\n def _info(self):\n names_file = tfds.core.tfds_path(_LABELS_FNAME)\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n 'image': tfds.features.Image(encoding_format='jpeg'),\n 'original_label': tfds.features.ClassLabel(names_file=names_file),\n 'real_label': tfds.features.Sequence(\n tfds.features.ClassLabel(names_file=names_file)),\n 'file_name': tfds.features.Text(),\n }),\n supervised_keys=('image', 'real_label'),\n homepage='https://github.com/google-research/reassessed-imagenet',\n citation=_CITATION,\n )\n\n def _get_real_labels(self, dl_manager):\n with tf.io.gfile.GFile(dl_manager.download(_REAL_LABELS_URL), 'r') as f:\n # ReaL labels are ordered in the lexicographical order.\n return {'ILSVRC2012_val_{:08}.JPEG'.format(i + 1): labels\n for i, labels in enumerate(json.load(f))}\n\n @staticmethod\n def _get_original_labels(val_path):\n \"\"\"Returns labels for validation.\n\n Args:\n val_path: path to TAR file containing validation images. It is used to\n retrieve the name of pictures and associate them to labels.\n\n Returns:\n dict, mapping from image name (str) to label (str).\n \"\"\"\n labels_path = os.fspath(tfds.core.tfds_path(_VALIDATION_LABELS_FNAME))\n with tf.io.gfile.GFile(labels_path) as labels_f:\n # `splitlines` to remove trailing `\\r` in Windows\n labels = labels_f.read().strip().splitlines()\n with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:\n tar = tarfile.open(mode='r:', fileobj=tar_f_obj)\n images = sorted(tar.getnames())\n return dict(zip(images, labels))\n\n def _split_generators(self, dl_manager):\n val_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_val.tar')\n if not tf.io.gfile.exists(val_path):\n raise AssertionError(\n 'ImageNet requires manual download of the data. Please download '\n 'the train and val set and place them into: {}'.format(val_path))\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.VALIDATION,\n gen_kwargs={\n 'archive': dl_manager.iter_archive(val_path),\n 'original_labels': self._get_original_labels(val_path),\n 'real_labels': self._get_real_labels(dl_manager),\n },\n ),\n ]\n\n def _generate_examples(self, archive, original_labels, real_labels):\n for fname, fobj in archive:\n record = {\n 'file_name': fname,\n 'image': fobj,\n 'original_label': original_labels[fname],\n 'real_label': real_labels[fname],\n }\n yield fname, record\n",
"# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"SAMSum dataset.\"\"\"\n\nimport json\nimport os\nfrom typing import Dict, Iterator, List, Text, Tuple\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets.public_api as tfds\n\n_CITATION = \"\"\"\n@article{gliwa2019samsum,\n title={SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization},\n author={Gliwa, Bogdan and Mochol, Iwona and Biesek, Maciej and Wawer, Aleksander},\n journal={arXiv preprint arXiv:1911.12237},\n year={2019}\n}\n\"\"\"\n\n_DESCRIPTION = \"\"\"\nSAMSum Corpus contains over 16k chat dialogues with manually annotated\nsummaries.\n\nThere are two features:\n\n - dialogue: text of dialogue.\n - summary: human written summary of the dialogue.\n - id: id of a example.\n\n\"\"\"\n\n_DOCUMENT = \"dialogue\"\n_SUMMARY = \"summary\"\n_ID = \"id\"\n\n\nclass Samsum(tfds.core.GeneratorBasedBuilder):\n \"\"\"SAMSum dataset builder.\"\"\"\n\n VERSION = tfds.core.Version(\"1.0.0\")\n MANUAL_DOWNLOAD_INSTRUCTIONS = \"\"\"\\\n Download https://arxiv.org/src/1911.12237v2/anc/corpus.7z, decompress and\n place train.json, val.json and test.json in the manual follder.\n \"\"\"\n\n def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n _DOCUMENT: tfds.features.Text(),\n _SUMMARY: tfds.features.Text(),\n _ID: tfds.features.Text(),\n }),\n supervised_keys=(_DOCUMENT, _SUMMARY),\n homepage=\"https://arxiv.org/src/1911.12237v2/anc\",\n citation=_CITATION,\n )\n\n def _split_generators(\n self, dl_manager: tfds.download.DownloadManager\n ) -> List[tfds.core.SplitGenerator]:\n \"\"\"Returns SplitGenerators.\"\"\"\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs={\n \"path\": os.path.join(dl_manager.manual_dir, \"train.json\")\n },\n ),\n tfds.core.SplitGenerator(\n name=tfds.Split.VALIDATION,\n gen_kwargs={\n \"path\": os.path.join(dl_manager.manual_dir, \"val.json\")\n },\n ),\n tfds.core.SplitGenerator(\n name=tfds.Split.TEST,\n gen_kwargs={\n \"path\": os.path.join(dl_manager.manual_dir, \"test.json\")\n },\n ),\n ]\n\n def _generate_examples(self,\n path: Text = None\n ) -> Iterator[Tuple[Text, Dict[Text, Text]]]:\n \"\"\"Yields examples.\"\"\"\n with tf.io.gfile.GFile(path, \"rb\") as f:\n for example in json.load(f):\n yield example[_ID], example\n"
] |
[
[
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow.compat.v2.io.gfile.exists"
],
[
"tensorflow.compat.v2.io.gfile.GFile"
]
] |
enkaranfiles/predict-future-sales
|
[
"528d004b78b5c0d41720fc46daa487e3928c045e"
] |
[
"preprocessing.py"
] |
[
"import numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom itertools import product\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\n\r\n\r\n# =============================================================================\r\n# The lines where we processed our data\r\n# =============================================================================\r\ndef lag_feature(df, lags, col):\r\n tmp = df[['date_block_num','shop_id','item_id',col]]\r\n for i in lags:\r\n shifted = tmp.copy()\r\n shifted.columns = ['date_block_num','shop_id','item_id', col+'_lag_'+str(i)]\r\n shifted['date_block_num'] += i\r\n df = pd.merge(df, shifted, on=['date_block_num','shop_id','item_id'], how='left')\r\n return df\r\n\r\n\r\n\r\nitems = pd.read_csv(r'dataset\\items.csv')\r\nshops = pd.read_csv(r'dataset\\shops.csv')\r\ncats = pd.read_csv(r'dataset\\item_categories.csv')\r\ntrain = pd.read_csv(r'dataset\\sales_train.csv')\r\ntest = pd.read_csv(r'dataset\\test.csv').set_index('ID')\r\n\r\ntrain = train[train.item_price<100000]\r\ntrain = train[train.item_cnt_day<1001]\r\n\r\nmedian = train[(train.shop_id==32)&(train.item_id==2973)&(train.date_block_num==4)&(train.item_price>0)].item_price.median()\r\ntrain.loc[train.item_price<0, 'item_price'] = median\r\n\r\ntrain.loc[train.shop_id == 0, 'shop_id'] = 57\r\ntest.loc[test.shop_id == 0, 'shop_id'] = 57\r\ntrain.loc[train.shop_id == 1, 'shop_id'] = 58\r\ntest.loc[test.shop_id == 1, 'shop_id'] = 58\r\ntrain.loc[train.shop_id == 10, 'shop_id'] = 11\r\ntest.loc[test.shop_id == 10, 'shop_id'] = 11\r\n\r\n\r\nshops['shop_name'] = shops['shop_name'].apply(lambda x: x.lower()).str.replace('[^\\w\\s]', '').str.replace('\\d+','').str.strip()\r\nshops['city'] = shops['shop_name'].str.partition(' ')[0]\r\nshops['city_code'] = LabelEncoder().fit_transform(shops['city'])\r\nshops['shop_type'] = shops['shop_name'].apply(lambda x: 'ะผััั' if 'ะผััั' in x else 'ััั' if 'ััั' in x else 'ััะบ' if 'ััะบ' in x else 'ัั' if 'ัั' in x else 'ัะบ' if 'ัะบ' in x else 'NO_DATA')\r\nshops['shop_type'] = LabelEncoder().fit_transform(shops['shop_type'])\r\nshops = shops[['shop_id','city_code','shop_type']]\r\n\r\n\r\ncats['split'] = cats['item_category_name'].str.split('-')\r\ncats['type'] = cats['split'].map(lambda x: x[0].strip())\r\ncats['type_code'] = LabelEncoder().fit_transform(cats['type'])\r\n# if subtype is nan then type\r\ncats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x) > 1 else x[0].strip())\r\ncats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype'])\r\ncats = cats[['item_category_id','type_code', 'subtype_code']]\r\nitems.drop(['item_name'], axis=1, inplace=True)\r\n\r\n\r\nmatrix = []\r\ncols = ['date_block_num','shop_id','item_id']\r\nfor i in range(34):\r\n sales = train[train.date_block_num==i]\r\n matrix.append(np.array(list(product([i], sales.shop_id.unique(), sales.item_id.unique())), dtype='int16'))\r\n \r\nmatrix = pd.DataFrame(np.vstack(matrix), columns=cols)\r\nmatrix['date_block_num'] = matrix['date_block_num'].astype(np.int8)\r\nmatrix['shop_id'] = matrix['shop_id'].astype(np.int8)\r\nmatrix['item_id'] = matrix['item_id'].astype(np.int16)\r\nmatrix.sort_values(cols,inplace=True)\r\n\r\n\r\ntrain['revenue'] = train['item_price'] * train['item_cnt_day']\r\n\r\nitem_price_lag = train.groupby(['date_block_num','item_id']).agg({'item_price':['mean']})\r\nitem_price_lag.columns = ['average_item_price']\r\nitem_price_by_shop_lag = train.groupby(['date_block_num','shop_id', 
'item_id']).agg({'item_price':['mean']})\r\nitem_price_by_shop_lag.columns = ['average_item_price_by_shop']\r\ngroup = train.groupby(['date_block_num','shop_id','item_id']).agg({'item_cnt_day': ['sum']})\r\ngroup.columns = ['item_cnt_month']\r\ngroup.reset_index(inplace=True)\r\n\r\nmatrix = pd.merge(matrix, group, on=cols, how='left')\r\nmatrix['item_cnt_month'] = (matrix['item_cnt_month'].fillna(0).clip(0,20).astype(np.float16))\r\n\r\n\r\n\r\ntest['date_block_num'] = 34\r\ntest['date_block_num'] = test['date_block_num'].astype(np.int8)\r\ntest['shop_id'] = test['shop_id'].astype(np.int8)\r\ntest['item_id'] = test['item_id'].astype(np.int16)\r\n\r\n\r\nmatrix = pd.concat([matrix, test], ignore_index=True, sort=False, keys=cols)\r\nmatrix.fillna(0, inplace=True) # 34 month\r\n\r\nmatrix = pd.merge(matrix, item_price_lag, on=['date_block_num','item_id'], how='left')\r\nmatrix['average_item_price'] = matrix['average_item_price'].astype(np.float16)\r\nmatrix = lag_feature(matrix, [1,2,3], 'average_item_price')\r\nmatrix.drop(['average_item_price'], axis=1, inplace=True)\r\nmatrix = pd.merge(matrix, item_price_by_shop_lag, on=['date_block_num','shop_id','item_id'], how='left')\r\nmatrix['average_item_price_by_shop'] = matrix['average_item_price_by_shop'].astype(np.float16)\r\nmatrix = lag_feature(matrix, [1,2,3], 'average_item_price_by_shop')\r\nmatrix.drop(['average_item_price_by_shop'], axis=1, inplace=True)\r\n\r\n\r\n\r\nmatrix = pd.merge(matrix, shops, on=['shop_id'], how='left')\r\nmatrix = pd.merge(matrix, items, on=['item_id'], how='left')\r\nmatrix = pd.merge(matrix, cats, on=['item_category_id'], how='left')\r\nmatrix['city_code'] = matrix['city_code'].astype(np.int8)\r\nmatrix['shop_type'] = matrix['shop_type'].astype(np.int8)\r\nmatrix['item_category_id'] = matrix['item_category_id'].astype(np.int8)\r\nmatrix['type_code'] = matrix['type_code'].astype(np.int8)\r\nmatrix['subtype_code'] = matrix['subtype_code'].astype(np.int8)\r\n\r\n\r\n\r\nshop_mean = matrix.groupby(['shop_id']).agg({'item_cnt_month': ['mean']})\r\nshop_mean.columns = ['shop_mean']\r\nshop_mean.reset_index(inplace=True)\r\nshop_item_mean = matrix.groupby(['item_id','shop_id']).agg({'item_cnt_month': ['mean']})\r\nshop_item_mean.columns = ['shop_item_mean']\r\nshop_item_mean.reset_index(inplace=True)\r\ngroup = matrix.groupby(['date_block_num', 'item_id']).agg({'item_cnt_month': ['mean']})\r\ngroup.columns = [ 'date_item_avg_item_cnt' ]\r\ngroup.reset_index(inplace=True)\r\n\r\nmatrix = pd.merge(matrix, shop_mean, on=['shop_id'], how='left')\r\nmatrix = pd.merge(matrix, shop_item_mean, on=['item_id','shop_id'], how='left')\r\nmatrix = pd.merge(matrix, group, on=['date_block_num','item_id'], how='left')\r\nmatrix['date_item_avg_item_cnt'] = matrix['date_item_avg_item_cnt'].astype(np.float16)\r\nmatrix = lag_feature(matrix, [1,2,3], 'date_item_avg_item_cnt')\r\nmatrix.drop(['date_item_avg_item_cnt'], axis=1, inplace=True)\r\nmatrix = lag_feature(matrix, [1,2,3], 'item_cnt_month')\r\n\r\nmatrix_last = matrix[matrix.date_block_num > 2]\r\n\r\n\r\ndef fill_na(df):\r\n for col in df.columns:\r\n if ('_lag_' in col) & (df[col].isnull().any()):\r\n if ('item_cnt' in col):\r\n df[col].fillna(0, inplace=True) \r\n if ('shop_mean' in col):\r\n df[col].fillna(0, inplace=True)\r\n if ('average_item_price' in col):\r\n df[col].fillna(0, inplace=True)\r\n return df\r\n\r\nmatrix = fill_na(matrix_last)\r\n\r\nmatrix_last.to_pickle('dataset/traintest.pkl')\r\n\r\n\r\n# 
=============================================================================\r\n# correlation Matrix\r\n# =============================================================================\r\ncor_data = matrix_last[['shop_item_mean','date_block_num','date_item_avg_item_cnt_lag_1','item_category_id','average_item_price_lag_2','average_item_price_lag_1','item_cnt_month_lag_1','item_cnt_month']]\r\ncorr = cor_data.corr()\r\nmask = np.zeros_like(corr, dtype=np.bool)\r\n\r\nf,ax = plt.subplots(figsize=(15, 20))\r\ncmap = sns.diverging_palette(220, 10, as_cmap=True)\r\nsns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,\r\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5},annot=True)\r\nplt.savefig('outputdata/correlation.png')\r\n"
] |
[
[
"numpy.zeros_like",
"sklearn.preprocessing.LabelEncoder",
"pandas.merge",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"pandas.concat",
"pandas.read_csv",
"numpy.vstack"
]
] |
docc-lab/train-ticket
|
[
"350f62000e6658e0e543730580c599d8558253e7"
] |
[
"ts-avatar-service/base64toimage.py"
] |
[
"import base64\nimport numpy as np\nimport cv2\n\npath_save = \"./images/\"\n\n\ndef base64_cv2(base64_str):\n imgString = base64.b64decode(base64_str)\n nparr = np.fromstring(imgString,np.uint8)\n image = cv2.imdecode(nparr,cv2.IMREAD_COLOR)\n\n a = cv2.imwrite(path_save + \"img_face_1\" + \".jpg\", image)\n print(a)\n return image\n\n\nif __name__ == '__main__':\n s = \"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAIBAQEBAQIBAQECAgICAgQDAgICAgUEBAMEBgUGBgYFBgYGBwkIBgcJBwYGCAsICQoKCgoKBggLDAsKDAkKCgr/2wBDAQICAgICAgUDAwUKBwYHCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgr/wAARCACBAIEDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDw6HTNKtNTXUtXvLqGIHe1zfYzLnkYB5OfTjmtyx8cWNvZPJpmhtZ2xP7y8ucebn2U9P1qm0nhK4tZz4Zs572WVtsmoXXzJIxP3lB4B+nFdX4R+C+peJZLbUdfgklWFQRZRnt/ePqa8GpWjA+qo0ZTRH4H0GdrWfxLdSHyJyzvO8mfO/8A1dq8u+MfiWLX9Qj0fSWKxC3fzZlAzFECOOnVm217v8XLuHwv4PuIn0vyLa1G20jQYAJGMn8a+YNe1KbQNCvNXvYdt1cTJHZK3LzysSQq/THSs8PVUpOQ61OUUoFDQ/B9x4m8W2nhkIy2VjAdQ1+YnKwRrggZ6c+lee/tAX0Gr+IrrXYoCjXcix6ZagdIUyi5Hocbvxr3/wAQafL8J/hpb+G711t9c8Qw/wBoa9dSf8srWP5jH7buAR3rwvwN4D8TfHX4pwadpVjOZb2cpZqW+SGEnl2/D7vpXRSrpxdSWyM3QfOqS3Z237C37Jc/xu8cQx31o7aHpbJPf6gv3bifcCI/p2r9U9F8Aad4c0C30/SbNYEiQJ5X/PMAYH51zH7K37O/hz4F/Dy20TTLICUQr57seWb1PrXpGoXUUoaJxsZRwB396+Zx2Iq1qj7I97B4dUY8p4p+0NooPgTVIXQF3t3KH3CmvyC+JfhKWxb7Ax/e3WquxPYfNzX7KfGeNLrRbu3uFBT7LIRn/dNfkV8YtRtYfFlpZ53ZuJ3BPs5FevkD92ovM8zOoyj7N90ed+KvDxsryyhVtyyWUi89zuNcv4j0ww/E29s1LD5I3VQexTFeh61FFNc6MZUDMzSoFI7A5rnPFmleV8W/tjx7hc6dA4A+hFfQnhJJnJ+JbKf+01nK5WWzU5I6gHBrN8SWDXNjd6LMnzLCHQ9xxmuw8WWJSzgkULvjhaF19MtkVheKS0V1Hc7BuuVVG+gGKlP3zOokloef2Ci805LUuGeTdEd394d6+9P+CM15qD2XiHTRcubNrwTMjciN9oGB6dK+DZLdLHU5oYRtG7zIsfw89q+8v+CIWrW0njXxp4NnjzHcwR3tuMfdIA3AVRz1G0tD9EfOt/8An4k/Kit37Hpv/PiP++aKDDnl3OE/Z+/Zrm1zUI9Y1CIw2OStlAwznvuAHHOf0r6XT4P6R4S0O20jR9Ljk1C9cxRIo/1aYy7N9K6n9nfwZDH4RsNSlgEEMdqBc38i/LbpjkKPXGOfeuJ/bb+O+nfBrwJqVr4BBHinV7IxpxuOnWB+US57PJ1x6CvkKq+s1eTufd0ZuhRcz43/AGy/G/hi98dH4WeH/JudL0APLq1+r5E9xnI59BXkPwO+H1t8XfiFc/F3xZ+78PaHC8lt5v3YypB3DPckAfiaxdfsLrxbrFl4C0GOaIXtyJ9Zu2yZZsn7pr1X9oPxbYfAj4UWnwZ8MWUQ1XVQk94qdYox9xcehBzz3UV3yXKo4ePQ44yTTrT3ex4r+0j4pPijxa/h2yvTL9quRc6lKHwAoOLexT/dzlvrX2J/wTh/ZIj0fSX8ceIdMQXEifugBwOSOOOnFfHXwg+FHjD4gfE3To38O3F+iyfa289cb5c/NI2OnoB7V933fx9+Onwt8KQWGjeHLa2toYAkKR2hYnHqcVlia8IuNNK6R1YOjUcXNrVn1FFpljYW/wBhWEcIBwRxXMeLrSO3U3cCHBGNwFfFup/8FGf2hvD17IbzwdZXJXOPMt2Qn8hUFl/wVT17ULxYvGnwtNrF0aSzkL/iVrx6ictj0KUorRn0J8SNl9atBIoMUvyvuHVcf44r8U/j/quoWXxPlsbmNlfTNWubYgevng/y5r9WLT9pLwZ8TbA3vhy9BddshhYncgzzkHpX5u/8FF/htJ4N+NN3r+jKBaeJP9MhkboJFxuH19q9HKJKlNw7nFm1Pnw0J9UeWXXjCRSl6E/eWkjCJ89eGH+FUbnxjNqPiEeJLuUZjt1hhB/iORWNdXsN6BJbSg2jjO7vuNU72CC6vLe0SYBBIuBnvnvX0zaex8vFqSujt/EV5bapK6Iyfwnn3FcN4tu3knAcAGBtq4NbVtdRDfACHcTYyD2Fc/rlvLPqMq/KuZAQCe1YwbdZp7FyjFw1OfvLVpHFyR3xmvqT/gkR47/4Qj9q+x0kSEQaxZvbybjwG5I/PFfNGq2zxWRYEDY+SPWu5/ZI8Uv4X+P/AIa1uG4MQi1eFHYHnlx/jXU5ROCqmkfvp/Ys3980Vj/8LDk/57R/nRUXRyn0d4n+MWjeAvBkelaLZC6mtIlje2EYxcTfwRL65J546V8D/t9/GJvAKX3h7xXfW+peIJ5k1HxFPBgpJKR/o9mh9I+4Hevp2fxjoXwk8Ca3+1D8
RoilxpQaPwlpkq5HmuoC3Ei/xEZ4BzjFfnz8O/h54i/ba/aEvtf8W6yttoOk3bX+sapLzCGySS2O1fLULcntOvQ+8lCUvc6dTT/Z50SfwZ4Yu/jx8RwUaMNNLLOPuswyqrnqf9kc0fB74XeJP2o/iJqnxk8Z2rjTrGTF5PO33mYhoLJP+mmAWb0AArpviqn/AAv34g2HwU+HNwLLw7osoFu0owrqrDN3N6MB9xT175r6R8F+EtF8JeEtO8BfDfSJJdJ0ouUkEf7y8mb79xJ6u3OD6dMVNbEzhG32n+Hka0cJGrNS+zHT1Nf4B+AvBnwysm1/WNCt4BNzcSSsAEP9wZ546YFaHxn+MHgyy8Pvdy6AVseSst2sdujD/ZMhBI9wK5nxjB+0B4jgOi/CbwjYQywAeRd6x8wVsH+E9TXwN+354F8V+CvF8Fh8YbXWvG2r+IfD0pi8QXepvb2+j3IXBhjhX5eCMjPWs8Bg5Y58kpWNsfjvqUeaMb+h7t4z/aE+DU2p7bizMKbTiWIJNHt9Tisybwz8K/GTQ6xp4t50kH+vhTaEz6rX58fsV/CDxb8QvGV5bavqWorpNlp7Jc3QuG+SXnbg5wa+mPhPqPiD4TfENvAur6mt7buf9Afd80691PPWqx2WLCO6lexyYHMp4puMoWPoXw98ARab9T8N6hiN/lj8w/OCf6VzH7Qv7GF58cPAF14U1HV1h1JAbjTLw52284HGf9k9MDvXrnwHu7/xVeNZWcTOY/vKx5TngfhzXtWvfDhtP0ea4nQrlAWwMjgV51CtJS5l0PTq0YVI8k9mfgN8XPgt8Y/gv4rm8JeOPCF3bzpISRHCXjmGc+YhHYjnHWuJvItZt5ftMNtcb1ORHJCymv18/aa1bQxqIh1iytrp4AVWWaIZiGevTmvmrxDovhjxXctHY6HbSz7iHKwKNo9cmvo6WbVHTXMtT53EZXCFS0HofDen+JNTt3802J3hssHyKq6prt5JqQup0kjVyD8wOCfQV9d6t4D+Bnhm/KeItR0+O66yISDj6YNc74t8A/Bjx3Y/ZrW/tgUciJYG2t9cYrphi5SfM4s5Z4F7KaPm+9vUvbKSQxOmCq4fvnvW18CGt7X4l6NfXbDaNXj+Vumd4A/UCtD4rfDjTPAUjT2uoyyQcYEwGR6fWqf7P2jy+Kfizo2nQpJ5EmqR9B0O7P8AOu2nL2kOZHm16TirH7S/25P/AM9LX8z/AI0Vm/8ACubn/npN+dFXys845z/gov8AtIX3xR8R3PwY8K6z5WkaFctHe3aLvE9y3/LNQPvBRtHHQg1wOjfFHVfA3w1tP2ePgho7y63ft5mop5JZp3cceaw6qD/D2q/4N/ZL+Mmp+GdHTwP4burrWfETPMLu5TP2dGOTO2eSWJYj2xX6Rf8ABOr/AIJJeFvgnpUfxC+J8banr9zGHuJ5k3E5GcDPTmvloVIRp8tPWR93LmT5qukevdo8J/Y4/wCCdfjTS9L/AOEp8Xxub7UcT6tPKSfOkPUJxyo7V9V2fwI0rwTZRW9pbOFhUbAVOa+p/wDhF9D0KwWxsNLjSKNcRjZjiuL8Z2VlPC7GFQT90YqJ4eUIuc3q9TrpYv2tlBWj0PBj4b02zuWR0IOMjjj8a8L/AGpfgF8OfidF53ieCJzEjFRNEHQHHavpDxbpcULyjeR3yteU+O2sr6N7aZMhUO4kda4VWnTm3FndCgqj1jdH55+Iv2frHwU02m+C7u0sLFZGfyrKAoW471wGk/s36dqnjm21yy1K/wDttrcrIsnmsUbnkYNfcPiH4faNqt3lLQD229a2fAfwI0KLVIpEsIyxOWIXin9Zry0buOeBo025R0Iv2NfgTeabdzeILqzbZdy8Bv7uOP619AfErwCkXhK4hSABhCRz9K7T4MfD2DTtHitYbNQFbsK674i+As+FZ5DBngj3+7W9HC/u23ocNSq1VSR+GP7b817pfxEm0meYEtOwAz/DzXzP8VvHvjTwro9ho3hfwzc3j6nOs+qtbQkMbVWGYgy9Cf5V9lf8FNfAFxoXxKGqBQoLMcEV8/8AgbW7mwnjEZQvjYu4ZGOta4acYNSavY5MXTlO6Ttc+PI/Ds/i/wCOBsNBtbu3s7vUy5tZXZvIjzna2fxr2n4/fDXwh4Os7LUPCTpBqaoqboJMbm9frXvmufDvwj4keTWbzQlhvjgtc2qKhPryBzXnnif4U6CL77UWuJPLcNH9ok3EV7M81o1I8qVmebHLpUVzuTZ4H8fEvv8AhDrO58QSO12IYU+Ycsc/rXr3/BNr4EJ49+LnhS2hR3vJtclupXhOUit7dASGHbnP5V57+1rpix6PosgDYk1EALjl+SFXH9K/RT/git+zbceEfh/ffG7xTY7L7VXaysIiMCFIxhmAPQsSc16GB5alDU8rGy5JcqPsD/hDbT/ngf8Avmiu1/sFPUUV3eyXc8o+jv2UPg3oqCXxXdaYiK7NDZJtBEMEZ2Io/AZ7da+jpPKtLBI4FHyrgDAFcv8ADbw7BoGhQacLUR+Uu1wON3PWuivJUERxnr0r5bC4eNCF1ufU4+s8TiVfZGFrd2Xg2s+MjGa878cXBHyqeF4yO9dxrUkZtSxHO7GPSuM122WfLMuQTXPipSuz1MHGNjyPxzd5eXII4rx7xjZz3LkwuVBbOQOo9K9t8d6aHmkKxkjpwK848SaLgALGRg9DXlODbufQ4erZJHmK20Vtc7nAJB6kV6N8ILW0mvzLKUK7h1riNf0mSNmmC8YOOe9bn7PE91q/xBtdBnzsdwWUHrTpJe0RWJ1gz7B+Gmh2M9jCygKD0xXReONJs30NoWX5QGH14pnhLwxf6bAj2jlUQD5CvWnfFLVYLLw4zi5jVgpyA2STXvJL6ufJ1K3Nikon5H/8FgfhpG+lrr1na4aNiC47ivz00nbbXiLEQxVgeDzn0r9Vf+Cm8llq/wAPriKXGRGSmeufTFfljotkJNaljVfuuSR75ryI7tHqVEuZHpmhSLqdqscp2/KOlUfF3hu0W2ZwvzY64q34Zj+zOok6ECpfGt5CbJxCx3BeFAqrIppKBh/s8/stWP7Q/wAc9A1LxJZiXw/4TvXvr+KVciWVAWVT7Gv0n/Zn0K2tvh+rW9rHbxzapcSwxRxbVRGc4AH4V84/sVeF5PCPwP1fxpexlJtYdjAzLjgDYpHtmvrz4SaMdK8GaZYOwJjtQz5GCCx3Y/Wvq8BBqij4fH1E8Q7nT/ZE9R+VFXvs8P8Ac/WivWPNPuq12BMDAOag1i1nu7B4LWcRyMflYCltX2yMH4OKkklTZuzwK+Xd7HvvdHM+IPLCYXHPXFcvqvlG2YN1zxjrXVeIYfLjMgA4HFcjqU29SpUDnrmuCqnfU93DPQ5DxZpccg3RHAzzXmvi+wdZTtQkAHkCvUte3SExoM8Vy2taRCsLSTclhnbXLUWmh6dOo4s8Y1jRbq8BgEf04xmui+FXgK88Gazb+MLO3CzxPn5jkEeldBZaPa3moFTECoPGa6E2JtljWDhV5xjioo07aoJYhtWaKfx0/az/AGh
tE0GOL4LfCOLV7rbtxe6gIIj68is3Qfjrq3iz4YS698TNIXR9UsI8alp0d35qrIemG7ir2qXiW1q864LA/vF3cEZ7DtXx1+0v8W9e1XxpqnhLw34gezjitpGdbVgQSB/FjqaVWpVg9WOFOi43UEvM4/8Abq+Lg8VtLp1rMgjddqNMcBh6818VWOk21l4luIoZInJOWKHI/Csz4j/8Jf4r8es/iTxdqV0sLnyYnuWVQc+n9K2fD2k/ZWEoQZwMsWyTTpwcVfuctSsnW0OjsowrRlG4XPekTRbzxb4os/DWnxM0tzMquPRMjJ/KkhlECEsPvE5PoK9K/Zo8ORw6rc+LtYtt8u39wX/gA7iurDUnOumZY7E+ywzfU+gtH0S10/RvDXwu0wKIWmRHSMjiFBzwPcZr6N8KRwFEZY8ZUADHHHFeA/CC6HiHxNceKWRTFDEILE47d2H1r6B8Jgi3QmvsKCskfATk5TbZ0eI/RaKbsb0orqIuj7G1DVI7S4eRyQD0UdqdbanDd2oaOXJIzj0rnfF+ri3EjlgAK4q1+JEmh3xeebdAT857r9K+DeKaqWlsfdxwMZUYyPQ/ENyDbkt0A5FclNEkuZNhxnjmrcni+x1a3TyZVk3c5XoPas+6vYI3KRvwR0qpVFNGtOk4mRq23cWYDg4ArmfFMqRwM/onSug124hC7gT69a43xXqkLWpTBLbDnBrHY7VscrN438OeGZXOpakqFeRIeF/M1yXxC/bp/Z0+FsCweIPiPYXN4w/48bWZXf6cHr7V43+0f+ydrf7THiC3027+IGpaRZ2kxbZY3DL5oHO0kHv0rzDV/wDgkJ+yrpmrt4hvZLxddQCSK5mvZGDsB1OWxRRtUi3LQ9HAZdTx7XO9e3c9q1X/AIKHw+PLC+g+G3gRJEjQp5l2ArAHvj8K+XPjb8Vv+EJ1hdYufD1rLq2sW0hZoTwi45JHr+Nc98Tvg5qPw2kn1HwP8TLqGZJirJIuQwA6DA6fWvmn47aB+0VqmjjVLb4kRwSO5ZblYyWVQfu4OQKqOGhUloz3quXPAUGvY9N9zZ1HWZdV1qPUxCXW4LFgo5j+talrK/mhI2+XHJzXlvwy+JXi+98Uad4L8T6PHczTDEuoxHaHxwTjtmvZ9Q0a0snWW1RmMgwsacnNOcXF2Pi6kYczaViTw7p134h1e10eAjM8gDFj0x1r2rwzpcOlzRaXDfnYjBZAh6+ozXI+CPAMvhzSBrN6P39yd6BhzHxW3ocgstRi+Y5L5OT1r2cHh3TpczWrPlswxvtans09j6b+FS2lpaxQ28aJGMYRRXtHgyUNECW4J4HpXz/8LNVSSKHc3XFe5eDbxRbja3Oa9qhsjw60W5HdbR/fFFUPtx9VorqMuVn0R8QtY3xSor9GzxXkPizxGVLujEBR0Brr/GniASpKofndnGO1ePeNNbmM0pUjHTOa/KcROXNc/WcPCLjY3NI+K+reFtstmnn2zNmWFm5A9vSu/wDD/wASdD8UwLPpl6rttzLGZDuSvnWXXFUKZG474qsniXUdHl/tfw7ceVOh4V2wGHXB/KnSxU0+WxFSgrcyPpi+vPte4hsqvA+bNctrdu9y+1W4Jwea5X4e/H/w/wCJZ10fWHGnaiqjzopW/dy/TNdjcXVvJKoUhi3IwMcdq6lU5nucyb7HEX2nSWd87QPs2gsNvrXmHxlv9YktDJYzlZ1Q4VYwQ31r2rVNNt5pXkfGADmvK/iJb2SR3BVRtweamTai7HTTk4SXI7Hxl8W9C8W+I9QkOpyoDj/Vqh5H4dK8c8e+CtQi037I88IhXqIY8sPzr6o8fW9qb2Rggx7V8+fFbVrfSri4SNcEDc5I4A6dacJ1OWy3O2rmmNUOXndrdTyjSPB+laHdtqsEZEnH7x1G4j+lfRf7OP7OeseLrQfEPxdA0Ol2z77GKQbftj/3h6KPT2qx+yJ+x9q3xguoPiZ8QtOa38MwTbraBn2vqEinOCDyIx9Oa+wdb0qwg0MaVp9ikMFuoS3hiHCKBivqMry6pKCq1lqj4PN80bg4U3fuz5c8e6SILySBEUKjYAQYUewriZIfJ1ILjo/Few/EbQzvmBj/AI68t1+z+yzllXgda9Sorux85CTWp6f8IdVCmKJjnaQOa+hfAt8kkargYr5R+E+sFLyNN3fivpH4eX5NvGCw61tR0Lk+bU9K+0+9FVftY9TRXUZnrPi//WS/7v8AjXkXjHo/+/RRX5NXP1nDdTjLn7g+lQn/AFK/9dBRRWcSp/w2cj4t/wCRjtv+uy/zr6T8H/8AIBtv+uIooropfEccfhLlz/yD5v8Ark38jXlPjv8A48ZfoaKK3l8JcPiR87fEv/j4P+61fN/xY/15/wCvhP8A0IUUVvg/40fUxxn8KXofo58J/wDkifhX/sGxf+ihTtQ+5J9KKK/SqX8JH5vX+16niPxM/wCPif8A368e8Wffk+lFFefU+NmS2RP8Mf8AkKR/71fSnw5/49ovrRRWlE06HotFFFdJB//Z\"\n base64_cv2(s)\n\n"
] |
[
[
"numpy.fromstring"
]
] |